Fixing bitrotting in udp_server_test.c

pull/5270/head^2
Nicolas "Pixel" Noble, 9 years ago
parent e8fb852a73
commit b9012fc03c

Files changed:
  1. src/core/iomgr/pollset.h (12 changed lines)
  2. test/core/iomgr/udp_server_test.c (22 changed lines)

src/core/iomgr/pollset.h

@@ -55,7 +55,7 @@ typedef struct grpc_pollset_worker grpc_pollset_worker;
 size_t grpc_pollset_size(void);
 void grpc_pollset_init(grpc_pollset *pollset, gpr_mu **mu);
 /* Begin shutting down the pollset, and call closure when done.
- * GRPC_POLLSET_MU(pollset) must be held */
+ * pollset's mutex must be held */
 void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                            grpc_closure *closure);
 /** Reset the pollset to its initial state (perhaps with some cached objects);
@@ -66,16 +66,16 @@ void grpc_pollset_destroy(grpc_pollset *pollset);
 /* Do some work on a pollset.
    May involve invoking asynchronous callbacks, or actually polling file
    descriptors.
-   Requires GRPC_POLLSET_MU(pollset) locked.
-   May unlock GRPC_POLLSET_MU(pollset) during its execution.
+   Requires pollset's mutex locked.
+   May unlock its mutex during its execution.
    worker is a (platform-specific) handle that can be used to wake up
    from grpc_pollset_work before any events are received and before the timeout
    has expired. It is both initialized and destroyed by grpc_pollset_work.
    Initialization of worker is guaranteed to occur BEFORE the
-   GRPC_POLLSET_MU(pollset) is released for the first time by
-   grpc_pollset_work, and it is guaranteed that GRPC_POLLSET_MU(pollset) will
-   not be released by grpc_pollset_work AFTER worker has been destroyed.
+   pollset's mutex is released for the first time by grpc_pollset_work
+   and it is guaranteed that it will not be released by grpc_pollset_work
+   AFTER worker has been destroyed.
    Tries not to block past deadline.
    May call grpc_closure_list_run on grpc_closure_list, without holding the
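
Note: a minimal sketch of the caller-side pattern the updated comments describe,
assembled only from declarations and calls that appear elsewhere in this diff; it is
not part of the change itself, and the variable names and deadline value are
illustrative.

  gpr_mu *mu;
  grpc_pollset pollset;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); /* illustrative */

  grpc_pollset_init(&pollset, &mu);     /* init now hands back the pollset's mutex */
  gpr_mu_lock(mu);                      /* the mutex must be held around grpc_pollset_work */
  {
    grpc_pollset_worker *worker = NULL; /* initialized and destroyed by grpc_pollset_work */
    grpc_pollset_work(&exec_ctx, &pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
  }
  gpr_mu_unlock(mu);                    /* grpc_pollset_work may itself drop and re-take it */
  grpc_exec_ctx_finish(&exec_ctx);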

test/core/iomgr/udp_server_test.c

@@ -31,8 +31,9 @@
  *
  */
-#include "src/core/iomgr/udp_server.h"
 #include "src/core/iomgr/iomgr.h"
 #include "src/core/iomgr/pollset_posix.h"
+#include "src/core/iomgr/udp_server.h"
 #include <grpc/support/log.h>
 #include <grpc/support/sync.h>
 #include <grpc/support/time.h>
@@ -48,6 +49,7 @@
 #define LOG_TEST(x) gpr_log(GPR_INFO, "%s", #x)
 static grpc_pollset g_pollset;
+static gpr_mu *g_mu;
 static int g_number_of_reads = 0;
 static int g_number_of_bytes_read = 0;
@@ -56,14 +58,14 @@ static void on_read(grpc_exec_ctx *exec_ctx, grpc_fd *emfd,
   char read_buffer[512];
   ssize_t byte_count;
-  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
+  gpr_mu_lock(g_mu);
   byte_count = recv(emfd->fd, read_buffer, sizeof(read_buffer), 0);
   g_number_of_reads++;
   g_number_of_bytes_read += (int)byte_count;
   grpc_pollset_kick(&g_pollset, NULL);
-  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
+  gpr_mu_unlock(g_mu);
 }
 static void test_no_op(void) {
@@ -142,7 +144,7 @@ static void test_receive(int number_of_clients) {
   pollsets[0] = &g_pollset;
   grpc_udp_server_start(&exec_ctx, s, pollsets, 1, NULL);
-  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
+  gpr_mu_lock(g_mu);
   for (i = 0; i < number_of_clients; i++) {
     deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
@@ -155,19 +157,19 @@ static void test_receive(int number_of_clients) {
     GPR_ASSERT(5 == write(clifd, "hello", 5));
     while (g_number_of_reads == number_of_reads_before &&
            gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) {
-      grpc_pollset_worker worker;
+      grpc_pollset_worker *worker = NULL;
       grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                         gpr_now(GPR_CLOCK_MONOTONIC), deadline);
-      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
+      gpr_mu_unlock(g_mu);
       grpc_exec_ctx_finish(&exec_ctx);
-      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
+      gpr_mu_lock(g_mu);
     }
     GPR_ASSERT(g_number_of_reads == number_of_reads_before + 1);
     close(clifd);
   }
   GPR_ASSERT(g_number_of_bytes_read == 5 * number_of_clients);
-  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
+  gpr_mu_unlock(g_mu);
   grpc_udp_server_destroy(&exec_ctx, s, NULL);
   grpc_exec_ctx_finish(&exec_ctx);
@@ -181,8 +183,8 @@ int main(int argc, char **argv) {
   grpc_closure destroyed;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
-  grpc_iomgr_init();
-  grpc_pollset_init(&g_pollset);
+  grpc_init();
+  grpc_pollset_init(&g_pollset, &g_mu);
   test_no_op();
   test_no_op_with_start();
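
Note: the grpc_closure destroyed declared in this hunk is presumably consumed further
down in main, outside the lines shown, to shut the pollset down before exit. A rough
sketch of that teardown, assuming a destroy_pollset callback of the usual iomgr-test
shape; the callback name, its exact signature, and the trailing ordering are
assumptions, not part of this diff.

  /* Assumed helper, not shown in this diff; the exact callback signature in this
     tree may differ. */
  static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p, bool success) {
    grpc_pollset_destroy(p);
  }

  /* Assumed tail of main(): shut the pollset down through the closure, flush the
     exec_ctx so the callback runs, then pair the grpc_init() above with
     grpc_shutdown(). */
  grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
  grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_shutdown();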
