@@ -61,209 +61,8 @@
 #define SENDMSG_FLAGS 0
 #endif
 
-/* Holds a slice array and associated state. */
-typedef struct grpc_tcp_slice_state {
-  gpr_slice *slices;       /* Array of slices */
-  size_t nslices;          /* Size of slices array. */
-  ssize_t first_slice;     /* First valid slice in array */
-  ssize_t last_slice;      /* Last valid slice in array */
-  gpr_slice working_slice; /* pointer to original final slice */
-  int working_slice_valid; /* True if there is a working slice */
-  int memory_owned;        /* True if slices array is owned */
-} grpc_tcp_slice_state;
-
 int grpc_tcp_trace = 0;
 
-static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices,
-                             size_t nslices, size_t valid_slices) {
-  state->slices = slices;
-  state->nslices = nslices;
-  if (valid_slices == 0) {
-    state->first_slice = -1;
-  } else {
-    state->first_slice = 0;
-  }
-  state->last_slice = valid_slices - 1;
-  state->working_slice_valid = 0;
-  state->memory_owned = 0;
-}
-
-/* Returns true if there is still available data */
-static int slice_state_has_available(grpc_tcp_slice_state *state) {
-  return state->first_slice != -1 && state->last_slice >= state->first_slice;
-}
-
-static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) {
-  if (state->first_slice == -1) {
-    return 0;
-  } else {
-    return state->last_slice - state->first_slice + 1;
-  }
-}
-
-static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) {
-  /* TODO(klempner): use realloc instead when first_slice is 0 */
-  /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */
-  gpr_slice *slices = state->slices;
-  size_t original_size = slice_state_slices_allocated(state);
-  size_t i;
-  gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size);
-
-  for (i = 0; i < original_size; ++i) {
-    new_slices[i] = slices[i + state->first_slice];
-  }
-
-  state->slices = new_slices;
-  state->last_slice = original_size - 1;
-  if (original_size > 0) {
-    state->first_slice = 0;
-  } else {
-    state->first_slice = -1;
-  }
-  state->nslices = new_size;
-
-  if (state->memory_owned) {
-    gpr_free(slices);
-  }
-  state->memory_owned = 1;
-}
-
-static void slice_state_remove_prefix(grpc_tcp_slice_state *state,
-                                      size_t prefix_bytes) {
-  gpr_slice *current_slice = &state->slices[state->first_slice];
-  size_t current_slice_size;
-
-  while (slice_state_has_available(state)) {
-    current_slice_size = GPR_SLICE_LENGTH(*current_slice);
-    if (current_slice_size > prefix_bytes) {
-      /* TODO(klempner): Get rid of the extra refcount created here by adding a
-         native "trim the first N bytes" operation to splice */
-      /* TODO(klempner): This really shouldn't be modifying the current slice
-         unless we own the slices array. */
-      gpr_slice tail;
-      tail = gpr_slice_split_tail(current_slice, prefix_bytes);
-      gpr_slice_unref(*current_slice);
-      *current_slice = tail;
-      return;
-    } else {
-      gpr_slice_unref(*current_slice);
-      ++state->first_slice;
-      ++current_slice;
-      prefix_bytes -= current_slice_size;
-    }
-  }
-}
-
-static void slice_state_destroy(grpc_tcp_slice_state *state) {
-  while (slice_state_has_available(state)) {
-    gpr_slice_unref(state->slices[state->first_slice]);
-    ++state->first_slice;
-  }
-
-  if (state->memory_owned) {
-    gpr_free(state->slices);
-    state->memory_owned = 0;
-  }
-}
-
-void slice_state_transfer_ownership(grpc_tcp_slice_state *state,
-                                    gpr_slice **slices, size_t *nslices) {
-  *slices = state->slices + state->first_slice;
-  *nslices = state->last_slice - state->first_slice + 1;
-
-  state->first_slice = -1;
-  state->last_slice = -1;
-}
-
-/* Fills iov with the first min(iov_size, available) slices, returns number
-   filled */
-static size_t slice_state_to_iovec(grpc_tcp_slice_state *state,
-                                   struct iovec *iov, size_t iov_size) {
-  size_t nslices = state->last_slice - state->first_slice + 1;
-  gpr_slice *slices = state->slices + state->first_slice;
-  size_t i;
-  if (nslices < iov_size) {
-    iov_size = nslices;
-  }
-
-  for (i = 0; i < iov_size; ++i) {
-    iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]);
-    iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]);
-  }
-  return iov_size;
-}
-
-/* Makes n blocks available at the end of state, writes them into iov, and
-   returns the number of bytes allocated */
-static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state,
-                                                   struct iovec *iov, size_t n,
-                                                   size_t slice_size) {
-  size_t target_size;
-  size_t i;
-  size_t allocated_bytes;
-  ssize_t allocated_slices = slice_state_slices_allocated(state);
-
-  if (n - state->working_slice_valid >= state->nslices - state->last_slice) {
-    /* Need to grow the slice array */
-    target_size = state->nslices;
-    do {
-      target_size = target_size * 2;
-    } while (target_size < allocated_slices + n - state->working_slice_valid);
-    /* TODO(klempner): If this ever needs to support both prefix removal and
-       append, we should be smarter about the growth logic here */
-    slice_state_realloc(state, target_size);
-  }
-
-  i = 0;
-  allocated_bytes = 0;
-
-  if (state->working_slice_valid) {
-    iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]);
-    iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) -
-                     GPR_SLICE_LENGTH(state->slices[state->last_slice]);
-    allocated_bytes += iov[0].iov_len;
-    ++i;
-    state->slices[state->last_slice] = state->working_slice;
-    state->working_slice_valid = 0;
-  }
-
-  for (; i < n; ++i) {
-    ++state->last_slice;
-    state->slices[state->last_slice] = gpr_slice_malloc(slice_size);
-    iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]);
-    iov[i].iov_len = slice_size;
-    allocated_bytes += slice_size;
-  }
-  if (state->first_slice == -1) {
-    state->first_slice = 0;
-  }
-  return allocated_bytes;
-}
-
-/* Remove the last n bytes from state */
-/* TODO(klempner): Consider having this defer actual deletion until later */
-static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) {
-  while (bytes > 0 && slice_state_has_available(state)) {
-    if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) {
-      state->working_slice = state->slices[state->last_slice];
-      state->working_slice_valid = 1;
-      /* TODO(klempner): Combine these into a single operation that doesn't need
-         to refcount */
-      gpr_slice_unref(gpr_slice_split_tail(
-          &state->slices[state->last_slice],
-          GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes));
-      bytes = 0;
-    } else {
-      bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]);
-      gpr_slice_unref(state->slices[state->last_slice]);
-      --state->last_slice;
-      if (state->last_slice == -1) {
-        state->first_slice = -1;
-      }
-    }
-  }
-}
-
 typedef struct {
   grpc_endpoint base;
   grpc_fd *em_fd;
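Note (not part of the patch): the deleted grpc_tcp_slice_state kept a window
[first_slice, last_slice] over an array of slices so that a partially consumed
prefix could be dropped without copying; the new code gets the same effect from
gpr_slice_buffer. A self-contained sketch of that windowing idea, using plain
byte counts and a hypothetical window_remove_prefix helper in place of the
gpr_slice operations:

#include <stdio.h>
#include <stddef.h>

typedef struct {
  size_t len[8]; /* slice lengths */
  int first;     /* first valid slice, -1 when empty */
  int last;      /* last valid slice */
} window;

/* Mirrors slice_state_remove_prefix: drop whole slices, then trim inside the
   first surviving one. */
static void window_remove_prefix(window *w, size_t prefix_bytes) {
  while (w->first != -1 && w->first <= w->last) {
    if (w->len[w->first] > prefix_bytes) {
      w->len[w->first] -= prefix_bytes;
      return;
    }
    prefix_bytes -= w->len[w->first];
    ++w->first;
  }
}

int main(void) {
  window w = {{100, 200, 300}, 0, 2};
  window_remove_prefix(&w, 250); /* consumes slice 0 and 150 bytes of slice 1 */
  printf("first=%d remaining=%zu\n", w.first, w.len[w.first]);
  return 0;
}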
@@ -273,12 +72,15 @@ typedef struct {
   size_t slice_size;
   gpr_refcount refcount;
 
-  grpc_endpoint_read_cb read_cb;
-  void *read_user_data;
-  grpc_endpoint_write_cb write_cb;
-  void *write_user_data;
-
-  grpc_tcp_slice_state write_state;
+  gpr_slice_buffer *incoming_buffer;
+  gpr_slice_buffer *outgoing_buffer;
+  /** slice within outgoing_buffer to write next */
+  size_t outgoing_slice_idx;
+  /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */
+  size_t outgoing_byte_idx;
+
+  grpc_iomgr_closure *read_cb;
+  grpc_iomgr_closure *write_cb;
 
   grpc_iomgr_closure read_closure;
   grpc_iomgr_closure write_closure;
@@ -288,65 +90,95 @@ typedef struct {
   char *peer_string;
 } grpc_tcp;
 
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success);
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_read(void *arg /* grpc_tcp */, int success);
+static void tcp_handle_write(void *arg /* grpc_tcp */, int success);
 
-static void grpc_tcp_shutdown(grpc_endpoint *ep) {
+static void tcp_shutdown(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_fd_shutdown(tcp->em_fd);
 }
 
-static void grpc_tcp_unref(grpc_tcp *tcp) {
-  int refcount_zero = gpr_unref(&tcp->refcount);
-  if (refcount_zero) {
-    grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
-    gpr_free(tcp->peer_string);
-    gpr_free(tcp);
-  }
+static void tcp_free(grpc_tcp *tcp) {
+  grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan");
+  gpr_free(tcp->peer_string);
+  gpr_free(tcp);
 }
 
-static void grpc_tcp_destroy(grpc_endpoint *ep) {
+#define GRPC_TCP_REFCOUNT_DEBUG
+#ifdef GRPC_TCP_REFCOUNT_DEBUG
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
+#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
+static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file,
+                      int line) {
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp,
+          reason, tcp->refcount.count, tcp->refcount.count - 1);
+  if (gpr_unref(&tcp->refcount)) {
+    tcp_free(tcp);
+  }
+}
+
+static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
+                    int line) {
+  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp,
+          reason, tcp->refcount.count, tcp->refcount.count + 1);
+  gpr_ref(&tcp->refcount);
+}
+#else
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
+#define TCP_REF(tcp, reason) tcp_ref((tcp))
+static void tcp_unref(grpc_tcp *tcp) {
+  if (gpr_unref(&tcp->refcount)) {
+    tcp_free(tcp);
+  }
+}
+
+static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
+#endif
+
+static void tcp_destroy(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  grpc_tcp_unref(tcp);
+  TCP_UNREF(tcp, "destroy");
 }
 
-static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices,
-                         grpc_endpoint_cb_status status) {
-  grpc_endpoint_read_cb cb = tcp->read_cb;
+static void call_read_cb(grpc_tcp *tcp, int success) {
+  grpc_iomgr_closure *cb = tcp->read_cb;
 
   if (grpc_tcp_trace) {
     size_t i;
-    gpr_log(GPR_DEBUG, "read: status=%d", status);
-    for (i = 0; i < nslices; i++) {
-      char *dump = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+    gpr_log(GPR_DEBUG, "read: success=%d", success);
+    for (i = 0; i < tcp->incoming_buffer->count; i++) {
+      char *dump = gpr_dump_slice(tcp->incoming_buffer->slices[i],
+                                  GPR_DUMP_HEX | GPR_DUMP_ASCII);
       gpr_log(GPR_DEBUG, "READ %p: %s", tcp, dump);
       gpr_free(dump);
     }
   }
 
   tcp->read_cb = NULL;
-  cb(tcp->read_user_data, slices, nslices, status);
+  tcp->incoming_buffer = NULL;
+  cb->cb(cb->cb_arg, success);
 }
 
-#define INLINE_SLICE_BUFFER_SIZE 8
 #define MAX_READ_IOVEC 4
-static void grpc_tcp_continue_read(grpc_tcp *tcp) {
-  gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE];
+static void tcp_continue_read(grpc_tcp *tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_READ_IOVEC];
   ssize_t read_bytes;
-  ssize_t allocated_bytes;
-  struct grpc_tcp_slice_state read_state;
-  gpr_slice *final_slices;
-  size_t final_nslices;
+  size_t i;
 
   GPR_ASSERT(!tcp->finished_edge);
   GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC);
+  GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC);
   GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0);
-  slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE,
-                   0);
 
-  allocated_bytes = slice_state_append_blocks_into_iovec(
-      &read_state, iov, tcp->iov_size, tcp->slice_size);
+  while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) {
+    gpr_slice_buffer_add_indexed(tcp->incoming_buffer,
+                                 gpr_slice_malloc(tcp->slice_size));
+  }
+  for (i = 0; i < tcp->incoming_buffer->count; i++) {
+    iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]);
+    iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]);
  }
 
   msg.msg_name = NULL;
   msg.msg_namelen = 0;
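Note (not part of the patch): after this hunk, tcp_continue_read fills a fixed
iovec array from the slices in tcp->incoming_buffer and reads into all of them
with a single scatter-gather syscall. A self-contained sketch of that pattern,
using readv on a pipe instead of recvmsg on a socket:

#include <stdio.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void) {
  char a[8], b[8]; /* stand-ins for two pre-allocated slices */
  struct iovec iov[2];
  int fds[2];
  ssize_t n;

  iov[0].iov_base = a; iov[0].iov_len = sizeof(a);
  iov[1].iov_base = b; iov[1].iov_len = sizeof(b);

  if (pipe(fds) != 0) return 1;
  (void)write(fds[1], "hello scatter-gather", 20);

  n = readv(fds[0], iov, 2); /* one call fills both buffers in order */
  printf("read %zd bytes: %.8s|%.*s\n", n, a, (int)(n - 8), b);

  close(fds[0]);
  close(fds[1]);
  return 0;
}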
@@ -362,87 +194,63 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) {
   } while (read_bytes < 0 && errno == EINTR);
   GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0);
 
-  if (read_bytes < allocated_bytes) {
-    /* TODO(klempner): Consider a second read first, in hopes of getting a
-     * quick EAGAIN and saving a bunch of allocations. */
-    slice_state_remove_last(&read_state, read_bytes < 0
-                                             ? allocated_bytes
-                                             : allocated_bytes - read_bytes);
-  }
-
   if (read_bytes < 0) {
-    /* NB: After calling the user_cb a parallel call of the read handler may
+    /* NB: After calling call_read_cb a parallel call of the read handler may
      * be running. */
     if (errno == EAGAIN) {
       if (tcp->iov_size > 1) {
         tcp->iov_size /= 2;
       }
-      if (slice_state_has_available(&read_state)) {
-        /* TODO(klempner): We should probably do the call into the application
-           without all this junk on the stack */
-        /* FIXME(klempner): Refcount properly */
-        slice_state_transfer_ownership(&read_state, &final_slices,
-                                       &final_nslices);
-        tcp->finished_edge = 1;
-        call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
-        slice_state_destroy(&read_state);
-        grpc_tcp_unref(tcp);
-      } else {
-        /* We've consumed the edge, request a new one */
-        slice_state_destroy(&read_state);
-        grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
-      }
+      /* We've consumed the edge, request a new one */
+      grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
     } else {
       /* TODO(klempner): Log interesting errors */
-      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
-      slice_state_destroy(&read_state);
-      grpc_tcp_unref(tcp);
+      gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+      call_read_cb(tcp, 0);
+      TCP_UNREF(tcp, "read");
     }
   } else if (read_bytes == 0) {
     /* 0 read size ==> end of stream */
-    if (slice_state_has_available(&read_state)) {
-      /* there were bytes already read: pass them up to the application */
-      slice_state_transfer_ownership(&read_state, &final_slices,
-                                     &final_nslices);
-      call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF);
-    } else {
-      call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF);
-    }
-    slice_state_destroy(&read_state);
-    grpc_tcp_unref(tcp);
+    gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer);
+    call_read_cb(tcp, 0);
+    TCP_UNREF(tcp, "read");
   } else {
-    if (tcp->iov_size < MAX_READ_IOVEC) {
+    GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
+    if ((size_t)read_bytes < tcp->incoming_buffer->length) {
+      gpr_slice_buffer_trim_end(tcp->incoming_buffer,
+                                tcp->incoming_buffer->length - read_bytes);
+    } else if (tcp->iov_size < MAX_READ_IOVEC) {
       ++tcp->iov_size;
     }
-    GPR_ASSERT(slice_state_has_available(&read_state));
-    slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices);
-    call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK);
-    slice_state_destroy(&read_state);
-    grpc_tcp_unref(tcp);
+    GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
+    call_read_cb(tcp, 1);
+    TCP_UNREF(tcp, "read");
   }
 
   GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0);
 }
 
-static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_read(void *arg /* grpc_tcp */, int success) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
   GPR_ASSERT(!tcp->finished_edge);
 
   if (!success) {
-    call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
-    grpc_tcp_unref(tcp);
+    call_read_cb(tcp, 0);
+    TCP_UNREF(tcp, "read");
   } else {
-    grpc_tcp_continue_read(tcp);
+    tcp_continue_read(tcp);
   }
 }
 
-static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
-                                    void *user_data) {
+static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep,
+                                        gpr_slice_buffer *incoming_buffer,
+                                        grpc_iomgr_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   GPR_ASSERT(tcp->read_cb == NULL);
   tcp->read_cb = cb;
-  tcp->read_user_data = user_data;
-  gpr_ref(&tcp->refcount);
+  tcp->incoming_buffer = incoming_buffer;
+  gpr_slice_buffer_reset_and_unref(incoming_buffer);
+  TCP_REF(tcp, "read");
   if (tcp->finished_edge) {
     tcp->finished_edge = 0;
     grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);
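Note (not part of the patch): the branches above distinguish the three
outcomes of a non-blocking read: -1 with EAGAIN (re-arm and wait), 0 (peer
closed the connection), and a positive count (deliver data). A self-contained
sketch of the same decision tree on a non-blocking pipe:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  char buf[16];
  int fds[2];
  ssize_t n;

  if (pipe(fds) != 0) return 1;
  fcntl(fds[0], F_SETFL, O_NONBLOCK);

  n = read(fds[0], buf, sizeof(buf)); /* nothing written yet */
  if (n < 0 && errno == EAGAIN) printf("EAGAIN: wait for readability\n");

  (void)write(fds[1], "data", 4);
  n = read(fds[0], buf, sizeof(buf));
  if (n > 0) printf("got %zd bytes\n", n);

  close(fds[1]); /* writer gone, pipe drained */
  n = read(fds[0], buf, sizeof(buf));
  if (n == 0) printf("EOF: end of stream\n");

  close(fds[0]);
  return 0;
}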
@@ -450,18 +258,41 @@ static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb,
     tcp->handle_read_closure.cb_arg = tcp;
     grpc_iomgr_add_delayed_callback(&tcp->handle_read_closure, 1);
   }
+  /* TODO(ctiller): immediate return */
+  return GRPC_ENDPOINT_PENDING;
 }
 
 #define MAX_WRITE_IOVEC 16
-static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
+static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_WRITE_IOVEC];
   int iov_size;
   ssize_t sent_length;
-  grpc_tcp_slice_state *state = &tcp->write_state;
+  ssize_t sending_length;
+  ssize_t trailing;
+  ssize_t unwind_slice_idx;
+  ssize_t unwind_byte_idx;
 
   for (;;) {
-    iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC);
+    sending_length = 0;
+    unwind_slice_idx = tcp->outgoing_slice_idx;
+    unwind_byte_idx = tcp->outgoing_byte_idx;
+    for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count &&
+                       iov_size != MAX_WRITE_IOVEC;
+         iov_size++) {
+      iov[iov_size].iov_base =
+          GPR_SLICE_START_PTR(
+              tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) +
+          tcp->outgoing_byte_idx;
+      iov[iov_size].iov_len =
+          GPR_SLICE_LENGTH(
+              tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) -
+          tcp->outgoing_byte_idx;
+      sending_length += iov[iov_size].iov_len;
+      tcp->outgoing_slice_idx++;
+      tcp->outgoing_byte_idx = 0;
+    }
+    GPR_ASSERT(iov_size > 0);
 
     msg.msg_name = NULL;
     msg.msg_namelen = 0;
@@ -480,70 +311,75 @@ static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) {
     if (sent_length < 0) {
       if (errno == EAGAIN) {
-        return GRPC_ENDPOINT_WRITE_PENDING;
+        tcp->outgoing_slice_idx = unwind_slice_idx;
+        tcp->outgoing_byte_idx = unwind_byte_idx;
+        return GRPC_ENDPOINT_PENDING;
       } else {
         /* TODO(klempner): Log some of these */
-        slice_state_destroy(state);
-        return GRPC_ENDPOINT_WRITE_ERROR;
+        return GRPC_ENDPOINT_ERROR;
       }
     }
 
-    /* TODO(klempner): Probably better to batch this after we finish flushing */
-    slice_state_remove_prefix(state, sent_length);
+    GPR_ASSERT(tcp->outgoing_byte_idx == 0);
+    trailing = sending_length - sent_length;
+    while (trailing > 0) {
+      ssize_t slice_length;
+
+      tcp->outgoing_slice_idx--;
+      slice_length = GPR_SLICE_LENGTH(
+          tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]);
+      if (slice_length > trailing) {
+        tcp->outgoing_byte_idx = slice_length - trailing;
+        break;
+      } else {
+        trailing -= slice_length;
+      }
+    }
 
-    if (!slice_state_has_available(state)) {
-      return GRPC_ENDPOINT_WRITE_DONE;
+    if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) {
+      return GRPC_ENDPOINT_DONE;
     }
   };
 }
 
-static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) {
+static void tcp_handle_write(void *arg /* grpc_tcp */, int success) {
   grpc_tcp *tcp = (grpc_tcp *)arg;
-  grpc_endpoint_write_status write_status;
-  grpc_endpoint_cb_status cb_status;
-  grpc_endpoint_write_cb cb;
+  grpc_endpoint_op_status status;
+  grpc_iomgr_closure *cb;
 
   if (!success) {
-    slice_state_destroy(&tcp->write_state);
     cb = tcp->write_cb;
     tcp->write_cb = NULL;
-    cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN);
-    grpc_tcp_unref(tcp);
+    cb->cb(cb->cb_arg, 0);
+    TCP_UNREF(tcp, "write");
     return;
   }
 
   GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0);
-  write_status = grpc_tcp_flush(tcp);
-  if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
+  status = tcp_flush(tcp);
+  if (status == GRPC_ENDPOINT_PENDING) {
     grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
   } else {
-    slice_state_destroy(&tcp->write_state);
-    if (write_status == GRPC_ENDPOINT_WRITE_DONE) {
-      cb_status = GRPC_ENDPOINT_CB_OK;
-    } else {
-      cb_status = GRPC_ENDPOINT_CB_ERROR;
-    }
     cb = tcp->write_cb;
     tcp->write_cb = NULL;
-    cb(tcp->write_user_data, cb_status);
-    grpc_tcp_unref(tcp);
+    cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE);
+    TCP_UNREF(tcp, "write");
   }
   GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0);
 }
 
-static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
-                                                 gpr_slice *slices,
-                                                 size_t nslices,
-                                                 grpc_endpoint_write_cb cb,
-                                                 void *user_data) {
+static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep,
+                                         gpr_slice_buffer *buf,
+                                         grpc_iomgr_closure *cb) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
-  grpc_endpoint_write_status status;
+  grpc_endpoint_op_status status;
 
   if (grpc_tcp_trace) {
     size_t i;
 
-    for (i = 0; i < nslices; i++) {
-      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+    for (i = 0; i < buf->count; i++) {
+      char *data =
+          gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
       gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data);
       gpr_free(data);
     }
   }
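Note (not part of the patch): when sendmsg reports a short write, the new
tcp_flush walks backwards from the end of the attempted batch to find the first
unsent byte. A self-contained sketch of that cursor arithmetic, with a
hypothetical unwind() helper standing in for the trailing loop:

#include <stdio.h>
#include <stddef.h>

static void unwind(const size_t *len, size_t slice_idx_after, size_t sending,
                   size_t sent, size_t *out_slice, size_t *out_byte) {
  size_t trailing = sending - sent; /* bytes that did not make it out */
  size_t slice = slice_idx_after;
  size_t byte = 0;
  while (trailing > 0) {
    --slice;
    if (len[slice] > trailing) {
      byte = len[slice] - trailing; /* resume inside this slice */
      break;
    }
    trailing -= len[slice]; /* whole slice is still unsent */
  }
  *out_slice = slice;
  *out_byte = byte;
}

int main(void) {
  size_t len[3] = {100, 200, 300}; /* 600 bytes queued across 3 slices */
  size_t slice, byte;
  unwind(len, 3, 600, 250, &slice, &byte);
  printf("resume at slice %zu, byte %zu\n", slice, byte); /* slice 1, byte 150 */
  return 0;
}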
@@ -551,15 +387,19 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
 
   GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0);
   GPR_ASSERT(tcp->write_cb == NULL);
-  slice_state_init(&tcp->write_state, slices, nslices, nslices);
 
-  status = grpc_tcp_flush(tcp);
-  if (status == GRPC_ENDPOINT_WRITE_PENDING) {
-    /* TODO(klempner): Consider inlining rather than malloc for small nslices */
-    slice_state_realloc(&tcp->write_state, nslices);
-    gpr_ref(&tcp->refcount);
+  if (buf->length == 0) {
+    GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0);
+    return GRPC_ENDPOINT_DONE;
+  }
+  tcp->outgoing_buffer = buf;
+  tcp->outgoing_slice_idx = 0;
+  tcp->outgoing_byte_idx = 0;
+
+  status = tcp_flush(tcp);
+  if (status == GRPC_ENDPOINT_PENDING) {
+    TCP_REF(tcp, "write");
     tcp->write_cb = cb;
-    tcp->write_user_data = user_data;
     grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure);
   }
@@ -567,27 +407,25 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep,
   return status;
 }
 
-static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
+static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_pollset_add_fd(pollset, tcp->em_fd);
 }
 
-static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep,
-                                        grpc_pollset_set *pollset_set) {
+static void tcp_add_to_pollset_set(grpc_endpoint *ep,
+                                   grpc_pollset_set *pollset_set) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
 }
 
-static char *grpc_tcp_get_peer(grpc_endpoint *ep) {
+static char *tcp_get_peer(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *)ep;
   return gpr_strdup(tcp->peer_string);
 }
 
 static const grpc_endpoint_vtable vtable = {
-    grpc_tcp_notify_on_read, grpc_tcp_write,
-    grpc_tcp_add_to_pollset, grpc_tcp_add_to_pollset_set,
-    grpc_tcp_shutdown, grpc_tcp_destroy,
-    grpc_tcp_get_peer};
+    tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set,
+    tcp_shutdown, tcp_destroy, tcp_get_peer};
 
 grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
                                const char *peer_string) {
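Note (not part of the patch): the rewired vtable replaces the callback-style
notify_on_read/write entry points with tcp_read/tcp_write, which report whether
the operation finished synchronously (GRPC_ENDPOINT_DONE), failed
(GRPC_ENDPOINT_ERROR), or will complete later through the supplied closure
(GRPC_ENDPOINT_PENDING). A toy, self-contained sketch of that calling
convention; the enum and toy_write below are illustrative stand-ins, not gRPC
API:

#include <stdio.h>
#include <stddef.h>

typedef enum { ENDPOINT_DONE, ENDPOINT_ERROR, ENDPOINT_PENDING } op_status;

/* Pretend the kernel buffer accepts only 10 bytes per call. */
static op_status toy_write(size_t len, size_t *written) {
  *written = len < 10 ? len : 10;
  return *written == len ? ENDPOINT_DONE : ENDPOINT_PENDING;
}

int main(void) {
  size_t written;
  op_status s = toy_write(25, &written);
  if (s == ENDPOINT_DONE) {
    printf("finished synchronously (%zu bytes)\n", written);
  } else if (s == ENDPOINT_PENDING) {
    printf("queued %zu bytes; completion will be signalled via the closure\n",
           written);
  }
  return 0;
}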
@@ -597,21 +435,19 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size,
   tcp->fd = em_fd->fd;
   tcp->read_cb = NULL;
   tcp->write_cb = NULL;
-  tcp->read_user_data = NULL;
-  tcp->write_user_data = NULL;
+  tcp->incoming_buffer = NULL;
   tcp->slice_size = slice_size;
   tcp->iov_size = 1;
   tcp->finished_edge = 1;
-  slice_state_init(&tcp->write_state, NULL, 0, 0);
   /* paired with unref in grpc_tcp_destroy */
   gpr_ref_init(&tcp->refcount, 1);
   tcp->em_fd = em_fd;
-  tcp->read_closure.cb = grpc_tcp_handle_read;
+  tcp->read_closure.cb = tcp_handle_read;
   tcp->read_closure.cb_arg = tcp;
-  tcp->write_closure.cb = grpc_tcp_handle_write;
+  tcp->write_closure.cb = tcp_handle_write;
   tcp->write_closure.cb_arg = tcp;
 
-  tcp->handle_read_closure.cb = grpc_tcp_handle_read;
+  tcp->handle_read_closure.cb = tcp_handle_read;
   return &tcp->base;
 }