@@ -76,8 +76,11 @@ int grpc_tcp_prepare_socket(SOCKET sock) {
 }
 
 typedef struct grpc_tcp {
+  /* This is our C++ class derivation emulation. */
   grpc_endpoint base;
+  /* The one socket this endpoint is using. */
   grpc_winsocket *socket;
+  /* Refcounting how many operations are in progress. */
   gpr_refcount refcount;
 
   grpc_endpoint_read_cb read_cb;
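
Note on the "C++ class derivation emulation" comment added above: this is the standard C idiom of embedding the "base" struct as the first member of the "derived" struct, so a pointer to one converts safely to the other. A minimal self-contained sketch of the idiom (hypothetical names, not gRPC's actual types):

```c
#include <stdio.h>

/* "Base class": just a vtable of operations. */
typedef struct endpoint {
  void (*describe)(struct endpoint *ep);
} endpoint;

/* "Derived class": the base must be the FIRST member, so that a
   tcp_conn* and the endpoint* to its base share the same address. */
typedef struct tcp_conn {
  endpoint base;
  int fd; /* hypothetical per-connection state */
} tcp_conn;

static void tcp_describe(endpoint *ep) {
  tcp_conn *conn = (tcp_conn *)ep; /* "downcast" */
  printf("tcp connection on fd %d\n", conn->fd);
}

int main(void) {
  tcp_conn conn = {{tcp_describe}, 42};
  endpoint *ep = &conn.base; /* "upcast" */
  ep->describe(ep);          /* prints: tcp connection on fd 42 */
  return 0;
}
```

C guarantees that a pointer to a structure, suitably converted, points to its initial member, which is what makes both casts well-defined.
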
@@ -90,6 +93,10 @@ typedef struct grpc_tcp {
   gpr_slice_buffer write_slices;
   int outstanding_write;
 
+  /* The IO Completion Port runs from another thread. We need some mechanism
+     to protect ourselves when requesting a shutdown. */
+  gpr_mu mu;
+  int shutting_down;
 } grpc_tcp;
 
 static void tcp_ref(grpc_tcp *tcp) {
@@ -100,11 +107,13 @@ static void tcp_unref(grpc_tcp *tcp) {
   if (gpr_unref(&tcp->refcount)) {
     gpr_slice_buffer_destroy(&tcp->write_slices);
     grpc_winsocket_orphan(tcp->socket);
+    gpr_mu_destroy(&tcp->mu);
     gpr_free(tcp);
   }
 }
 
-static void on_read(void *tcpp, int success) {
+/* Asynchronous callback from the IOCP, or the background thread. */
+static void on_read(void *tcpp, int from_iocp) {
   grpc_tcp *tcp = (grpc_tcp *) tcpp;
   grpc_winsocket *socket = tcp->socket;
   gpr_slice sub;
@@ -114,16 +123,25 @@ static void on_read(void *tcpp, int success) {
   grpc_endpoint_read_cb cb = tcp->read_cb;
   grpc_winsocket_callback_info *info = &socket->read_info;
   void *opaque = tcp->read_user_data;
+  int do_abort = 0;
 
-  GPR_ASSERT(tcp->outstanding_read);
-
-  if (!success) {
+  gpr_mu_lock(&tcp->mu);
+  if (!from_iocp || tcp->shutting_down) {
+    /* If we are here with from_iocp set to true, it means we got raced to
+       shutting down the endpoint. No actual abort callback will happen
+       though, so we're going to do it from here. */
+    do_abort = 1;
+  }
+  gpr_mu_unlock(&tcp->mu);
+
+  if (do_abort) {
+    if (from_iocp) gpr_slice_unref(tcp->read_slice);
     tcp_unref(tcp);
     cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN);
     return;
   }
 
-  tcp->outstanding_read = 0;
+  GPR_ASSERT(tcp->outstanding_read);
 
   if (socket->read_info.wsa_error != 0) {
     char *utf8_message = gpr_format_message(info->wsa_error);
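
Note on the `do_abort` pattern introduced above: the flag is computed under `tcp->mu` and acted upon only after the mutex is released, so the user callback never runs while the lock is held. A minimal sketch of that pattern outside of gRPC (pthreads stand in for `gpr_mu`; the types are hypothetical):

```c
#include <pthread.h>
#include <stdio.h>

typedef struct conn {
  pthread_mutex_t mu;
  int shutting_down;
} conn;

/* from_iocp is 1 when called by the completion port for a real
   completion, 0 when called synthetically by the shutdown path. */
static void on_io_done(conn *c, int from_iocp) {
  int do_abort = 0;

  /* Sample the shared state under the lock... */
  pthread_mutex_lock(&c->mu);
  if (!from_iocp || c->shutting_down) do_abort = 1;
  pthread_mutex_unlock(&c->mu);

  /* ...but invoke the user-visible effect outside of it, so a callback
     can re-enter this module without self-deadlocking. */
  if (do_abort) {
    printf("aborted (shutdown raced the completion)\n");
    return;
  }
  printf("completed normally\n");
}

int main(void) {
  conn c = {PTHREAD_MUTEX_INITIALIZER, 0};
  on_io_done(&c, 1); /* completes normally */
  pthread_mutex_lock(&c.mu);
  c.shutting_down = 1;
  pthread_mutex_unlock(&c.mu);
  on_io_done(&c, 1); /* aborts */
  return 0;
}
```
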
@@ -142,6 +160,9 @@ static void on_read(void *tcpp, int success) {
       status = GRPC_ENDPOINT_CB_EOF;
     }
   }
+
+  tcp->outstanding_read = 0;
+
   tcp_unref(tcp);
   cb(opaque, slice, nslices, status);
 }
@@ -158,6 +179,7 @@ static void win_notify_on_read(grpc_endpoint *ep,
   WSABUF buffer;
 
   GPR_ASSERT(!tcp->outstanding_read);
+  GPR_ASSERT(!tcp->shutting_down);
   tcp_ref(tcp);
   tcp->outstanding_read = 1;
   tcp->read_cb = cb;
@@ -168,10 +190,12 @@ static void win_notify_on_read(grpc_endpoint *ep,
   buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
   buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
 
+  /* First, let's try a synchronous, non-blocking read. */
   status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                    NULL, NULL);
   info->wsa_error = status == 0 ? 0 : WSAGetLastError();
 
+  /* Did we get data immediately? Yay. */
   if (info->wsa_error != WSAEWOULDBLOCK) {
     info->bytes_transfered = bytes_read;
     /* This might heavily recurse. */
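
Note on the read path above: it first attempts a synchronous, non-blocking WSARecv with no OVERLAPPED; only on WSAEWOULDBLOCK does it re-issue the same call with an OVERLAPPED so the completion port delivers the result. A condensed standalone sketch of that two-step pattern (Winsock only; assumes a non-blocking socket already associated with an IOCP; error handling elided):

```c
#include <winsock2.h>
#include <string.h>
/* link with: ws2_32.lib */

/* Returns 1 if the result is available inline (data or a hard error),
   0 if the receive was queued and the IOCP will post a completion. */
static int recv_sync_then_async(SOCKET s, WSABUF *buf, WSAOVERLAPPED *ov,
                                DWORD *bytes_read) {
  DWORD flags = 0;
  int status = WSARecv(s, buf, 1, bytes_read, &flags, NULL, NULL);
  int wsa_error = (status == 0) ? 0 : WSAGetLastError();

  /* Anything but WSAEWOULDBLOCK resolved right away: data, EOF or error. */
  if (wsa_error != WSAEWOULDBLOCK) return 1;

  /* Nothing buffered yet: re-issue the same call as an overlapped
     operation; the completion port will deliver the result. */
  memset(ov, 0, sizeof(*ov));
  flags = 0;
  status = WSARecv(s, buf, 1, bytes_read, &flags, ov, NULL);
  if (status != 0 && WSAGetLastError() != WSA_IO_PENDING) {
    return 1; /* the submission itself failed; treat as an inline error */
  }
  /* Even if the overlapped call completed immediately, a completion
     packet is still posted, so wait for the IOCP in all cases. */
  return 0;
}
```
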
@@ -179,6 +203,7 @@ static void win_notify_on_read(grpc_endpoint *ep,
     return;
   }
 
+  /* Otherwise, let's retry, by queuing a read. */
   memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
   status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                    &info->overlapped, NULL);
@@ -192,30 +217,53 @@ static void win_notify_on_read(grpc_endpoint *ep,
     if (error != WSA_IO_PENDING) {
       char *utf8_message = gpr_format_message(WSAGetLastError());
-      __debugbreak();
-      gpr_log(GPR_ERROR, "WSARecv error: %s - this means we're going to leak.",
-              utf8_message);
+      gpr_log(GPR_ERROR, "WSARecv error: %s", utf8_message);
       gpr_free(utf8_message);
-      /* would the IO completion port be called anyway... ? Let's assume not. */
+
+      /* I'm pretty sure this is a very bad situation there. Hence the log.
+         What will happen now is that the socket will neither wait for read
+         or write, unless the caller retries, which is unlikely, but I am not
+         sure if that's guaranteed. And there might also be a write pending.
+         This means that the future orphanage of that socket will be in limbo,
+         and we're going to leak it. I have no idea what could cause this
+         specific case however, aside from a parameter error from our call.
+         Normal read errors would actually happen during the overlapped
+         operation, which is the supported way to go for that. */
       tcp->outstanding_read = 0;
       tcp_unref(tcp);
       cb(arg, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
+
+      /* Per the comment above, I'm going to treat that case as a hard failure
+         for now, and leave the option to catch that and debug. */
+      __debugbreak();
       return;
     }
   }
   grpc_socket_notify_on_read(tcp->socket, on_read, tcp);
 }
 
-static void on_write(void *tcpp, int success) {
+/* Asynchronous callback from the IOCP, or the background thread. */
+static void on_write(void *tcpp, int from_iocp) {
   grpc_tcp *tcp = (grpc_tcp *) tcpp;
   grpc_winsocket *handle = tcp->socket;
   grpc_winsocket_callback_info *info = &handle->write_info;
   grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK;
   grpc_endpoint_write_cb cb = tcp->write_cb;
   void *opaque = tcp->write_user_data;
+  int do_abort = 0;
+
+  gpr_mu_lock(&tcp->mu);
+  if (!from_iocp || tcp->shutting_down) {
+    /* If we are here with from_iocp set to true, it means we got raced to
+       shutting down the endpoint. No actual abort callback will happen
+       though, so we're going to do it from here. */
+    do_abort = 1;
+  }
+  gpr_mu_unlock(&tcp->mu);
 
   GPR_ASSERT(tcp->outstanding_write);
 
-  if (!success) {
+  if (do_abort) {
+    if (from_iocp) gpr_slice_buffer_reset_and_unref(&tcp->write_slices);
     tcp_unref(tcp);
     cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN);
     return;
@@ -238,6 +286,7 @@ static void on_write(void *tcpp, int success) {
   cb(opaque, status);
 }
 
+/* Initiates a write. */
 static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
                                             gpr_slice *slices, size_t nslices,
                                             grpc_endpoint_write_cb cb,
@@ -253,11 +302,13 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
   WSABUF *buffers = local_buffers;
 
   GPR_ASSERT(!tcp->outstanding_write);
+  GPR_ASSERT(!tcp->shutting_down);
   tcp_ref(tcp);
+
   tcp->outstanding_write = 1;
   tcp->write_cb = cb;
   tcp->write_user_data = arg;
 
   gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices);
 
   if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) {
@@ -270,10 +321,14 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
     buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]);
   }
 
+  /* First, let's try a synchronous, non-blocking write. */
   status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                    &bytes_sent, 0, NULL, NULL);
   info->wsa_error = status == 0 ? 0 : WSAGetLastError();
 
+  /* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
+     connection that has its send queue filled up. But if we don't, then we can
+     avoid doing an async write operation at all. */
   if (info->wsa_error != WSAEWOULDBLOCK) {
     grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR;
     if (status == 0) {
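
Note on the buffer setup above: win_write gathers the outgoing slices into a WSABUF array, using a small stack array for the common case and falling back to the heap for larger writes. A self-contained sketch of that gather pattern (the slice type here is a stand-in, not gRPC's gpr_slice):

```c
#include <stdlib.h>
#include <winsock2.h>
/* link with: ws2_32.lib */

/* Hypothetical slice type standing in for gpr_slice. */
typedef struct slice { char *ptr; u_long len; } slice;

/* Gathers nslices slices into one synchronous, non-blocking WSASend.
   A small stack array covers the common case; bigger writes spill to
   the heap. Returns 0 on success, else a WSA error code. */
static int send_slices(SOCKET s, slice *slices, size_t nslices,
                       DWORD *bytes_sent) {
  WSABUF local_buffers[16];
  WSABUF *allocated = NULL;
  WSABUF *buffers = local_buffers;
  size_t i;
  int status;

  if (nslices > sizeof(local_buffers) / sizeof(local_buffers[0])) {
    allocated = (WSABUF *)malloc(sizeof(WSABUF) * nslices);
    if (allocated == NULL) return WSAENOBUFS;
    buffers = allocated;
  }
  for (i = 0; i < nslices; i++) {
    buffers[i].len = slices[i].len;
    buffers[i].buf = slices[i].ptr;
  }

  status = WSASend(s, buffers, (DWORD)nslices, bytes_sent, 0, NULL, NULL);
  free(allocated); /* free(NULL) is a no-op */
  return status == 0 ? 0 : WSAGetLastError();
}
```
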
@@ -291,25 +346,42 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep,
     return ret;
   }
 
+  /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
+     operation, this time asynchronously. */
   memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
   status = WSASend(socket->socket, buffers, tcp->write_slices.count,
                    &bytes_sent, 0, &socket->write_info.overlapped, NULL);
   if (allocated) gpr_free(allocated);
 
+  /* It is possible the operation completed then. But we'd still get an IOCP
+     notification. So let's ignore it and wait for the IOCP. */
   if (status != 0) {
     int error = WSAGetLastError();
     if (error != WSA_IO_PENDING) {
       char *utf8_message = gpr_format_message(WSAGetLastError());
-      __debugbreak();
-      gpr_log(GPR_ERROR, "WSASend error: %s - this means we're going to leak.",
-              utf8_message);
+      gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
       gpr_free(utf8_message);
-      /* would the IO completion port be called anyway ? Let's assume not. */
+
+      /* I'm pretty sure this is a very bad situation there. Hence the log.
+         What will happen now is that the socket will neither wait for read
+         or write, unless the caller retries, which is unlikely, but I am not
+         sure if that's guaranteed. And there might also be a read pending.
+         This means that the future orphanage of that socket will be in limbo,
+         and we're going to leak it. I have no idea what could cause this
+         specific case however, aside from a parameter error from our call.
+         Normal read errors would actually happen during the overlapped
+         operation, which is the supported way to go for that. */
       tcp->outstanding_write = 0;
       tcp_unref(tcp);
+
+      /* Per the comment above, I'm going to treat that case as a hard failure
+         for now, and leave the option to catch that and debug. */
+      __debugbreak();
       return GRPC_ENDPOINT_WRITE_ERROR;
     }
   }
+
+  /* As all is now setup, we can now ask for the IOCP notification. It may
+     trigger the callback immediately however, but no matter. */
   grpc_socket_notify_on_write(socket, on_write, tcp);
   return GRPC_ENDPOINT_WRITE_PENDING;
 }
@@ -319,9 +391,20 @@ static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) {
   grpc_iocp_add_socket(tcp->socket);
 }
 
+/* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
+   for the potential read and write operations. It is up to the caller to
+   guarantee this isn't called in parallel to a read or write request, so
+   we're not going to protect against these. However the IO Completion Port
+   callback will happen from another thread, so we need to protect against
+   concurrent access of the data structure in that regard. */
 static void win_shutdown(grpc_endpoint *ep) {
   grpc_tcp *tcp = (grpc_tcp *) ep;
+  gpr_mu_lock(&tcp->mu);
+  /* At that point, what may happen is that we're already inside the IOCP
+     callback. See the comments in on_read and on_write. */
+  tcp->shutting_down = 1;
   grpc_winsocket_shutdown(tcp->socket);
+  gpr_mu_unlock(&tcp->mu);
 }
 
 static void win_destroy(grpc_endpoint *ep) {
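
Note on the shutdown contract described above: the user thread flips `shutting_down` under the mutex before shutting the socket down, and the IOCP-side callbacks read the flag under the same mutex (see on_read/on_write earlier in this patch). A minimal sketch of the writer side of that handshake (pthreads and POSIX calls stand in for gpr_mu and grpc_winsocket_shutdown; the type is hypothetical):

```c
#include <pthread.h>
#include <sys/socket.h>

/* Mirrors the mu/shutting_down pair this patch adds to grpc_tcp. */
typedef struct conn {
  pthread_mutex_t mu;
  int shutting_down;
  int fd;
} conn;

/* Flip the flag under the lock, then shut the socket down so pending
   operations complete with an error; the completion callbacks observe
   shutting_down under the same lock and abort cleanly instead of
   delivering data. */
static void conn_shutdown(conn *c) {
  pthread_mutex_lock(&c->mu);
  c->shutting_down = 1;
  shutdown(c->fd, SHUT_RDWR);
  pthread_mutex_unlock(&c->mu);
}
```
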
@@ -338,6 +421,7 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket) {
   memset(tcp, 0, sizeof(grpc_tcp));
   tcp->base.vtable = &vtable;
   tcp->socket = socket;
+  gpr_mu_init(&tcp->mu);
   gpr_slice_buffer_init(&tcp->write_slices);
   gpr_ref_init(&tcp->refcount, 1);
   return &tcp->base;