From cb951f6c57c35d26ff8b643c4a498be397be6750 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Tue, 18 Aug 2015 17:38:11 -0700 Subject: [PATCH 01/29] Split server shutdown into tryShutdown and forceShutdown --- src/node/ext/server.cc | 50 +++++++++++++++------------- src/node/ext/server.h | 3 +- src/node/src/server.js | 21 ++++++++++-- src/node/test/call_test.js | 2 +- src/node/test/end_to_end_test.js | 2 +- src/node/test/health_test.js | 2 +- src/node/test/interop_sanity_test.js | 2 +- src/node/test/math_client_test.js | 2 +- src/node/test/server_test.js | 27 ++++++++++++++- src/node/test/surface_test.js | 16 ++++----- 10 files changed, 86 insertions(+), 41 deletions(-) diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc index 8e39644846d..c32e3ae9182 100644 --- a/src/node/ext/server.cc +++ b/src/node/ext/server.cc @@ -139,8 +139,11 @@ void Server::Init(Handle exports) { NanSetPrototypeTemplate(tpl, "start", NanNew(Start)->GetFunction()); - NanSetPrototypeTemplate(tpl, "shutdown", - NanNew(Shutdown)->GetFunction()); + NanSetPrototypeTemplate(tpl, "tryShutdown", + NanNew(TryShutdown)->GetFunction()); + NanSetPrototypeTemplate( + tpl, "forceShutdown", + NanNew(ForceShutdown)->GetFunction()); NanAssignPersistent(fun_tpl, tpl); Handle ctr = tpl->GetFunction(); @@ -153,14 +156,13 @@ bool Server::HasInstance(Handle val) { } void Server::ShutdownServer() { - if (this->wrapped_server != NULL) { - grpc_server_shutdown_and_notify(this->wrapped_server, - this->shutdown_queue, - NULL); - grpc_completion_queue_pluck(this->shutdown_queue, NULL, - gpr_inf_future(GPR_CLOCK_REALTIME), NULL); - this->wrapped_server = NULL; - } + grpc_server_shutdown_and_notify(this->wrapped_server, + this->shutdown_queue, + NULL); + grpc_server_cancel_all_calls(this->wrapped_server); + grpc_completion_queue_pluck(this->shutdown_queue, NULL, + gpr_inf_future(GPR_CLOCK_REALTIME), NULL); + this->wrapped_server = NULL; } NAN_METHOD(Server::New) { @@ -222,9 +224,6 @@ NAN_METHOD(Server::RequestCall) { return NanThrowTypeError("requestCall can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); - if (server->wrapped_server == NULL) { - return NanThrowError("requestCall cannot be called on a shut down Server"); - } NewCallOp *op = new NewCallOp(); unique_ptr ops(new OpVec()); ops->push_back(unique_ptr(op)); @@ -256,10 +255,6 @@ NAN_METHOD(Server::AddHttp2Port) { "addHttp2Port's second argument must be ServerCredentials"); } Server *server = ObjectWrap::Unwrap(args.This()); - if (server->wrapped_server == NULL) { - return NanThrowError( - "addHttp2Port cannot be called on a shut down Server"); - } ServerCredentials *creds_object = ObjectWrap::Unwrap( args[1]->ToObject()); grpc_server_credentials *creds = creds_object->GetWrappedServerCredentials(); @@ -281,21 +276,30 @@ NAN_METHOD(Server::Start) { return NanThrowTypeError("start can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); - if (server->wrapped_server == NULL) { - return NanThrowError("start cannot be called on a shut down Server"); - } grpc_server_start(server->wrapped_server); NanReturnUndefined(); } -NAN_METHOD(ShutdownCallback) { +NAN_METHOD(Server::TryShutdown) { + NanScope(); + if (!HasInstance(args.This())) { + return NanThrowTypeError("tryShutdown can only be called on a Server"); + } + Server *server = ObjectWrap::Unwrap(args.This()); + unique_ptr ops(new OpVec()); + grpc_server_shutdown_and_notify( + server->wrapped_server, + CompletionQueueAsyncWorker::GetQueue(), + new struct tag(new 
NanCallback(args[0].As()), ops.release(), + shared_ptr(nullptr))); + CompletionQueueAsyncWorker::Next(); NanReturnUndefined(); } -NAN_METHOD(Server::Shutdown) { +NAN_METHOD(Server::ForceShutdown) { NanScope(); if (!HasInstance(args.This())) { - return NanThrowTypeError("shutdown can only be called on a Server"); + return NanThrowTypeError("forceShutdown can only be called on a Server"); } Server *server = ObjectWrap::Unwrap(args.This()); server->ShutdownServer(); diff --git a/src/node/ext/server.h b/src/node/ext/server.h index faab7e3418c..e7d5c3fb11a 100644 --- a/src/node/ext/server.h +++ b/src/node/ext/server.h @@ -67,7 +67,8 @@ class Server : public ::node::ObjectWrap { static NAN_METHOD(RequestCall); static NAN_METHOD(AddHttp2Port); static NAN_METHOD(Start); - static NAN_METHOD(Shutdown); + static NAN_METHOD(TryShutdown); + static NAN_METHOD(ForceShutdown); static NanCallback *constructor; static v8::Persistent fun_tpl; diff --git a/src/node/src/server.js b/src/node/src/server.js index 8b86173f082..f2520c3c970 100644 --- a/src/node/src/server.js +++ b/src/node/src/server.js @@ -613,11 +613,26 @@ function Server(options) { } server.requestCall(handleNewCall); }; + + /** + * Gracefully shuts down the server. The server will stop receiving new calls, + * and any pending calls will complete. The callback will be called when all + * pending calls have completed and the server is fully shut down. This method + * is idempotent with itself and forceShutdown. + * @param {function()} callback The shutdown complete callback + */ + this.tryShutdown = function(callback) { + server.tryShutdown(callback); + }; + /** - * Shuts down the server. + * Forcibly shuts down the server. The server will stop receiving new calls + * and cancel all pending calls. When it returns, the server has shut down. + * This method is idempotent with itself and tryShutdown, and it will trigger + * any outstanding tryShutdown callbacks. 
*/ - this.shutdown = function() { - server.shutdown(); + this.forceShutdown = function() { + server.forceShutdown(); }; } diff --git a/src/node/test/call_test.js b/src/node/test/call_test.js index 8d0f20b0747..e7f071bcd53 100644 --- a/src/node/test/call_test.js +++ b/src/node/test/call_test.js @@ -61,7 +61,7 @@ describe('call', function() { channel = new grpc.Channel('localhost:' + port, insecureCreds); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); describe('constructor', function() { it('should reject anything less than 3 arguments', function() { diff --git a/src/node/test/end_to_end_test.js b/src/node/test/end_to_end_test.js index 7574d98b8af..4b8da3bfb17 100644 --- a/src/node/test/end_to_end_test.js +++ b/src/node/test/end_to_end_test.js @@ -70,7 +70,7 @@ describe('end-to-end', function() { channel = new grpc.Channel('localhost:' + port_num, insecureCreds); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('should start and end a request without error', function(complete) { var done = multiDone(complete, 2); diff --git a/src/node/test/health_test.js b/src/node/test/health_test.js index be4ef1d251b..22c58d3956b 100644 --- a/src/node/test/health_test.js +++ b/src/node/test/health_test.js @@ -61,7 +61,7 @@ describe('Health Checking', function() { grpc.Credentials.createInsecure()); }); after(function() { - healthServer.shutdown(); + healthServer.forceShutdown(); }); it('should say an enabled service is SERVING', function(done) { healthClient.check({service: ''}, function(err, response) { diff --git a/src/node/test/interop_sanity_test.js b/src/node/test/interop_sanity_test.js index 0a5eb29c0c1..2ca07c1d50d 100644 --- a/src/node/test/interop_sanity_test.js +++ b/src/node/test/interop_sanity_test.js @@ -51,7 +51,7 @@ describe('Interop tests', function() { done(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); // This depends on not using a binary stream it('should pass empty_unary', function(done) { diff --git a/src/node/test/math_client_test.js b/src/node/test/math_client_test.js index ef01870a4c1..80b0c5ff2a9 100644 --- a/src/node/test/math_client_test.js +++ b/src/node/test/math_client_test.js @@ -59,7 +59,7 @@ describe('Math client', function() { done(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('should handle a single request', function(done) { var arg = {dividend: 7, divisor: 4}; diff --git a/src/node/test/server_test.js b/src/node/test/server_test.js index 20c9a07ffa3..4670a62efa3 100644 --- a/src/node/test/server_test.js +++ b/src/node/test/server_test.js @@ -90,7 +90,7 @@ describe('server', function() { server.addHttp2Port('0.0.0.0:0', grpc.ServerCredentials.createInsecure()); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('should start without error', function() { assert.doesNotThrow(function() { @@ -98,4 +98,29 @@ describe('server', function() { }); }); }); + describe('shutdown', function() { + var server; + beforeEach(function() { + server = new grpc.Server(); + server.addHttp2Port('0.0.0.0:0', grpc.ServerCredentials.createInsecure()); + server.start(); + }); + afterEach(function() { + server.forceShutdown(); + }); + it('tryShutdown should shutdown successfully', function(done) { + server.tryShutdown(done); + }); + it.only('forceShutdown should shutdown successfully', function() { + server.forceShutdown(); + }); + it('tryShutdown should be idempotent', function(done) { + server.tryShutdown(done); + server.tryShutdown(function() {}); + 
}); + it('forceShutdown should be idempotent', function() { + server.forceShutdown(); + server.forceShutdown(); + }); + }); }); diff --git a/src/node/test/surface_test.js b/src/node/test/surface_test.js index 52515cc8e7c..d12ba0465e9 100644 --- a/src/node/test/surface_test.js +++ b/src/node/test/surface_test.js @@ -104,7 +104,7 @@ describe('Server.prototype.addProtoService', function() { server = new grpc.Server(); }); afterEach(function() { - server.shutdown(); + server.forceShutdown(); }); it('Should succeed with a single service', function() { assert.doesNotThrow(function() { @@ -148,7 +148,7 @@ describe('Client#$waitForReady', function() { client = new Client('localhost:' + port, grpc.Credentials.createInsecure()); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('should complete when called alone', function(done) { client.$waitForReady(Infinity, function(error) { @@ -203,7 +203,7 @@ describe('Echo service', function() { server.start(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('should echo the recieved message directly', function(done) { client.echo({value: 'test value', value2: 3}, function(error, response) { @@ -248,7 +248,7 @@ describe('Generic client and server', function() { grpc.Credentials.createInsecure()); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('Should respond with a capitalized string', function(done) { client.capitalize('abc', function(err, response) { @@ -296,7 +296,7 @@ describe('Echo metadata', function() { server.start(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('with unary call', function(done) { var call = client.unary({}, function(err, data) { @@ -419,7 +419,7 @@ describe('Other conditions', function() { server.start(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('channel.getTarget should be available', function() { assert.strictEqual(typeof client.channel.getTarget(), 'string'); @@ -681,7 +681,7 @@ describe('Other conditions', function() { }); afterEach(function() { console.log('Shutting down server'); - proxy.shutdown(); + proxy.forceShutdown(); }); describe('Cancellation', function() { it('With a unary call', function(done) { @@ -845,7 +845,7 @@ describe('Cancelling surface client', function() { server.start(); }); after(function() { - server.shutdown(); + server.forceShutdown(); }); it('Should correctly cancel a unary call', function(done) { var call = client.div({'divisor': 0, 'dividend': 0}, function(err, resp) { From 8a2ab3b249558f5bf4c6e4ef938c68b106750828 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Wed, 19 Aug 2015 10:34:59 -0700 Subject: [PATCH 02/29] Removed errant NULL setting --- src/node/ext/server.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc index c32e3ae9182..57c43104907 100644 --- a/src/node/ext/server.cc +++ b/src/node/ext/server.cc @@ -120,7 +120,7 @@ Server::Server(grpc_server *server) : wrapped_server(server) { Server::~Server() { this->ShutdownServer(); grpc_completion_queue_shutdown(this->shutdown_queue); - grpc_server_destroy(wrapped_server); + grpc_server_destroy(this->wrapped_server); grpc_completion_queue_destroy(this->shutdown_queue); } @@ -162,7 +162,6 @@ void Server::ShutdownServer() { grpc_server_cancel_all_calls(this->wrapped_server); grpc_completion_queue_pluck(this->shutdown_queue, NULL, gpr_inf_future(GPR_CLOCK_REALTIME), NULL); - this->wrapped_server = NULL; } NAN_METHOD(Server::New) { 
From c5dac97bd3b1e87b228d0d130ce2cb457297fdd0 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Wed, 19 Aug 2015 11:49:53 -0700 Subject: [PATCH 03/29] Added a test, enabled other tests --- src/node/test/server_test.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/node/test/server_test.js b/src/node/test/server_test.js index 4670a62efa3..9574709f602 100644 --- a/src/node/test/server_test.js +++ b/src/node/test/server_test.js @@ -111,7 +111,7 @@ describe('server', function() { it('tryShutdown should shutdown successfully', function(done) { server.tryShutdown(done); }); - it.only('forceShutdown should shutdown successfully', function() { + it('forceShutdown should shutdown successfully', function() { server.forceShutdown(); }); it('tryShutdown should be idempotent', function(done) { @@ -122,5 +122,9 @@ describe('server', function() { server.forceShutdown(); server.forceShutdown(); }); + it('forceShutdown should trigger tryShutdown', function(done) { + server.tryShutdown(done); + server.forceShutdown(); + }); }); }); From 592e7f2dd0c059468de6377e8d6bc0d61fe2dd2c Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 21 Aug 2015 10:45:48 -0700 Subject: [PATCH 04/29] Refactor Endpoint API - Allow reads to complete immediately - Unify read/write signatures - Simplify memory management to allow future optimization work --- include/grpc/support/slice_buffer.h | 2 + src/core/httpcli/httpcli.c | 95 ++-- src/core/iomgr/endpoint.c | 17 +- src/core/iomgr/endpoint.h | 51 +- src/core/iomgr/tcp_posix.c | 520 +++++++-------------- src/core/security/secure_endpoint.c | 188 ++++---- src/core/security/secure_transport_setup.c | 119 +++-- src/core/support/slice_buffer.c | 22 + src/core/transport/chttp2/internal.h | 10 +- src/core/transport/chttp2/writing.c | 21 +- src/core/transport/chttp2_transport.c | 125 ++--- test/core/bad_client/bad_client.c | 17 +- test/core/iomgr/endpoint_tests.c | 204 ++++---- test/core/iomgr/tcp_posix_test.c | 147 +++--- test/core/security/secure_endpoint_test.c | 55 +-- 15 files changed, 734 insertions(+), 859 deletions(-) diff --git a/include/grpc/support/slice_buffer.h b/include/grpc/support/slice_buffer.h index ec048e8c91f..04db003ac58 100644 --- a/include/grpc/support/slice_buffer.h +++ b/include/grpc/support/slice_buffer.h @@ -86,6 +86,8 @@ void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb); void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b); /* move all of the elements of src into dst */ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst); +/* remove n bytes from the end of a slice buffer */ +void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n); #ifdef __cplusplus } diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c index 9012070e8ea..1e38479eb16 100644 --- a/src/core/httpcli/httpcli.c +++ b/src/core/httpcli/httpcli.c @@ -61,6 +61,10 @@ typedef struct { grpc_httpcli_context *context; grpc_pollset *pollset; grpc_iomgr_object iomgr_obj; + gpr_slice_buffer incoming; + gpr_slice_buffer outgoing; + grpc_iomgr_closure on_read; + grpc_iomgr_closure done_write; } internal_request; static grpc_httpcli_get_override g_get_override = NULL; @@ -99,73 +103,70 @@ static void finish(internal_request *req, int success) { gpr_slice_unref(req->request_text); gpr_free(req->host); grpc_iomgr_unregister_object(&req->iomgr_obj); + gpr_slice_buffer_destroy(&req->incoming); + gpr_slice_buffer_destroy(&req->outgoing); gpr_free(req); } -static void on_read(void *user_data, gpr_slice *slices, 
size_t nslices, - grpc_endpoint_cb_status status) { +static void on_read(void *user_data, int success); + +static void do_read(internal_request *req) { + switch (grpc_endpoint_read(req->ep, &req->incoming, &req->on_read)) { + case GRPC_ENDPOINT_DONE: + on_read(req, 1); + break; + case GRPC_ENDPOINT_PENDING: + break; + case GRPC_ENDPOINT_ERROR: + on_read(req, 0); + break; + } +} + +static void on_read(void *user_data, int success) { internal_request *req = user_data; size_t i; - for (i = 0; i < nslices; i++) { - if (GPR_SLICE_LENGTH(slices[i])) { + for (i = 0; i < req->incoming.count; i++) { + if (GPR_SLICE_LENGTH(req->incoming.slices[i])) { req->have_read_byte = 1; - if (!grpc_httpcli_parser_parse(&req->parser, slices[i])) { + if (!grpc_httpcli_parser_parse(&req->parser, req->incoming.slices[i])) { finish(req, 0); - goto done; + return; } } } - switch (status) { - case GRPC_ENDPOINT_CB_OK: - grpc_endpoint_notify_on_read(req->ep, on_read, req); - break; - case GRPC_ENDPOINT_CB_EOF: - case GRPC_ENDPOINT_CB_ERROR: - case GRPC_ENDPOINT_CB_SHUTDOWN: - if (!req->have_read_byte) { - next_address(req); - } else { - finish(req, grpc_httpcli_parser_eof(&req->parser)); - } - break; - } - -done: - for (i = 0; i < nslices; i++) { - gpr_slice_unref(slices[i]); + if (success) { + do_read(req); + } else if (!req->have_read_byte) { + next_address(req); + } else { + finish(req, grpc_httpcli_parser_eof(&req->parser)); } } -static void on_written(internal_request *req) { - grpc_endpoint_notify_on_read(req->ep, on_read, req); -} +static void on_written(internal_request *req) { do_read(req); } -static void done_write(void *arg, grpc_endpoint_cb_status status) { +static void done_write(void *arg, int success) { internal_request *req = arg; - switch (status) { - case GRPC_ENDPOINT_CB_OK: - on_written(req); - break; - case GRPC_ENDPOINT_CB_EOF: - case GRPC_ENDPOINT_CB_SHUTDOWN: - case GRPC_ENDPOINT_CB_ERROR: - next_address(req); - break; + if (success) { + on_written(req); + } else { + next_address(req); } } static void start_write(internal_request *req) { gpr_slice_ref(req->request_text); - switch ( - grpc_endpoint_write(req->ep, &req->request_text, 1, done_write, req)) { - case GRPC_ENDPOINT_WRITE_DONE: + gpr_slice_buffer_add(&req->outgoing, req->request_text); + switch (grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write)) { + case GRPC_ENDPOINT_DONE: on_written(req); break; - case GRPC_ENDPOINT_WRITE_PENDING: + case GRPC_ENDPOINT_PENDING: break; - case GRPC_ENDPOINT_WRITE_ERROR: + case GRPC_ENDPOINT_ERROR: finish(req, 0); break; } @@ -237,6 +238,10 @@ void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset, request->handshaker ? request->handshaker : &grpc_httpcli_plaintext; req->context = context; req->pollset = pollset; + grpc_iomgr_closure_init(&req->on_read, on_read, req); + grpc_iomgr_closure_init(&req->done_write, done_write, req); + gpr_slice_buffer_init(&req->incoming); + gpr_slice_buffer_init(&req->outgoing); gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); grpc_iomgr_register_object(&req->iomgr_obj, name); gpr_free(name); @@ -270,7 +275,11 @@ void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset, request->handshaker ? 
request->handshaker : &grpc_httpcli_plaintext; req->context = context; req->pollset = pollset; - gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); + grpc_iomgr_closure_init(&req->on_read, on_read, req); + grpc_iomgr_closure_init(&req->done_write, done_write, req); + gpr_slice_buffer_init(&req->incoming); + gpr_slice_buffer_init(&req->outgoing); + gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->path); grpc_iomgr_register_object(&req->iomgr_obj, name); gpr_free(name); req->host = gpr_strdup(request->host); diff --git a/src/core/iomgr/endpoint.c b/src/core/iomgr/endpoint.c index 8ee14bce9b7..a7878e31dd4 100644 --- a/src/core/iomgr/endpoint.c +++ b/src/core/iomgr/endpoint.c @@ -33,17 +33,16 @@ #include "src/core/iomgr/endpoint.h" -void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, - void *user_data) { - ep->vtable->notify_on_read(ep, cb, user_data); +grpc_endpoint_op_status grpc_endpoint_read(grpc_endpoint *ep, + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { + return ep->vtable->read(ep, slices, cb); } -grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep, - gpr_slice *slices, - size_t nslices, - grpc_endpoint_write_cb cb, - void *user_data) { - return ep->vtable->write(ep, slices, nslices, cb, user_data); +grpc_endpoint_op_status grpc_endpoint_write(grpc_endpoint *ep, + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { + return ep->vtable->write(ep, slices, cb); } void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h index ea92a500e84..38f1e46d670 100644 --- a/src/core/iomgr/endpoint.h +++ b/src/core/iomgr/endpoint.h @@ -37,6 +37,7 @@ #include "src/core/iomgr/pollset.h" #include "src/core/iomgr/pollset_set.h" #include +#include #include /* An endpoint caps a streaming channel between two communicating processes. 
@@ -45,31 +46,17 @@ typedef struct grpc_endpoint grpc_endpoint; typedef struct grpc_endpoint_vtable grpc_endpoint_vtable; -typedef enum grpc_endpoint_cb_status { - GRPC_ENDPOINT_CB_OK = 0, /* Call completed successfully */ - GRPC_ENDPOINT_CB_EOF, /* Call completed successfully, end of file reached */ - GRPC_ENDPOINT_CB_SHUTDOWN, /* Call interrupted by shutdown */ - GRPC_ENDPOINT_CB_ERROR /* Call interrupted by socket error */ -} grpc_endpoint_cb_status; - -typedef enum grpc_endpoint_write_status { - GRPC_ENDPOINT_WRITE_DONE, /* completed immediately, cb won't be called */ - GRPC_ENDPOINT_WRITE_PENDING, /* cb will be called when completed */ - GRPC_ENDPOINT_WRITE_ERROR /* write errored out, cb won't be called */ -} grpc_endpoint_write_status; - -typedef void (*grpc_endpoint_read_cb)(void *user_data, gpr_slice *slices, - size_t nslices, - grpc_endpoint_cb_status error); -typedef void (*grpc_endpoint_write_cb)(void *user_data, - grpc_endpoint_cb_status error); +typedef enum grpc_endpoint_op_status { + GRPC_ENDPOINT_DONE, /* completed immediately, cb won't be called */ + GRPC_ENDPOINT_PENDING, /* cb will be called when completed */ + GRPC_ENDPOINT_ERROR /* write errored out, cb won't be called */ +} grpc_endpoint_op_status; struct grpc_endpoint_vtable { - void (*notify_on_read)(grpc_endpoint *ep, grpc_endpoint_read_cb cb, - void *user_data); - grpc_endpoint_write_status (*write)(grpc_endpoint *ep, gpr_slice *slices, - size_t nslices, grpc_endpoint_write_cb cb, - void *user_data); + grpc_endpoint_op_status (*read)(grpc_endpoint *ep, gpr_slice_buffer *slices, + grpc_iomgr_closure *cb); + grpc_endpoint_op_status (*write)(grpc_endpoint *ep, gpr_slice_buffer *slices, + grpc_iomgr_closure *cb); void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset); void (*add_to_pollset_set)(grpc_endpoint *ep, grpc_pollset_set *pollset); void (*shutdown)(grpc_endpoint *ep); @@ -77,9 +64,13 @@ struct grpc_endpoint_vtable { char *(*get_peer)(grpc_endpoint *ep); }; -/* When data is available on the connection, calls the callback with slices. */ -void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, - void *user_data); +/* When data is available on the connection, calls the callback with slices. + Callback success indicates that the endpoint can accept more reads, failure + indicates the endpoint is closed. + Valid slices may be placed into \a slices even on callback success == 0. */ +grpc_endpoint_op_status grpc_endpoint_read( + grpc_endpoint *ep, gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT; char *grpc_endpoint_get_peer(grpc_endpoint *ep); @@ -89,11 +80,9 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep); returns GRPC_ENDPOINT_WRITE_DONE. Otherwise it returns GRPC_ENDPOINT_WRITE_PENDING and calls cb when the connection is ready for more data. */ -grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep, - gpr_slice *slices, - size_t nslices, - grpc_endpoint_write_cb cb, - void *user_data); +grpc_endpoint_op_status grpc_endpoint_write( + grpc_endpoint *ep, gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT; /* Causes any pending read/write callbacks to run immediately with GRPC_ENDPOINT_CB_SHUTDOWN status */ diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c index 360e6ebd8cf..36ba3a76063 100644 --- a/src/core/iomgr/tcp_posix.c +++ b/src/core/iomgr/tcp_posix.c @@ -61,209 +61,8 @@ #define SENDMSG_FLAGS 0 #endif -/* Holds a slice array and associated state. 
*/ -typedef struct grpc_tcp_slice_state { - gpr_slice *slices; /* Array of slices */ - size_t nslices; /* Size of slices array. */ - ssize_t first_slice; /* First valid slice in array */ - ssize_t last_slice; /* Last valid slice in array */ - gpr_slice working_slice; /* pointer to original final slice */ - int working_slice_valid; /* True if there is a working slice */ - int memory_owned; /* True if slices array is owned */ -} grpc_tcp_slice_state; - int grpc_tcp_trace = 0; -static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices, - size_t nslices, size_t valid_slices) { - state->slices = slices; - state->nslices = nslices; - if (valid_slices == 0) { - state->first_slice = -1; - } else { - state->first_slice = 0; - } - state->last_slice = valid_slices - 1; - state->working_slice_valid = 0; - state->memory_owned = 0; -} - -/* Returns true if there is still available data */ -static int slice_state_has_available(grpc_tcp_slice_state *state) { - return state->first_slice != -1 && state->last_slice >= state->first_slice; -} - -static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) { - if (state->first_slice == -1) { - return 0; - } else { - return state->last_slice - state->first_slice + 1; - } -} - -static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) { - /* TODO(klempner): use realloc instead when first_slice is 0 */ - /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */ - gpr_slice *slices = state->slices; - size_t original_size = slice_state_slices_allocated(state); - size_t i; - gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size); - - for (i = 0; i < original_size; ++i) { - new_slices[i] = slices[i + state->first_slice]; - } - - state->slices = new_slices; - state->last_slice = original_size - 1; - if (original_size > 0) { - state->first_slice = 0; - } else { - state->first_slice = -1; - } - state->nslices = new_size; - - if (state->memory_owned) { - gpr_free(slices); - } - state->memory_owned = 1; -} - -static void slice_state_remove_prefix(grpc_tcp_slice_state *state, - size_t prefix_bytes) { - gpr_slice *current_slice = &state->slices[state->first_slice]; - size_t current_slice_size; - - while (slice_state_has_available(state)) { - current_slice_size = GPR_SLICE_LENGTH(*current_slice); - if (current_slice_size > prefix_bytes) { - /* TODO(klempner): Get rid of the extra refcount created here by adding a - native "trim the first N bytes" operation to splice */ - /* TODO(klempner): This really shouldn't be modifying the current slice - unless we own the slices array. 
*/ - gpr_slice tail; - tail = gpr_slice_split_tail(current_slice, prefix_bytes); - gpr_slice_unref(*current_slice); - *current_slice = tail; - return; - } else { - gpr_slice_unref(*current_slice); - ++state->first_slice; - ++current_slice; - prefix_bytes -= current_slice_size; - } - } -} - -static void slice_state_destroy(grpc_tcp_slice_state *state) { - while (slice_state_has_available(state)) { - gpr_slice_unref(state->slices[state->first_slice]); - ++state->first_slice; - } - - if (state->memory_owned) { - gpr_free(state->slices); - state->memory_owned = 0; - } -} - -void slice_state_transfer_ownership(grpc_tcp_slice_state *state, - gpr_slice **slices, size_t *nslices) { - *slices = state->slices + state->first_slice; - *nslices = state->last_slice - state->first_slice + 1; - - state->first_slice = -1; - state->last_slice = -1; -} - -/* Fills iov with the first min(iov_size, available) slices, returns number - filled */ -static size_t slice_state_to_iovec(grpc_tcp_slice_state *state, - struct iovec *iov, size_t iov_size) { - size_t nslices = state->last_slice - state->first_slice + 1; - gpr_slice *slices = state->slices + state->first_slice; - size_t i; - if (nslices < iov_size) { - iov_size = nslices; - } - - for (i = 0; i < iov_size; ++i) { - iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]); - iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]); - } - return iov_size; -} - -/* Makes n blocks available at the end of state, writes them into iov, and - returns the number of bytes allocated */ -static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state, - struct iovec *iov, size_t n, - size_t slice_size) { - size_t target_size; - size_t i; - size_t allocated_bytes; - ssize_t allocated_slices = slice_state_slices_allocated(state); - - if (n - state->working_slice_valid >= state->nslices - state->last_slice) { - /* Need to grow the slice array */ - target_size = state->nslices; - do { - target_size = target_size * 2; - } while (target_size < allocated_slices + n - state->working_slice_valid); - /* TODO(klempner): If this ever needs to support both prefix removal and - append, we should be smarter about the growth logic here */ - slice_state_realloc(state, target_size); - } - - i = 0; - allocated_bytes = 0; - - if (state->working_slice_valid) { - iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]); - iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) - - GPR_SLICE_LENGTH(state->slices[state->last_slice]); - allocated_bytes += iov[0].iov_len; - ++i; - state->slices[state->last_slice] = state->working_slice; - state->working_slice_valid = 0; - } - - for (; i < n; ++i) { - ++state->last_slice; - state->slices[state->last_slice] = gpr_slice_malloc(slice_size); - iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]); - iov[i].iov_len = slice_size; - allocated_bytes += slice_size; - } - if (state->first_slice == -1) { - state->first_slice = 0; - } - return allocated_bytes; -} - -/* Remove the last n bytes from state */ -/* TODO(klempner): Consider having this defer actual deletion until later */ -static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) { - while (bytes > 0 && slice_state_has_available(state)) { - if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) { - state->working_slice = state->slices[state->last_slice]; - state->working_slice_valid = 1; - /* TODO(klempner): Combine these into a single operation that doesn't need - to refcount */ - gpr_slice_unref(gpr_slice_split_tail( - 
&state->slices[state->last_slice], - GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes)); - bytes = 0; - } else { - bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]); - gpr_slice_unref(state->slices[state->last_slice]); - --state->last_slice; - if (state->last_slice == -1) { - state->first_slice = -1; - } - } - } -} - typedef struct { grpc_endpoint base; grpc_fd *em_fd; @@ -273,12 +72,15 @@ typedef struct { size_t slice_size; gpr_refcount refcount; - grpc_endpoint_read_cb read_cb; - void *read_user_data; - grpc_endpoint_write_cb write_cb; - void *write_user_data; + gpr_slice_buffer *incoming_buffer; + gpr_slice_buffer *outgoing_buffer; + /** slice within outgoing_buffer to write next */ + size_t outgoing_slice_idx; + /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */ + size_t outgoing_byte_idx; - grpc_tcp_slice_state write_state; + grpc_iomgr_closure *read_cb; + grpc_iomgr_closure *write_cb; grpc_iomgr_closure read_closure; grpc_iomgr_closure write_closure; @@ -288,65 +90,95 @@ typedef struct { char *peer_string; } grpc_tcp; -static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success); -static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success); +static void tcp_handle_read(void *arg /* grpc_tcp */, int success); +static void tcp_handle_write(void *arg /* grpc_tcp */, int success); -static void grpc_tcp_shutdown(grpc_endpoint *ep) { +static void tcp_shutdown(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_fd_shutdown(tcp->em_fd); } -static void grpc_tcp_unref(grpc_tcp *tcp) { - int refcount_zero = gpr_unref(&tcp->refcount); - if (refcount_zero) { - grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); - gpr_free(tcp->peer_string); - gpr_free(tcp); +static void tcp_free(grpc_tcp *tcp) { + grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); + gpr_free(tcp->peer_string); + gpr_free(tcp); +} + +#define GRPC_TCP_REFCOUNT_DEBUG +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) +#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) +static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp, + reason, tcp->refcount.count, tcp->refcount.count - 1); + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp); + } +} + +static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp, + reason, tcp->refcount.count, tcp->refcount.count + 1); + gpr_ref(&tcp->refcount); +} +#ifdef GRPC_TCP_REFCOUNT_DEBUG +#else +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) +#define TCP_REF(tcp, reason) tcp_ref((tcp)) +static void tcp_unref(grpc_tcp *tcp) { + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp); } } -static void grpc_tcp_destroy(grpc_endpoint *ep) { +static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } +#endif + +static void tcp_destroy(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_tcp_unref(tcp); + TCP_UNREF(tcp, "destroy"); } -static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status status) { - grpc_endpoint_read_cb cb = tcp->read_cb; +static void call_read_cb(grpc_tcp *tcp, int success) { + grpc_iomgr_closure *cb = tcp->read_cb; if (grpc_tcp_trace) { size_t i; - gpr_log(GPR_DEBUG, "read: status=%d", status); - for (i = 0; i < nslices; i++) { - char *dump = gpr_dump_slice(slices[i], GPR_DUMP_HEX | 
GPR_DUMP_ASCII); + gpr_log(GPR_DEBUG, "read: success=%d", success); + for (i = 0; i < tcp->incoming_buffer->count; i++) { + char *dump = gpr_dump_slice(tcp->incoming_buffer->slices[i], + GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "READ %p: %s", tcp, dump); gpr_free(dump); } } tcp->read_cb = NULL; - cb(tcp->read_user_data, slices, nslices, status); + tcp->incoming_buffer = NULL; + cb->cb(cb->cb_arg, success); } -#define INLINE_SLICE_BUFFER_SIZE 8 #define MAX_READ_IOVEC 4 -static void grpc_tcp_continue_read(grpc_tcp *tcp) { - gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE]; +static void tcp_continue_read(grpc_tcp *tcp) { struct msghdr msg; struct iovec iov[MAX_READ_IOVEC]; ssize_t read_bytes; - ssize_t allocated_bytes; - struct grpc_tcp_slice_state read_state; - gpr_slice *final_slices; - size_t final_nslices; + size_t i; GPR_ASSERT(!tcp->finished_edge); + GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC); + GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0); - slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE, - 0); - allocated_bytes = slice_state_append_blocks_into_iovec( - &read_state, iov, tcp->iov_size, tcp->slice_size); + while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) { + gpr_slice_buffer_add_indexed(tcp->incoming_buffer, + gpr_slice_malloc(tcp->slice_size)); + } + for (i = 0; i < tcp->incoming_buffer->count; i++) { + iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]); + iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]); + } msg.msg_name = NULL; msg.msg_namelen = 0; @@ -362,87 +194,63 @@ static void grpc_tcp_continue_read(grpc_tcp *tcp) { } while (read_bytes < 0 && errno == EINTR); GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0); - if (read_bytes < allocated_bytes) { - /* TODO(klempner): Consider a second read first, in hopes of getting a - * quick EAGAIN and saving a bunch of allocations. */ - slice_state_remove_last(&read_state, read_bytes < 0 - ? allocated_bytes - : allocated_bytes - read_bytes); - } - if (read_bytes < 0) { - /* NB: After calling the user_cb a parallel call of the read handler may + /* NB: After calling call_read_cb a parallel call of the read handler may * be running. 
*/ if (errno == EAGAIN) { if (tcp->iov_size > 1) { tcp->iov_size /= 2; } - if (slice_state_has_available(&read_state)) { - /* TODO(klempner): We should probably do the call into the application - without all this junk on the stack */ - /* FIXME(klempner): Refcount properly */ - slice_state_transfer_ownership(&read_state, &final_slices, - &final_nslices); - tcp->finished_edge = 1; - call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK); - slice_state_destroy(&read_state); - grpc_tcp_unref(tcp); - } else { - /* We've consumed the edge, request a new one */ - slice_state_destroy(&read_state); - grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); - } + /* We've consumed the edge, request a new one */ + grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); } else { /* TODO(klempner): Log interesting errors */ - call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR); - slice_state_destroy(&read_state); - grpc_tcp_unref(tcp); + gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); + call_read_cb(tcp, 0); + TCP_UNREF(tcp, "read"); } } else if (read_bytes == 0) { /* 0 read size ==> end of stream */ - if (slice_state_has_available(&read_state)) { - /* there were bytes already read: pass them up to the application */ - slice_state_transfer_ownership(&read_state, &final_slices, - &final_nslices); - call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF); - } else { - call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF); - } - slice_state_destroy(&read_state); - grpc_tcp_unref(tcp); + gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); + call_read_cb(tcp, 0); + TCP_UNREF(tcp, "read"); } else { - if (tcp->iov_size < MAX_READ_IOVEC) { + GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); + if ((size_t)read_bytes < tcp->incoming_buffer->length) { + gpr_slice_buffer_trim_end(tcp->incoming_buffer, + tcp->incoming_buffer->length - read_bytes); + } else if (tcp->iov_size < MAX_READ_IOVEC) { ++tcp->iov_size; } - GPR_ASSERT(slice_state_has_available(&read_state)); - slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices); - call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK); - slice_state_destroy(&read_state); - grpc_tcp_unref(tcp); + GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length); + call_read_cb(tcp, 1); + TCP_UNREF(tcp, "read"); } GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0); } -static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) { +static void tcp_handle_read(void *arg /* grpc_tcp */, int success) { grpc_tcp *tcp = (grpc_tcp *)arg; GPR_ASSERT(!tcp->finished_edge); if (!success) { - call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); - grpc_tcp_unref(tcp); + call_read_cb(tcp, 0); + TCP_UNREF(tcp, "read"); } else { - grpc_tcp_continue_read(tcp); + tcp_continue_read(tcp); } } -static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, - void *user_data) { +static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep, + gpr_slice_buffer *incoming_buffer, + grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; GPR_ASSERT(tcp->read_cb == NULL); tcp->read_cb = cb; - tcp->read_user_data = user_data; - gpr_ref(&tcp->refcount); + tcp->incoming_buffer = incoming_buffer; + gpr_slice_buffer_reset_and_unref(incoming_buffer); + TCP_REF(tcp, "read"); if (tcp->finished_edge) { tcp->finished_edge = 0; grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); @@ -450,18 +258,41 @@ static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, tcp->handle_read_closure.cb_arg = 
tcp; grpc_iomgr_add_delayed_callback(&tcp->handle_read_closure, 1); } + /* TODO(ctiller): immediate return */ + return GRPC_ENDPOINT_PENDING; } #define MAX_WRITE_IOVEC 16 -static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) { +static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) { struct msghdr msg; struct iovec iov[MAX_WRITE_IOVEC]; int iov_size; ssize_t sent_length; - grpc_tcp_slice_state *state = &tcp->write_state; + ssize_t sending_length; + ssize_t trailing; + ssize_t unwind_slice_idx; + ssize_t unwind_byte_idx; for (;;) { - iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC); + sending_length = 0; + unwind_slice_idx = tcp->outgoing_slice_idx; + unwind_byte_idx = tcp->outgoing_byte_idx; + for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count && + iov_size != MAX_WRITE_IOVEC; + iov_size++) { + iov[iov_size].iov_base = + GPR_SLICE_START_PTR( + tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) + + tcp->outgoing_byte_idx; + iov[iov_size].iov_len = + GPR_SLICE_LENGTH( + tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) - + tcp->outgoing_byte_idx; + sending_length += iov[iov_size].iov_len; + tcp->outgoing_slice_idx++; + tcp->outgoing_byte_idx = 0; + } + GPR_ASSERT(iov_size > 0); msg.msg_name = NULL; msg.msg_namelen = 0; @@ -480,70 +311,75 @@ static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) { if (sent_length < 0) { if (errno == EAGAIN) { - return GRPC_ENDPOINT_WRITE_PENDING; + tcp->outgoing_slice_idx = unwind_slice_idx; + tcp->outgoing_byte_idx = unwind_byte_idx; + return GRPC_ENDPOINT_PENDING; } else { /* TODO(klempner): Log some of these */ - slice_state_destroy(state); - return GRPC_ENDPOINT_WRITE_ERROR; + return GRPC_ENDPOINT_ERROR; } } - /* TODO(klempner): Probably better to batch this after we finish flushing */ - slice_state_remove_prefix(state, sent_length); + GPR_ASSERT(tcp->outgoing_byte_idx == 0); + trailing = sending_length - sent_length; + while (trailing > 0) { + ssize_t slice_length; + + tcp->outgoing_slice_idx--; + slice_length = GPR_SLICE_LENGTH( + tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]); + if (slice_length > trailing) { + tcp->outgoing_byte_idx = slice_length - trailing; + break; + } else { + trailing -= slice_length; + } + } - if (!slice_state_has_available(state)) { - return GRPC_ENDPOINT_WRITE_DONE; + if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) { + return GRPC_ENDPOINT_DONE; } }; } -static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) { +static void tcp_handle_write(void *arg /* grpc_tcp */, int success) { grpc_tcp *tcp = (grpc_tcp *)arg; - grpc_endpoint_write_status write_status; - grpc_endpoint_cb_status cb_status; - grpc_endpoint_write_cb cb; + grpc_endpoint_op_status status; + grpc_iomgr_closure *cb; if (!success) { - slice_state_destroy(&tcp->write_state); cb = tcp->write_cb; tcp->write_cb = NULL; - cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN); - grpc_tcp_unref(tcp); + cb->cb(cb->cb_arg, 0); + TCP_UNREF(tcp, "write"); return; } GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0); - write_status = grpc_tcp_flush(tcp); - if (write_status == GRPC_ENDPOINT_WRITE_PENDING) { + status = tcp_flush(tcp); + if (status == GRPC_ENDPOINT_PENDING) { grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure); } else { - slice_state_destroy(&tcp->write_state); - if (write_status == GRPC_ENDPOINT_WRITE_DONE) { - cb_status = GRPC_ENDPOINT_CB_OK; - } else { - cb_status = GRPC_ENDPOINT_CB_ERROR; - } cb = tcp->write_cb; tcp->write_cb = NULL; - cb(tcp->write_user_data, 
cb_status); - grpc_tcp_unref(tcp); + cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE); + TCP_UNREF(tcp, "write"); } GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0); } -static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep, - gpr_slice *slices, - size_t nslices, - grpc_endpoint_write_cb cb, - void *user_data) { +static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep, + gpr_slice_buffer *buf, + grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_endpoint_write_status status; + grpc_endpoint_op_status status; if (grpc_tcp_trace) { size_t i; - for (i = 0; i < nslices; i++) { - char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < buf->count; i++) { + char *data = + gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data); gpr_free(data); } @@ -551,15 +387,19 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep, GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0); GPR_ASSERT(tcp->write_cb == NULL); - slice_state_init(&tcp->write_state, slices, nslices, nslices); - status = grpc_tcp_flush(tcp); - if (status == GRPC_ENDPOINT_WRITE_PENDING) { - /* TODO(klempner): Consider inlining rather than malloc for small nslices */ - slice_state_realloc(&tcp->write_state, nslices); - gpr_ref(&tcp->refcount); + if (buf->length == 0) { + GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0); + return GRPC_ENDPOINT_DONE; + } + tcp->outgoing_buffer = buf; + tcp->outgoing_slice_idx = 0; + tcp->outgoing_byte_idx = 0; + + status = tcp_flush(tcp); + if (status == GRPC_ENDPOINT_PENDING) { + TCP_REF(tcp, "write"); tcp->write_cb = cb; - tcp->write_user_data = user_data; grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure); } @@ -567,27 +407,25 @@ static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep, return status; } -static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { +static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_pollset_add_fd(pollset, tcp->em_fd); } -static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep, - grpc_pollset_set *pollset_set) { +static void tcp_add_to_pollset_set(grpc_endpoint *ep, + grpc_pollset_set *pollset_set) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_pollset_set_add_fd(pollset_set, tcp->em_fd); } -static char *grpc_tcp_get_peer(grpc_endpoint *ep) { +static char *tcp_get_peer(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; return gpr_strdup(tcp->peer_string); } static const grpc_endpoint_vtable vtable = { - grpc_tcp_notify_on_read, grpc_tcp_write, - grpc_tcp_add_to_pollset, grpc_tcp_add_to_pollset_set, - grpc_tcp_shutdown, grpc_tcp_destroy, - grpc_tcp_get_peer}; + tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set, + tcp_shutdown, tcp_destroy, tcp_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, const char *peer_string) { @@ -597,21 +435,19 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, tcp->fd = em_fd->fd; tcp->read_cb = NULL; tcp->write_cb = NULL; - tcp->read_user_data = NULL; - tcp->write_user_data = NULL; + tcp->incoming_buffer = NULL; tcp->slice_size = slice_size; tcp->iov_size = 1; tcp->finished_edge = 1; - slice_state_init(&tcp->write_state, NULL, 0, 0); /* paired with unref in grpc_tcp_destroy */ gpr_ref_init(&tcp->refcount, 1); tcp->em_fd = em_fd; - tcp->read_closure.cb = grpc_tcp_handle_read; + tcp->read_closure.cb = tcp_handle_read; tcp->read_closure.cb_arg = tcp; - tcp->write_closure.cb = grpc_tcp_handle_write; + 
tcp->write_closure.cb = tcp_handle_write; tcp->write_closure.cb_arg = tcp; - tcp->handle_read_closure.cb = grpc_tcp_handle_read; + tcp->handle_read_closure.cb = tcp_handle_read; return &tcp->base; } diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c index 81b3e33cb2c..4206c393187 100644 --- a/src/core/security/secure_endpoint.c +++ b/src/core/security/secure_endpoint.c @@ -49,15 +49,15 @@ typedef struct { struct tsi_frame_protector *protector; gpr_mu protector_mu; /* saved upper level callbacks and user_data. */ - grpc_endpoint_read_cb read_cb; - void *read_user_data; - grpc_endpoint_write_cb write_cb; - void *write_user_data; + grpc_iomgr_closure *read_cb; + grpc_iomgr_closure *write_cb; + grpc_iomgr_closure on_read; + gpr_slice_buffer *read_buffer; + gpr_slice_buffer source_buffer; /* saved handshaker leftover data to unprotect. */ gpr_slice_buffer leftover_bytes; /* buffers for read and write */ gpr_slice read_staging_buffer; - gpr_slice_buffer input_buffer; gpr_slice write_staging_buffer; gpr_slice_buffer output_buffer; @@ -67,62 +67,91 @@ typedef struct { int grpc_trace_secure_endpoint = 0; -static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } - static void destroy(secure_endpoint *secure_ep) { secure_endpoint *ep = secure_ep; grpc_endpoint_destroy(ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); gpr_slice_buffer_destroy(&ep->leftover_bytes); gpr_slice_unref(ep->read_staging_buffer); - gpr_slice_buffer_destroy(&ep->input_buffer); gpr_slice_unref(ep->write_staging_buffer); gpr_slice_buffer_destroy(&ep->output_buffer); + gpr_slice_buffer_destroy(&ep->source_buffer); gpr_mu_destroy(&ep->protector_mu); gpr_free(ep); } +#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG +#define SECURE_ENDPOINT_UNREF(ep, reason) \ + secure_endpoint_unref((ep), (reason), __FILE__, __LINE__) +#define SECURE_ENDPOINT_REF(ep, reason) \ + secure_endpoint_ref((ep), (reason), __FILE__, __LINE__) +static void secure_endpoint_unref(secure_endpoint *ep, const char *reason, + const char *file, int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d", + ep, reason, ep->ref.count, ep->ref.count - 1); + if (gpr_unref(&ep->ref)) { + destroy(ep); + } +} + +static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, + const char *file, int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d", + ep, reason, ep->ref.count, ep->ref.count + 1); + gpr_ref(&ep->ref); +} +#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG +#else +#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep)) +#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) static void secure_endpoint_unref(secure_endpoint *ep) { if (gpr_unref(&ep->ref)) { destroy(ep); } } +static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } +#endif + static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, gpr_uint8 **end) { - gpr_slice_buffer_add(&ep->input_buffer, ep->read_staging_buffer); + gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer); ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer); *end = GPR_SLICE_END_PTR(ep->read_staging_buffer); } -static void call_read_cb(secure_endpoint *ep, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { +static void call_read_cb(secure_endpoint *ep, int success) { if (grpc_trace_secure_endpoint) { size_t i; - for (i = 0; i < nslices; i++) { - 
char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < ep->read_buffer->count; i++) { + char *data = gpr_dump_slice(ep->read_buffer->slices[i], + GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "READ %p: %s", ep, data); gpr_free(data); } } - ep->read_cb(ep->read_user_data, slices, nslices, error); - secure_endpoint_unref(ep); + ep->read_buffer = NULL; + ep->read_cb->cb(ep->read_cb->cb_arg, success); + SECURE_ENDPOINT_UNREF(ep, "read"); } -static void on_read(void *user_data, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { +static int on_read(void *user_data, int success) { unsigned i; gpr_uint8 keep_looping = 0; - size_t input_buffer_count = 0; tsi_result result = TSI_OK; secure_endpoint *ep = (secure_endpoint *)user_data; gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer); gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer); + if (!success) { + gpr_slice_buffer_reset_and_unref(ep->read_buffer); + return 0; + } + /* TODO(yangg) check error, maybe bail out early */ - for (i = 0; i < nslices; i++) { - gpr_slice encrypted = slices[i]; + for (i = 0; i < ep->source_buffer.count; i++) { + gpr_slice encrypted = ep->source_buffer.slices[i]; gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted); size_t message_size = GPR_SLICE_LENGTH(encrypted); @@ -161,7 +190,7 @@ static void on_read(void *user_data, gpr_slice *slices, size_t nslices, if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) { gpr_slice_buffer_add( - &ep->input_buffer, + ep->read_buffer, gpr_slice_split_head( &ep->read_staging_buffer, (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer)))); @@ -169,38 +198,53 @@ static void on_read(void *user_data, gpr_slice *slices, size_t nslices, /* TODO(yangg) experiment with moving this block after read_cb to see if it helps latency */ - for (i = 0; i < nslices; i++) { - gpr_slice_unref(slices[i]); - } + gpr_slice_buffer_reset_and_unref(&ep->source_buffer); if (result != TSI_OK) { - gpr_slice_buffer_reset_and_unref(&ep->input_buffer); - call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR); - return; + gpr_slice_buffer_reset_and_unref(ep->read_buffer); + return 0; } - /* The upper level will unref the slices. */ - input_buffer_count = ep->input_buffer.count; - ep->input_buffer.count = 0; - call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error); + + return 1; +} + +static void on_read_cb(void *user_data, int success) { + call_read_cb(user_data, on_read(user_data, success)); } -static void endpoint_notify_on_read(grpc_endpoint *secure_ep, - grpc_endpoint_read_cb cb, void *user_data) { +static grpc_endpoint_op_status endpoint_read(grpc_endpoint *secure_ep, + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { secure_endpoint *ep = (secure_endpoint *)secure_ep; + int immediate_read_success = -1; ep->read_cb = cb; - ep->read_user_data = user_data; - - secure_endpoint_ref(ep); + ep->read_buffer = slices; + gpr_slice_buffer_reset_and_unref(ep->read_buffer); if (ep->leftover_bytes.count) { - size_t leftover_nslices = ep->leftover_bytes.count; - ep->leftover_bytes.count = 0; - on_read(ep, ep->leftover_bytes.slices, leftover_nslices, - GRPC_ENDPOINT_CB_OK); - return; + gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer); + GPR_ASSERT(ep->leftover_bytes.count == 0); + return on_read(ep, 1) ? 
GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } - grpc_endpoint_notify_on_read(ep->wrapped_ep, on_read, ep); + SECURE_ENDPOINT_REF(ep, "read"); + + switch ( + grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read)) { + case GRPC_ENDPOINT_DONE: + immediate_read_success = on_read(ep, 1); + break; + case GRPC_ENDPOINT_PENDING: + return GRPC_ENDPOINT_PENDING; + case GRPC_ENDPOINT_ERROR: + immediate_read_success = on_read(ep, 0); + break; + } + + GPR_ASSERT(immediate_read_success != -1); + SECURE_ENDPOINT_UNREF(ep, "read"); + + return immediate_read_success ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, @@ -211,36 +255,28 @@ static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, *end = GPR_SLICE_END_PTR(ep->write_staging_buffer); } -static void on_write(void *data, grpc_endpoint_cb_status error) { - secure_endpoint *ep = data; - ep->write_cb(ep->write_user_data, error); - secure_endpoint_unref(ep); -} - -static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep, - gpr_slice *slices, - size_t nslices, - grpc_endpoint_write_cb cb, - void *user_data) { +static grpc_endpoint_op_status endpoint_write(grpc_endpoint *secure_ep, + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { unsigned i; - size_t output_buffer_count = 0; tsi_result result = TSI_OK; secure_endpoint *ep = (secure_endpoint *)secure_ep; gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer); gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer); - grpc_endpoint_write_status status; - GPR_ASSERT(ep->output_buffer.count == 0); + + gpr_slice_buffer_reset_and_unref(&ep->output_buffer); if (grpc_trace_secure_endpoint) { - for (i = 0; i < nslices; i++) { - char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < slices->count; i++) { + char *data = + gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data); gpr_free(data); } } - for (i = 0; i < nslices; i++) { - gpr_slice plain = slices[i]; + for (i = 0; i < slices->count; i++) { + gpr_slice plain = slices->slices[i]; gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain); size_t message_size = GPR_SLICE_LENGTH(plain); while (message_size > 0) { @@ -290,29 +326,13 @@ static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep, } } - for (i = 0; i < nslices; i++) { - gpr_slice_unref(slices[i]); - } - if (result != TSI_OK) { /* TODO(yangg) do different things according to the error type? */ gpr_slice_buffer_reset_and_unref(&ep->output_buffer); - return GRPC_ENDPOINT_WRITE_ERROR; + return GRPC_ENDPOINT_ERROR; } - /* clear output_buffer and let the lower level handle its slices. 
*/ - output_buffer_count = ep->output_buffer.count; - ep->output_buffer.count = 0; - ep->write_cb = cb; - ep->write_user_data = user_data; - /* Need to keep the endpoint alive across a transport */ - secure_endpoint_ref(ep); - status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices, - output_buffer_count, on_write, ep); - if (status != GRPC_ENDPOINT_WRITE_PENDING) { - secure_endpoint_unref(ep); - } - return status; + return grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb); } static void endpoint_shutdown(grpc_endpoint *secure_ep) { @@ -320,9 +340,9 @@ static void endpoint_shutdown(grpc_endpoint *secure_ep) { grpc_endpoint_shutdown(ep->wrapped_ep); } -static void endpoint_unref(grpc_endpoint *secure_ep) { +static void endpoint_destroy(grpc_endpoint *secure_ep) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - secure_endpoint_unref(ep); + SECURE_ENDPOINT_UNREF(ep, "destroy"); } static void endpoint_add_to_pollset(grpc_endpoint *secure_ep, @@ -343,9 +363,9 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) { } static const grpc_endpoint_vtable vtable = { - endpoint_notify_on_read, endpoint_write, + endpoint_read, endpoint_write, endpoint_add_to_pollset, endpoint_add_to_pollset_set, - endpoint_shutdown, endpoint_unref, + endpoint_shutdown, endpoint_destroy, endpoint_get_peer}; grpc_endpoint *grpc_secure_endpoint_create( @@ -363,8 +383,10 @@ grpc_endpoint *grpc_secure_endpoint_create( } ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); - gpr_slice_buffer_init(&ep->input_buffer); gpr_slice_buffer_init(&ep->output_buffer); + gpr_slice_buffer_init(&ep->source_buffer); + ep->read_buffer = NULL; + grpc_iomgr_closure_init(&ep->on_read, on_read_cb, ep); gpr_mu_init(&ep->protector_mu); gpr_ref_init(&ep->ref, 1); return &ep->base; diff --git a/src/core/security/secure_transport_setup.c b/src/core/security/secure_transport_setup.c index 0c3572b53c4..bf0079577e4 100644 --- a/src/core/security/secure_transport_setup.c +++ b/src/core/security/secure_transport_setup.c @@ -50,16 +50,17 @@ typedef struct { grpc_endpoint *wrapped_endpoint; grpc_endpoint *secure_endpoint; gpr_slice_buffer left_overs; + gpr_slice_buffer incoming; + gpr_slice_buffer outgoing; grpc_secure_transport_setup_done_cb cb; void *user_data; + grpc_iomgr_closure on_handshake_data_sent_to_peer; + grpc_iomgr_closure on_handshake_data_received_from_peer; } grpc_secure_transport_setup; -static void on_handshake_data_received_from_peer(void *setup, gpr_slice *slices, - size_t nslices, - grpc_endpoint_cb_status error); +static void on_handshake_data_received_from_peer(void *setup, int success); -static void on_handshake_data_sent_to_peer(void *setup, - grpc_endpoint_cb_status error); +static void on_handshake_data_sent_to_peer(void *setup, int success); static void secure_transport_setup_done(grpc_secure_transport_setup *s, int is_success) { @@ -78,6 +79,8 @@ static void secure_transport_setup_done(grpc_secure_transport_setup *s, if (s->handshaker != NULL) tsi_handshaker_destroy(s->handshaker); if (s->handshake_buffer != NULL) gpr_free(s->handshake_buffer); gpr_slice_buffer_destroy(&s->left_overs); + gpr_slice_buffer_destroy(&s->outgoing); + gpr_slice_buffer_destroy(&s->incoming); GRPC_SECURITY_CONNECTOR_UNREF(s->connector, "secure_transport_setup"); gpr_free(s); } @@ -102,6 +105,8 @@ static void on_peer_checked(void *user_data, grpc_security_status status) { s->secure_endpoint = grpc_secure_endpoint_create(protector, 
s->wrapped_endpoint, s->left_overs.slices, s->left_overs.count); + s->left_overs.count = 0; + s->left_overs.length = 0; secure_transport_setup_done(s, 1); return; } @@ -132,7 +137,6 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) { size_t offset = 0; tsi_result result = TSI_OK; gpr_slice to_send; - grpc_endpoint_write_status write_status; do { size_t to_send_size = s->handshake_buffer_size - offset; @@ -155,28 +159,25 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) { to_send = gpr_slice_from_copied_buffer((const char *)s->handshake_buffer, offset); + gpr_slice_buffer_reset_and_unref(&s->outgoing); + gpr_slice_buffer_add(&s->outgoing, to_send); /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - write_status = grpc_endpoint_write(s->wrapped_endpoint, &to_send, 1, - on_handshake_data_sent_to_peer, s); - if (write_status == GRPC_ENDPOINT_WRITE_ERROR) { - gpr_log(GPR_ERROR, "Could not send handshake data to peer."); - secure_transport_setup_done(s, 0); - } else if (write_status == GRPC_ENDPOINT_WRITE_DONE) { - on_handshake_data_sent_to_peer(s, GRPC_ENDPOINT_CB_OK); - } -} - -static void cleanup_slices(gpr_slice *slices, size_t num_slices) { - size_t i; - for (i = 0; i < num_slices; i++) { - gpr_slice_unref(slices[i]); + switch (grpc_endpoint_write(s->wrapped_endpoint, &s->outgoing, + &s->on_handshake_data_sent_to_peer)) { + case GRPC_ENDPOINT_ERROR: + gpr_log(GPR_ERROR, "Could not send handshake data to peer."); + secure_transport_setup_done(s, 0); + break; + case GRPC_ENDPOINT_DONE: + on_handshake_data_sent_to_peer(s, 1); + break; + case GRPC_ENDPOINT_PENDING: + break; } } -static void on_handshake_data_received_from_peer( - void *setup, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { +static void on_handshake_data_received_from_peer(void *setup, int success) { grpc_secure_transport_setup *s = setup; size_t consumed_slice_size = 0; tsi_result result = TSI_OK; @@ -184,32 +185,37 @@ static void on_handshake_data_received_from_peer( size_t num_left_overs; int has_left_overs_in_current_slice = 0; - if (error != GRPC_ENDPOINT_CB_OK) { + if (!success) { gpr_log(GPR_ERROR, "Read failed."); - cleanup_slices(slices, nslices); secure_transport_setup_done(s, 0); return; } - for (i = 0; i < nslices; i++) { - consumed_slice_size = GPR_SLICE_LENGTH(slices[i]); + for (i = 0; i < s->incoming.count; i++) { + consumed_slice_size = GPR_SLICE_LENGTH(s->incoming.slices[i]); result = tsi_handshaker_process_bytes_from_peer( - s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size); + s->handshaker, GPR_SLICE_START_PTR(s->incoming.slices[i]), + &consumed_slice_size); if (!tsi_handshaker_is_in_progress(s->handshaker)) break; } if (tsi_handshaker_is_in_progress(s->handshaker)) { /* We may need more data. 
*/ if (result == TSI_INCOMPLETE_DATA) { - /* TODO(klempner,jboeuf): This should probably use the client setup - deadline */ - grpc_endpoint_notify_on_read(s->wrapped_endpoint, - on_handshake_data_received_from_peer, setup); - cleanup_slices(slices, nslices); + switch (grpc_endpoint_read(s->wrapped_endpoint, &s->incoming, + &s->on_handshake_data_received_from_peer)) { + case GRPC_ENDPOINT_DONE: + on_handshake_data_received_from_peer(s, 1); + break; + case GRPC_ENDPOINT_ERROR: + on_handshake_data_received_from_peer(s, 0); + break; + case GRPC_ENDPOINT_PENDING: + break; + } return; } else { send_handshake_bytes_to_peer(s); - cleanup_slices(slices, nslices); return; } } @@ -217,42 +223,40 @@ static void on_handshake_data_received_from_peer( if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshake failed with error %s", tsi_result_to_string(result)); - cleanup_slices(slices, nslices); secure_transport_setup_done(s, 0); return; } /* Handshake is done and successful this point. */ has_left_overs_in_current_slice = - (consumed_slice_size < GPR_SLICE_LENGTH(slices[i])); - num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1; + (consumed_slice_size < GPR_SLICE_LENGTH(s->incoming.slices[i])); + num_left_overs = + (has_left_overs_in_current_slice ? 1 : 0) + s->incoming.count - i - 1; if (num_left_overs == 0) { - cleanup_slices(slices, nslices); check_peer(s); return; } - cleanup_slices(slices, nslices - num_left_overs); - /* Put the leftovers in our buffer (ownership transfered). */ if (has_left_overs_in_current_slice) { - gpr_slice_buffer_add(&s->left_overs, - gpr_slice_split_tail(&slices[i], consumed_slice_size)); - gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */ + gpr_slice_buffer_add( + &s->left_overs, + gpr_slice_split_tail(&s->incoming.slices[i], consumed_slice_size)); + gpr_slice_unref( + s->incoming.slices[i]); /* split_tail above increments refcount. */ } gpr_slice_buffer_addn( - &s->left_overs, &slices[i + 1], + &s->left_overs, &s->incoming.slices[i + 1], num_left_overs - (size_t)has_left_overs_in_current_slice); check_peer(s); } /* If setup is NULL, the setup is done. */ -static void on_handshake_data_sent_to_peer(void *setup, - grpc_endpoint_cb_status error) { +static void on_handshake_data_sent_to_peer(void *setup, int success) { grpc_secure_transport_setup *s = setup; /* Make sure that write is OK. 
*/ - if (error != GRPC_ENDPOINT_CB_OK) { - gpr_log(GPR_ERROR, "Write failed with error %d.", error); + if (!success) { + gpr_log(GPR_ERROR, "Write failed."); if (setup != NULL) secure_transport_setup_done(s, 0); return; } @@ -261,8 +265,17 @@ static void on_handshake_data_sent_to_peer(void *setup, if (tsi_handshaker_is_in_progress(s->handshaker)) { /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - grpc_endpoint_notify_on_read(s->wrapped_endpoint, - on_handshake_data_received_from_peer, setup); + switch (grpc_endpoint_read(s->wrapped_endpoint, &s->incoming, + &s->on_handshake_data_received_from_peer)) { + case GRPC_ENDPOINT_ERROR: + on_handshake_data_received_from_peer(s, 0); + break; + case GRPC_ENDPOINT_PENDING: + break; + case GRPC_ENDPOINT_DONE: + on_handshake_data_received_from_peer(s, 1); + break; + } } else { check_peer(s); } @@ -288,6 +301,12 @@ void grpc_setup_secure_transport(grpc_security_connector *connector, s->wrapped_endpoint = nonsecure_endpoint; s->user_data = user_data; s->cb = cb; + grpc_iomgr_closure_init(&s->on_handshake_data_sent_to_peer, + on_handshake_data_sent_to_peer, s); + grpc_iomgr_closure_init(&s->on_handshake_data_received_from_peer, + on_handshake_data_received_from_peer, s); gpr_slice_buffer_init(&s->left_overs); + gpr_slice_buffer_init(&s->outgoing); + gpr_slice_buffer_init(&s->incoming); send_handshake_bytes_to_peer(s); } diff --git a/src/core/support/slice_buffer.c b/src/core/support/slice_buffer.c index 987d5cb9b55..6482ef9c9fe 100644 --- a/src/core/support/slice_buffer.c +++ b/src/core/support/slice_buffer.c @@ -207,3 +207,25 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) { src->count = 0; src->length = 0; } + +void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n) { + GPR_ASSERT(n <= sb->length); + sb->length -= n; + for (;;) { + size_t idx = sb->count - 1; + gpr_slice slice = sb->slices[idx]; + size_t slice_len = GPR_SLICE_LENGTH(slice); + if (slice_len > n) { + sb->slices[idx] = gpr_slice_sub_no_ref(slice, 0, slice_len - n); + return; + } else if (slice_len == n) { + gpr_slice_unref(slice); + sb->count = idx; + return; + } else { + gpr_slice_unref(slice); + n -= slice_len; + sb->count = idx; + } + } +} diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h index 42cf0ecd5be..0e7f94be385 100644 --- a/src/core/transport/chttp2/internal.h +++ b/src/core/transport/chttp2/internal.h @@ -214,6 +214,8 @@ typedef struct { grpc_chttp2_hpack_compressor hpack_compressor; /** is this a client? 
*/ gpr_uint8 is_client; + /** callback for when writing is done */ + grpc_iomgr_closure done_cb; } grpc_chttp2_transport_writing; struct grpc_chttp2_transport_parsing { @@ -331,6 +333,11 @@ struct grpc_chttp2_transport { grpc_iomgr_closure writing_action; /** closure to start reading from the endpoint */ grpc_iomgr_closure reading_action; + /** closure to finish reading from the endpoint */ + grpc_iomgr_closure recv_data; + + /** incoming read bytes */ + gpr_slice_buffer read_buffer; /** address to place a newly accepted stream - set and unset by grpc_chttp2_parsing_accept_stream; used by init_stream to @@ -463,8 +470,7 @@ int grpc_chttp2_unlocking_check_writes(grpc_chttp2_transport_global *global, grpc_chttp2_transport_writing *writing); void grpc_chttp2_perform_writes( grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint); -void grpc_chttp2_terminate_writing( - grpc_chttp2_transport_writing *transport_writing, int success); +void grpc_chttp2_terminate_writing(void *transport_writing, int success); void grpc_chttp2_cleanup_writing(grpc_chttp2_transport_global *global, grpc_chttp2_transport_writing *writing); diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c index 123061b3fcc..2c8c48f47b4 100644 --- a/src/core/transport/chttp2/writing.c +++ b/src/core/transport/chttp2/writing.c @@ -37,7 +37,6 @@ #include static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing); -static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status); int grpc_chttp2_unlocking_check_writes( grpc_chttp2_transport_global *transport_global, @@ -165,16 +164,15 @@ void grpc_chttp2_perform_writes( GPR_ASSERT(transport_writing->outbuf.count > 0); GPR_ASSERT(endpoint); - switch (grpc_endpoint_write(endpoint, transport_writing->outbuf.slices, - transport_writing->outbuf.count, finish_write_cb, - transport_writing)) { - case GRPC_ENDPOINT_WRITE_DONE: + switch (grpc_endpoint_write(endpoint, &transport_writing->outbuf, + &transport_writing->done_cb)) { + case GRPC_ENDPOINT_DONE: grpc_chttp2_terminate_writing(transport_writing, 1); break; - case GRPC_ENDPOINT_WRITE_ERROR: + case GRPC_ENDPOINT_ERROR: grpc_chttp2_terminate_writing(transport_writing, 0); break; - case GRPC_ENDPOINT_WRITE_PENDING: + case GRPC_ENDPOINT_PENDING: break; } } @@ -209,12 +207,6 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) { } } -static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status) { - grpc_chttp2_transport_writing *transport_writing = tw; - grpc_chttp2_terminate_writing(transport_writing, - write_status == GRPC_ENDPOINT_CB_OK); -} - void grpc_chttp2_cleanup_writing( grpc_chttp2_transport_global *transport_global, grpc_chttp2_transport_writing *transport_writing) { @@ -243,6 +235,5 @@ void grpc_chttp2_cleanup_writing( grpc_chttp2_list_add_read_write_state_changed(transport_global, stream_global); } - transport_writing->outbuf.count = 0; - transport_writing->outbuf.length = 0; + gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf); } diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index 1bbd210e466..3d3d708e2f9 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -91,8 +91,7 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, gpr_uint32 value); /** Endpoint callback to process incoming data */ -static void recv_data(void *tp, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status 
error); +static void recv_data(void *tp, int success); /** Start disconnection chain */ static void drop_connection(grpc_chttp2_transport *t); @@ -143,6 +142,7 @@ static void destruct_transport(grpc_chttp2_transport *t) { grpc_chttp2_hpack_compressor_destroy(&t->writing.hpack_compressor); gpr_slice_buffer_destroy(&t->parsing.qbuf); + gpr_slice_buffer_destroy(&t->read_buffer); grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser); grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser); @@ -255,6 +255,11 @@ static void init_transport(grpc_chttp2_transport *t, grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser); grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context); + grpc_iomgr_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing, + &t->writing); + grpc_iomgr_closure_init(&t->recv_data, recv_data, t); + gpr_slice_buffer_init(&t->read_buffer); + if (is_client) { gpr_slice_buffer_add( &t->global.qbuf, @@ -502,8 +507,8 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, } } -void grpc_chttp2_terminate_writing( - grpc_chttp2_transport_writing *transport_writing, int success) { +void grpc_chttp2_terminate_writing(void *transport_writing_ptr, int success) { + grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr; grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing); lock(t); @@ -1060,66 +1065,56 @@ static void read_error_locked(grpc_chttp2_transport *t) { } /* tcp read callback */ -static void recv_data(void *tp, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { +static void recv_data(void *tp, int success) { grpc_chttp2_transport *t = tp; size_t i; int unref = 0; - switch (error) { - case GRPC_ENDPOINT_CB_SHUTDOWN: - case GRPC_ENDPOINT_CB_EOF: - case GRPC_ENDPOINT_CB_ERROR: - lock(t); + lock(t); + i = 0; + GPR_ASSERT(!t->parsing_active); + if (!t->closed) { + t->parsing_active = 1; + /* merge stream lists */ + grpc_chttp2_stream_map_move_into(&t->new_stream_map, + &t->parsing_stream_map); + grpc_chttp2_prepare_to_read(&t->global, &t->parsing); + gpr_mu_unlock(&t->mu); + for (; i < t->read_buffer.count && + grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); + i++) + ; + gpr_mu_lock(&t->mu); + if (i != t->read_buffer.count) { drop_connection(t); - read_error_locked(t); - unlock(t); - unref = 1; - for (i = 0; i < nslices; i++) gpr_slice_unref(slices[i]); - break; - case GRPC_ENDPOINT_CB_OK: - lock(t); - i = 0; - GPR_ASSERT(!t->parsing_active); - if (!t->closed) { - t->parsing_active = 1; - /* merge stream lists */ - grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); - grpc_chttp2_prepare_to_read(&t->global, &t->parsing); - gpr_mu_unlock(&t->mu); - for (; i < nslices && grpc_chttp2_perform_read(&t->parsing, slices[i]); - i++) { - gpr_slice_unref(slices[i]); - } - gpr_mu_lock(&t->mu); - if (i != nslices) { - drop_connection(t); - } - /* merge stream lists */ - grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); - t->global.concurrent_stream_count = - grpc_chttp2_stream_map_size(&t->parsing_stream_map); - if (t->parsing.initial_window_update != 0) { - grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, - update_global_window, t); - t->parsing.initial_window_update = 0; - } - /* handle higher level things */ - grpc_chttp2_publish_reads(&t->global, &t->parsing); - t->parsing_active = 0; - } - if (i == nslices) { - grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1); - } else { - 
read_error_locked(t); - unref = 1; - } - unlock(t); - for (; i < nslices; i++) gpr_slice_unref(slices[i]); - break; + } + /* merge stream lists */ + grpc_chttp2_stream_map_move_into(&t->new_stream_map, + &t->parsing_stream_map); + t->global.concurrent_stream_count = + grpc_chttp2_stream_map_size(&t->parsing_stream_map); + if (t->parsing.initial_window_update != 0) { + grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, + update_global_window, t); + t->parsing.initial_window_update = 0; + } + /* handle higher level things */ + grpc_chttp2_publish_reads(&t->global, &t->parsing); + t->parsing_active = 0; + } + if (!success) { + drop_connection(t); + read_error_locked(t); + unref = 1; + } else if (i == t->read_buffer.count) { + grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1); + } else { + read_error_locked(t); + unref = 1; } + gpr_slice_buffer_reset_and_unref(&t->read_buffer); + unlock(t); + if (unref) { UNREF_TRANSPORT(t, "recv_data"); } @@ -1127,7 +1122,16 @@ static void recv_data(void *tp, gpr_slice *slices, size_t nslices, static void reading_action(void *pt, int iomgr_success_ignored) { grpc_chttp2_transport *t = pt; - grpc_endpoint_notify_on_read(t->ep, recv_data, t); + switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) { + case GRPC_ENDPOINT_DONE: + recv_data(t, 1); + break; + case GRPC_ENDPOINT_ERROR: + recv_data(t, 0); + break; + case GRPC_ENDPOINT_PENDING: + break; + } } /* @@ -1240,5 +1244,6 @@ void grpc_chttp2_transport_start_reading(grpc_transport *transport, gpr_slice *slices, size_t nslices) { grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport; REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */ - recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK); + gpr_slice_buffer_addn(&t->read_buffer, slices, nslices); + recv_data(t, 1); } diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c index 24bf5d3625f..1d988796625 100644 --- a/test/core/bad_client/bad_client.c +++ b/test/core/bad_client/bad_client.c @@ -59,7 +59,7 @@ static void thd_func(void *arg) { gpr_event_set(&a->done_thd, (void *)1); } -static void done_write(void *arg, grpc_endpoint_cb_status status) { +static void done_write(void *arg, int success) { thd_args *a = arg; gpr_event_set(&a->done_write, (void *)1); } @@ -85,6 +85,8 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, grpc_mdctx *mdctx = grpc_mdctx_create(); gpr_slice slice = gpr_slice_from_copied_buffer(client_payload, client_payload_length); + gpr_slice_buffer outgoing; + grpc_iomgr_closure done_write_closure; hex = gpr_dump(client_payload, client_payload_length, GPR_DUMP_HEX | GPR_DUMP_ASCII); @@ -122,14 +124,18 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, /* Start validator */ gpr_thd_new(&id, thd_func, &a, NULL); + gpr_slice_buffer_init(&outgoing); + gpr_slice_buffer_add(&outgoing, slice); + grpc_iomgr_closure_init(&done_write_closure, done_write, &a); + /* Write data */ - switch (grpc_endpoint_write(sfd.client, &slice, 1, done_write, &a)) { - case GRPC_ENDPOINT_WRITE_DONE: + switch (grpc_endpoint_write(sfd.client, &outgoing, &done_write_closure)) { + case GRPC_ENDPOINT_DONE: done_write(&a, 1); break; - case GRPC_ENDPOINT_WRITE_PENDING: + case GRPC_ENDPOINT_PENDING: break; - case GRPC_ENDPOINT_WRITE_ERROR: + case GRPC_ENDPOINT_ERROR: done_write(&a, 0); break; } @@ -155,6 +161,7 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, .type == GRPC_OP_COMPLETE); 
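/*
 * Editorial aside (not part of the original patch): the test hunks above all
 * follow the calling convention introduced by this series, where
 * grpc_endpoint_read/grpc_endpoint_write take a gpr_slice_buffer plus a
 * grpc_iomgr_closure and return a tri-state status instead of always firing a
 * callback later. A minimal sketch of that caller pattern follows; the helper
 * name start_write_sketch is hypothetical, while grpc_endpoint_write,
 * grpc_iomgr_closure (with its cb/cb_arg fields) and the GRPC_ENDPOINT_*
 * status values are taken from the patch itself.
 *
 *   static void start_write_sketch(grpc_endpoint *ep, gpr_slice_buffer *buf,
 *                                  grpc_iomgr_closure *done) {
 *     switch (grpc_endpoint_write(ep, buf, done)) {
 *       case GRPC_ENDPOINT_DONE:     // completed inline: run the callback now
 *         done->cb(done->cb_arg, 1);
 *         break;
 *       case GRPC_ENDPOINT_ERROR:    // failed inline: report failure now
 *         done->cb(done->cb_arg, 0);
 *         break;
 *       case GRPC_ENDPOINT_PENDING:  // in flight: the endpoint invokes 'done' later
 *         break;
 *     }
 *   }
 *
 * The design point is that inline completions never schedule the closure, so
 * callers must be prepared to finish the operation on the calling thread.
 */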
grpc_server_destroy(a.server); grpc_completion_queue_destroy(a.cq); + gpr_slice_buffer_destroy(&outgoing); grpc_shutdown(); } diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index 8186c96da1b..b4f170cfbed 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -59,8 +59,7 @@ static grpc_pollset *g_pollset; -size_t count_and_unref_slices(gpr_slice *slices, size_t nslices, - int *current_data) { +size_t count_slices(gpr_slice *slices, size_t nslices, int *current_data) { size_t num_bytes = 0; size_t i; size_t j; @@ -72,7 +71,6 @@ size_t count_and_unref_slices(gpr_slice *slices, size_t nslices, *current_data = (*current_data + 1) % 256; } num_bytes += GPR_SLICE_LENGTH(slices[i]); - gpr_slice_unref(slices[i]); } return num_bytes; } @@ -121,86 +119,76 @@ struct read_and_write_test_state { int current_write_data; int read_done; int write_done; + gpr_slice_buffer incoming; + gpr_slice_buffer outgoing; + grpc_iomgr_closure done_read; + grpc_iomgr_closure done_write; }; -static void read_and_write_test_read_handler(void *data, gpr_slice *slices, - size_t nslices, - grpc_endpoint_cb_status error) { +static void read_and_write_test_read_handler(void *data, int success) { struct read_and_write_test_state *state = data; - GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); - if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { - gpr_log(GPR_INFO, "Read handler shutdown"); - gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->read_done = 1; - grpc_pollset_kick(g_pollset, NULL); - gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); - return; - } - state->bytes_read += - count_and_unref_slices(slices, nslices, &state->current_read_data); - if (state->bytes_read == state->target_bytes) { + state->bytes_read += count_slices( + state->incoming.slices, state->incoming.count, &state->current_read_data); + if (state->bytes_read == state->target_bytes || !success) { gpr_log(GPR_INFO, "Read handler done"); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->read_done = 1; + state->read_done = 1 + success; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); - } else { - grpc_endpoint_notify_on_read(state->read_ep, - read_and_write_test_read_handler, data); + } else if (success) { + switch (grpc_endpoint_read(state->read_ep, &state->incoming, + &state->done_read)) { + case GRPC_ENDPOINT_ERROR: + read_and_write_test_read_handler(data, 0); + break; + case GRPC_ENDPOINT_DONE: + read_and_write_test_read_handler(data, 1); + break; + case GRPC_ENDPOINT_PENDING: + break; + } } } -static void read_and_write_test_write_handler(void *data, - grpc_endpoint_cb_status error) { +static void read_and_write_test_write_handler(void *data, int success) { struct read_and_write_test_state *state = data; gpr_slice *slices = NULL; size_t nslices; - grpc_endpoint_write_status write_status; - - GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); - - gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler", - error); - - if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { - gpr_log(GPR_INFO, "Write handler shutdown"); - gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->write_done = 1; - grpc_pollset_kick(g_pollset, NULL); - gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); - return; - } - - for (;;) { - /* Need to do inline writes until they don't succeed synchronously or we - finish writing */ - state->bytes_written += state->current_write_size; - if (state->target_bytes - state->bytes_written < - state->current_write_size) { - state->current_write_size = state->target_bytes - 
state->bytes_written; - } - if (state->current_write_size == 0) { - break; - } - - slices = allocate_blocks(state->current_write_size, 8192, &nslices, - &state->current_write_data); - write_status = - grpc_endpoint_write(state->write_ep, slices, nslices, - read_and_write_test_write_handler, state); - gpr_log(GPR_DEBUG, "write_status=%d", write_status); - GPR_ASSERT(write_status != GRPC_ENDPOINT_WRITE_ERROR); - free(slices); - if (write_status == GRPC_ENDPOINT_WRITE_PENDING) { - return; + grpc_endpoint_op_status write_status; + + if (success) { + for (;;) { + /* Need to do inline writes until they don't succeed synchronously or we + finish writing */ + state->bytes_written += state->current_write_size; + if (state->target_bytes - state->bytes_written < + state->current_write_size) { + state->current_write_size = state->target_bytes - state->bytes_written; + } + if (state->current_write_size == 0) { + break; + } + + slices = allocate_blocks(state->current_write_size, 8192, &nslices, + &state->current_write_data); + gpr_slice_buffer_reset_and_unref(&state->outgoing); + gpr_slice_buffer_addn(&state->outgoing, slices, nslices); + write_status = grpc_endpoint_write(state->write_ep, &state->outgoing, + &state->done_write); + gpr_log(GPR_DEBUG, "write_status=%d", write_status); + GPR_ASSERT(write_status != GRPC_ENDPOINT_ERROR); + free(slices); + if (write_status == GRPC_ENDPOINT_PENDING) { + return; + } } + GPR_ASSERT(state->bytes_written == state->target_bytes); } - GPR_ASSERT(state->bytes_written == state->target_bytes); gpr_log(GPR_INFO, "Write handler done"); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->write_done = 1; + state->write_done = 1 + success; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } @@ -234,16 +222,31 @@ static void read_and_write_test(grpc_endpoint_test_config config, state.write_done = 0; state.current_read_data = 0; state.current_write_data = 0; + grpc_iomgr_closure_init(&state.done_read, read_and_write_test_read_handler, + &state); + grpc_iomgr_closure_init(&state.done_write, read_and_write_test_write_handler, + &state); + gpr_slice_buffer_init(&state.outgoing); + gpr_slice_buffer_init(&state.incoming); /* Get started by pretending an initial write completed */ /* NOTE: Sets up initial conditions so we can have the same write handler for the first iteration as for later iterations. It does the right thing even when bytes_written is unsigned. 
*/ state.bytes_written -= state.current_write_size; - read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK); + read_and_write_test_write_handler(&state, 1); - grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler, - &state); + switch ( + grpc_endpoint_read(state.read_ep, &state.incoming, &state.done_read)) { + case GRPC_ENDPOINT_PENDING: + break; + case GRPC_ENDPOINT_ERROR: + read_and_write_test_read_handler(&state, 0); + break; + case GRPC_ENDPOINT_DONE: + read_and_write_test_read_handler(&state, 1); + break; + } if (shutdown) { gpr_log(GPR_DEBUG, "shutdown read"); @@ -262,6 +265,8 @@ static void read_and_write_test(grpc_endpoint_test_config config, grpc_endpoint_destroy(state.read_ep); grpc_endpoint_destroy(state.write_ep); + gpr_slice_buffer_destroy(&state.outgoing); + gpr_slice_buffer_destroy(&state.incoming); end_test(config); } @@ -272,36 +277,40 @@ struct timeout_test_state { typedef struct { int done; grpc_endpoint *ep; + gpr_slice_buffer incoming; + grpc_iomgr_closure done_read; } shutdown_during_write_test_state; -static void shutdown_during_write_test_read_handler( - void *user_data, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { - size_t i; +static void shutdown_during_write_test_read_handler(void *user_data, + int success) { shutdown_during_write_test_state *st = user_data; - for (i = 0; i < nslices; i++) { - gpr_slice_unref(slices[i]); - } - - if (error != GRPC_ENDPOINT_CB_OK) { + if (!success) { grpc_endpoint_destroy(st->ep); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - st->done = error; + st->done = 1; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } else { - grpc_endpoint_notify_on_read( - st->ep, shutdown_during_write_test_read_handler, user_data); + switch (grpc_endpoint_read(st->ep, &st->incoming, &st->done_read)) { + case GRPC_ENDPOINT_PENDING: + break; + case GRPC_ENDPOINT_ERROR: + shutdown_during_write_test_read_handler(user_data, 0); + break; + case GRPC_ENDPOINT_DONE: + shutdown_during_write_test_read_handler(user_data, 1); + break; + } } } -static void shutdown_during_write_test_write_handler( - void *user_data, grpc_endpoint_cb_status error) { +static void shutdown_during_write_test_write_handler(void *user_data, + int success) { shutdown_during_write_test_state *st = user_data; - gpr_log(GPR_INFO, "shutdown_during_write_test_write_handler: error = %d", - error); - if (error == 0) { + gpr_log(GPR_INFO, "shutdown_during_write_test_write_handler: success = %d", + success); + if (success) { /* This happens about 0.5% of the time when run under TSAN, and is entirely legitimate, but means we aren't testing the path we think we are. 
*/ /* TODO(klempner): Change this test to retry the write in that case */ @@ -324,6 +333,8 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, shutdown_during_write_test_state read_st; shutdown_during_write_test_state write_st; gpr_slice *slices; + gpr_slice_buffer outgoing; + grpc_iomgr_closure done_write; grpc_endpoint_test_fixture f = begin_test(config, "shutdown_during_write_test", slice_size); @@ -334,19 +345,26 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, read_st.done = 0; write_st.done = 0; - grpc_endpoint_notify_on_read( - read_st.ep, shutdown_during_write_test_read_handler, &read_st); + grpc_iomgr_closure_init(&done_write, shutdown_during_write_test_write_handler, + &write_st); + grpc_iomgr_closure_init(&read_st.done_read, + shutdown_during_write_test_read_handler, &read_st); + gpr_slice_buffer_init(&read_st.incoming); + gpr_slice_buffer_init(&outgoing); + + GPR_ASSERT(grpc_endpoint_read(read_st.ep, &read_st.incoming, + &read_st.done_read) == GRPC_ENDPOINT_PENDING); for (size = 1;; size *= 2) { slices = allocate_blocks(size, 1, &nblocks, ¤t_data); - switch (grpc_endpoint_write(write_st.ep, slices, nblocks, - shutdown_during_write_test_write_handler, - &write_st)) { - case GRPC_ENDPOINT_WRITE_DONE: + gpr_slice_buffer_reset_and_unref(&outgoing); + gpr_slice_buffer_addn(&outgoing, slices, nblocks); + switch (grpc_endpoint_write(write_st.ep, &outgoing, &done_write)) { + case GRPC_ENDPOINT_DONE: break; - case GRPC_ENDPOINT_WRITE_ERROR: + case GRPC_ENDPOINT_ERROR: gpr_log(GPR_ERROR, "error writing"); abort(); - case GRPC_ENDPOINT_WRITE_PENDING: + case GRPC_ENDPOINT_PENDING: grpc_endpoint_shutdown(write_st.ep); deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); @@ -365,6 +383,8 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, } gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); gpr_free(slices); + gpr_slice_buffer_destroy(&read_st.incoming); + gpr_slice_buffer_destroy(&outgoing); end_test(config); return; } diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index 17a85ceaec7..a2e3adcf292 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -118,10 +118,12 @@ struct read_socket_state { grpc_endpoint *ep; ssize_t read_bytes; ssize_t target_read_bytes; + gpr_slice_buffer incoming; + grpc_iomgr_closure read_cb; }; -static ssize_t count_and_unref_slices(gpr_slice *slices, size_t nslices, - int *current_data) { +static ssize_t count_slices(gpr_slice *slices, size_t nslices, + int *current_data) { ssize_t num_bytes = 0; unsigned i, j; unsigned char *buf; @@ -132,31 +134,41 @@ static ssize_t count_and_unref_slices(gpr_slice *slices, size_t nslices, *current_data = (*current_data + 1) % 256; } num_bytes += GPR_SLICE_LENGTH(slices[i]); - gpr_slice_unref(slices[i]); } return num_bytes; } -static void read_cb(void *user_data, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { +static void read_cb(void *user_data, int success) { struct read_socket_state *state = (struct read_socket_state *)user_data; ssize_t read_bytes; int current_data; - GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); + GPR_ASSERT(success); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); current_data = state->read_bytes % 256; - read_bytes = count_and_unref_slices(slices, nslices, ¤t_data); + read_bytes = count_slices(state->incoming.slices, state->incoming.count, + ¤t_data); state->read_bytes += read_bytes; gpr_log(GPR_INFO, "Read %d bytes of %d", 
read_bytes, state->target_read_bytes); if (state->read_bytes >= state->target_read_bytes) { - /* empty */ + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } else { - grpc_endpoint_notify_on_read(state->ep, read_cb, state); + switch (grpc_endpoint_read(state->ep, &state->incoming, &state->read_cb)) { + case GRPC_ENDPOINT_DONE: + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + read_cb(user_data, 1); + break; + case GRPC_ENDPOINT_ERROR: + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + read_cb(user_data, 0); + break; + case GRPC_ENDPOINT_PENDING: + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + break; + } } - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Write to a socket, then read from it using the grpc_tcp API. */ @@ -181,8 +193,19 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) { state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; + gpr_slice_buffer_init(&state.incoming); + grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); - grpc_endpoint_notify_on_read(ep, read_cb, &state); + switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { + case GRPC_ENDPOINT_DONE: + read_cb(&state, 1); + break; + case GRPC_ENDPOINT_ERROR: + read_cb(&state, 0); + break; + case GRPC_ENDPOINT_PENDING: + break; + } gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { @@ -192,6 +215,7 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) { GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); } @@ -218,8 +242,19 @@ static void large_read_test(ssize_t slice_size) { state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; + gpr_slice_buffer_init(&state.incoming); + grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); - grpc_endpoint_notify_on_read(ep, read_cb, &state); + switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { + case GRPC_ENDPOINT_DONE: + read_cb(&state, 1); + break; + case GRPC_ENDPOINT_ERROR: + read_cb(&state, 0); + break; + case GRPC_ENDPOINT_PENDING: + break; + } gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { @@ -229,6 +264,7 @@ static void large_read_test(ssize_t slice_size) { GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); } @@ -260,8 +296,7 @@ static gpr_slice *allocate_blocks(ssize_t num_bytes, ssize_t slice_size, return slices; } -static void write_done(void *user_data /* write_socket_state */, - grpc_endpoint_cb_status error) { +static void write_done(void *user_data /* write_socket_state */, int success) { struct write_socket_state *state = (struct write_socket_state *)user_data; gpr_log(GPR_INFO, "Write done callback called"); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); @@ -336,6 +371,8 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { size_t num_blocks; gpr_slice *slices; int current_data = 0; + gpr_slice_buffer outgoing; + grpc_iomgr_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, @@ -352,73 +389,21 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { slices = allocate_blocks(num_bytes, slice_size, &num_blocks, ¤t_data); - if (grpc_endpoint_write(ep, slices, num_blocks, write_done, &state) == - 
GRPC_ENDPOINT_WRITE_DONE) { - /* Write completed immediately */ - read_bytes = drain_socket(sv[0]); - GPR_ASSERT(read_bytes == num_bytes); - } else { - drain_socket_blocking(sv[0], num_bytes, num_bytes); - gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); - for (;;) { - grpc_pollset_worker worker; - if (state.write_done) { - break; - } - grpc_pollset_work(&g_pollset, &worker, deadline); - } - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - } - - grpc_endpoint_destroy(ep); - gpr_free(slices); -} - -static void read_done_for_write_error(void *ud, gpr_slice *slices, - size_t nslices, - grpc_endpoint_cb_status error) { - GPR_ASSERT(error != GRPC_ENDPOINT_CB_OK); - GPR_ASSERT(nslices == 0); -} - -/* Write to a socket using the grpc_tcp API, then drain it directly. - Note that if the write does not complete immediately we need to drain the - socket in parallel with the read. */ -static void write_error_test(ssize_t num_bytes, ssize_t slice_size) { - int sv[2]; - grpc_endpoint *ep; - struct write_socket_state state; - size_t num_blocks; - gpr_slice *slices; - int current_data = 0; - grpc_pollset_worker worker; - gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); - - gpr_log(GPR_INFO, "Start write error test with %d bytes, slice size %d", - num_bytes, slice_size); - - create_sockets(sv); + gpr_slice_buffer_init(&outgoing); + gpr_slice_buffer_addn(&outgoing, slices, num_blocks); + grpc_iomgr_closure_init(&write_done_closure, write_done, &state); - ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_error_test"), - GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); - grpc_endpoint_add_to_pollset(ep, &g_pollset); - - close(sv[0]); - - state.ep = ep; - state.write_done = 0; - - slices = allocate_blocks(num_bytes, slice_size, &num_blocks, ¤t_data); - - switch (grpc_endpoint_write(ep, slices, num_blocks, write_done, &state)) { - case GRPC_ENDPOINT_WRITE_DONE: - case GRPC_ENDPOINT_WRITE_ERROR: + switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) { + case GRPC_ENDPOINT_DONE: /* Write completed immediately */ + read_bytes = drain_socket(sv[0]); + GPR_ASSERT(read_bytes == num_bytes); break; - case GRPC_ENDPOINT_WRITE_PENDING: - grpc_endpoint_notify_on_read(ep, read_done_for_write_error, NULL); + case GRPC_ENDPOINT_PENDING: + drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { + grpc_pollset_worker worker; if (state.write_done) { break; } @@ -426,10 +411,14 @@ static void write_error_test(ssize_t num_bytes, ssize_t slice_size) { } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); break; + case GRPC_ENDPOINT_ERROR: + gpr_log(GPR_ERROR, "endpoint got error"); + abort(); } + gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(ep); - free(slices); + gpr_free(slices); } void run_tests(void) { @@ -448,10 +437,6 @@ void run_tests(void) { write_test(100000, 1); write_test(100000, 137); - for (i = 1; i < 1000; i = GPR_MAX(i + 1, i * 5 / 4)) { - write_error_test(40320, i); - } - for (i = 1; i < 1000; i = GPR_MAX(i + 1, i * 5 / 4)) { write_test(40320, i); } diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index a8368fc8426..c76ddcd1940 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -135,62 +135,26 @@ static grpc_endpoint_test_config configs[] = { secure_endpoint_create_fixture_tcp_socketpair_leftover, clean_up}, }; -static void verify_leftover(void *user_data, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { - gpr_slice s = - 
gpr_slice_from_copied_string("hello world 12345678900987654321"); - - GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); - GPR_ASSERT(nslices == 1); - - GPR_ASSERT(0 == gpr_slice_cmp(s, slices[0])); - gpr_slice_unref(slices[0]); - gpr_slice_unref(s); - *(int *)user_data = 1; -} - static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) { grpc_endpoint_test_fixture f = config.create_fixture(slice_size); - int verified = 0; + gpr_slice_buffer incoming; + gpr_slice s = + gpr_slice_from_copied_string("hello world 12345678900987654321"); gpr_log(GPR_INFO, "Start test left over"); - grpc_endpoint_notify_on_read(f.client_ep, verify_leftover, &verified); - GPR_ASSERT(verified == 1); + gpr_slice_buffer_init(&incoming); + GPR_ASSERT(grpc_endpoint_read(f.client_ep, &incoming, NULL) == + GRPC_ENDPOINT_DONE); + GPR_ASSERT(incoming.count == 1); + GPR_ASSERT(0 == gpr_slice_cmp(s, incoming.slices[0])); grpc_endpoint_shutdown(f.client_ep); grpc_endpoint_shutdown(f.server_ep); grpc_endpoint_destroy(f.client_ep); grpc_endpoint_destroy(f.server_ep); - clean_up(); -} - -static void destroy_early(void *user_data, gpr_slice *slices, size_t nslices, - grpc_endpoint_cb_status error) { - grpc_endpoint_test_fixture *f = user_data; - gpr_slice s = - gpr_slice_from_copied_string("hello world 12345678900987654321"); - - GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); - GPR_ASSERT(nslices == 1); - - grpc_endpoint_shutdown(f->client_ep); - grpc_endpoint_destroy(f->client_ep); - - GPR_ASSERT(0 == gpr_slice_cmp(s, slices[0])); - gpr_slice_unref(slices[0]); gpr_slice_unref(s); -} + gpr_slice_buffer_destroy(&incoming); -/* test which destroys the ep before finishing reading */ -static void test_destroy_ep_early(grpc_endpoint_test_config config, - size_t slice_size) { - grpc_endpoint_test_fixture f = config.create_fixture(slice_size); - gpr_log(GPR_INFO, "Start test destroy early"); - - grpc_endpoint_notify_on_read(f.client_ep, destroy_early, &f); - - grpc_endpoint_shutdown(f.server_ep); - grpc_endpoint_destroy(f.server_ep); clean_up(); } @@ -203,7 +167,6 @@ int main(int argc, char **argv) { grpc_pollset_init(&g_pollset); grpc_endpoint_tests(configs[0], &g_pollset); test_leftover(configs[1], 1); - test_destroy_ep_early(configs[1], 1); grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); From ec9acabb4f197c8da4de4b7d5109c38611c23be0 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 21 Aug 2015 13:06:00 -0700 Subject: [PATCH 05/29] Update Windows to new endpoint API Also solve an infinite recursion in chttp2_transport --- src/core/iomgr/tcp_windows.c | 124 ++++++++++++-------------- src/core/transport/chttp2/internal.h | 2 - src/core/transport/chttp2_transport.c | 64 ++++++------- 3 files changed, 90 insertions(+), 100 deletions(-) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index 123f46d71d4..a63123c6c9d 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -82,13 +82,11 @@ typedef struct grpc_tcp { /* Refcounting how many operations are in progress. */ gpr_refcount refcount; - grpc_endpoint_read_cb read_cb; - void *read_user_data; + grpc_iomgr_closure *read_cb; + grpc_iomgr_closure *write_cb; gpr_slice read_slice; - - grpc_endpoint_write_cb write_cb; - void *write_user_data; - gpr_slice_buffer write_slices; + gpr_slice_buffer *write_slices; + gpr_slice_buffer *read_slices; /* The IO Completion Port runs from another thread. We need some mechanism to protect ourselves when requesting a shutdown. 
*/ @@ -102,7 +100,6 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } static void tcp_unref(grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - gpr_slice_buffer_destroy(&tcp->write_slices); grpc_winsocket_orphan(tcp->socket); gpr_mu_destroy(&tcp->mu); gpr_free(tcp->peer_string); @@ -111,21 +108,16 @@ static void tcp_unref(grpc_tcp *tcp) { } /* Asynchronous callback from the IOCP, or the background thread. */ -static void on_read(void *tcpp, int from_iocp) { - grpc_tcp *tcp = (grpc_tcp *)tcpp; +static int on_read(grpc_tcp *tcp, int from_iocp) { grpc_winsocket *socket = tcp->socket; gpr_slice sub; gpr_slice *slice = NULL; size_t nslices = 0; - grpc_endpoint_cb_status status; - grpc_endpoint_read_cb cb; + int success; grpc_winsocket_callback_info *info = &socket->read_info; - void *opaque = tcp->read_user_data; int do_abort = 0; gpr_mu_lock(&tcp->mu); - cb = tcp->read_cb; - tcp->read_cb = NULL; if (!from_iocp || tcp->shutting_down) { /* If we are here with from_iocp set to true, it means we got raced to shutting down the endpoint. No actual abort callback will happen @@ -140,8 +132,7 @@ static void on_read(void *tcpp, int from_iocp) { gpr_slice_unref(tcp->read_slice); } tcp_unref(tcp); - if (cb) cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); - return; + return 0; } GPR_ASSERT(tcp->socket->read_info.outstanding); @@ -152,27 +143,33 @@ static void on_read(void *tcpp, int from_iocp) { gpr_log(GPR_ERROR, "ReadFile overlapped error: %s", utf8_message); gpr_free(utf8_message); } - status = GRPC_ENDPOINT_CB_ERROR; + success = 0; } else { if (info->bytes_transfered != 0) { sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered); - status = GRPC_ENDPOINT_CB_OK; - slice = ⊂ - nslices = 1; + gpr_slice_buffer_add(tcp->read_slices, sub); + success = 1; } else { gpr_slice_unref(tcp->read_slice); - status = GRPC_ENDPOINT_CB_EOF; + success = 0; } } tcp->socket->read_info.outstanding = 0; + return success; +} + +static void on_read_cb(void *tcpp, int from_iocp) { + grpc_tcp *tcp = tcpp; + grpc_iomgr_closure *cb = tcp->read_cb; + int success = on_read(tcp, from_iocp); + tcp->read_cb = NULL; tcp_unref(tcp); - cb(opaque, slice, nslices, status); + cb->cb(cb->cb_arg, success); } -static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, - void *arg) { +static grpc_endpoint_op_status win_read(grpc_endpoint *ep, gpr_slice_buffer *read_slices, grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->read_info; @@ -183,13 +180,11 @@ static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, GPR_ASSERT(!tcp->socket->read_info.outstanding); if (tcp->shutting_down) { - cb(arg, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); - return; + return GRPC_ENDPOINT_ERROR; } - tcp_ref(tcp); tcp->socket->read_info.outstanding = 1; tcp->read_cb = cb; - tcp->read_user_data = arg; + tcp->read_slices = read_slices; tcp->read_slice = gpr_slice_malloc(8192); @@ -204,9 +199,8 @@ static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, /* Did we get data immediately ? Yay. */ if (info->wsa_error != WSAEWOULDBLOCK) { info->bytes_transfered = bytes_read; - /* This might heavily recurse. */ - on_read(tcp, 1); - return; + gpr_log(GPR_DEBUG, "immread: %d bytes", bytes_read); + return on_read(tcp, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } /* Otherwise, let's retry, by queuing a read. 
*/ @@ -218,12 +212,14 @@ static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { info->wsa_error = wsa_error; - on_read(tcp, 1); - return; + gpr_log(GPR_DEBUG, "immread: err=%d", wsa_error); + return on_read(tcp, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } } - grpc_socket_notify_on_read(tcp->socket, on_read, tcp); + tcp_ref(tcp); + grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp); + return GRPC_ENDPOINT_PENDING; } /* Asynchronous callback from the IOCP, or the background thread. */ @@ -231,9 +227,8 @@ static void on_write(void *tcpp, int from_iocp) { grpc_tcp *tcp = (grpc_tcp *)tcpp; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->write_info; - grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK; - grpc_endpoint_write_cb cb; - void *opaque = tcp->write_user_data; + grpc_iomgr_closure *cb; + int success; int do_abort = 0; gpr_mu_lock(&tcp->mu); @@ -250,10 +245,11 @@ static void on_write(void *tcpp, int from_iocp) { if (do_abort) { if (from_iocp) { tcp->socket->write_info.outstanding = 0; - gpr_slice_buffer_reset_and_unref(&tcp->write_slices); } tcp_unref(tcp); - if (cb) cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN); + if (cb) { + cb->cb(cb->cb_arg, 0); + } return; } @@ -265,23 +261,22 @@ static void on_write(void *tcpp, int from_iocp) { gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message); gpr_free(utf8_message); } - status = GRPC_ENDPOINT_CB_ERROR; + success = 0; } else { - GPR_ASSERT(info->bytes_transfered == tcp->write_slices.length); + GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length); + success = 1; } - gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; tcp_unref(tcp); - cb(opaque, status); + cb->cb(cb->cb_arg, success); } /* Initiates a write. */ -static grpc_endpoint_write_status win_write(grpc_endpoint *ep, - gpr_slice *slices, size_t nslices, - grpc_endpoint_write_cb cb, - void *arg) { +static grpc_endpoint_op_status win_write(grpc_endpoint *ep, + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *socket = tcp->socket; grpc_winsocket_callback_info *info = &socket->write_info; @@ -294,28 +289,26 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep, GPR_ASSERT(!tcp->socket->write_info.outstanding); if (tcp->shutting_down) { - return GRPC_ENDPOINT_WRITE_ERROR; + return GRPC_ENDPOINT_ERROR; } tcp_ref(tcp); tcp->socket->write_info.outstanding = 1; tcp->write_cb = cb; - tcp->write_user_data = arg; - - gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices); + tcp->write_slices = slices; - if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) { - buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count); + if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) { + buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count); allocated = buffers; } - for (i = 0; i < tcp->write_slices.count; i++) { - buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]); - buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]); + for (i = 0; i < tcp->write_slices->count; i++) { + buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]); + buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]); } /* First, let's try a synchronous, non-blocking write. 
*/ - status = WSASend(socket->socket, buffers, tcp->write_slices.count, + status = WSASend(socket->socket, buffers, tcp->write_slices->count, &bytes_sent, 0, NULL, NULL); info->wsa_error = status == 0 ? 0 : WSAGetLastError(); @@ -323,10 +316,10 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep, connection that has its send queue filled up. But if we don't, then we can avoid doing an async write operation at all. */ if (info->wsa_error != WSAEWOULDBLOCK) { - grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR; + grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR; if (status == 0) { - ret = GRPC_ENDPOINT_WRITE_DONE; - GPR_ASSERT(bytes_sent == tcp->write_slices.length); + ret = GRPC_ENDPOINT_DONE; + GPR_ASSERT(bytes_sent == tcp->write_slices->length); } else { if (socket->read_info.wsa_error != WSAECONNRESET) { char *utf8_message = gpr_format_message(info->wsa_error); @@ -335,7 +328,6 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep, } } if (allocated) gpr_free(allocated); - gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; tcp_unref(tcp); return ret; @@ -344,24 +336,23 @@ static grpc_endpoint_write_status win_write(grpc_endpoint *ep, /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same operation, this time asynchronously. */ memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED)); - status = WSASend(socket->socket, buffers, tcp->write_slices.count, + status = WSASend(socket->socket, buffers, tcp->write_slices->count, &bytes_sent, 0, &socket->write_info.overlapped, NULL); if (allocated) gpr_free(allocated); if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { - gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; tcp_unref(tcp); - return GRPC_ENDPOINT_WRITE_ERROR; + return GRPC_ENDPOINT_ERROR; } } /* As all is now setup, we can now ask for the IOCP notification. It may trigger the callback immediately however, but no matter. 
*/ grpc_socket_notify_on_write(socket, on_write, tcp); - return GRPC_ENDPOINT_WRITE_PENDING; + return GRPC_ENDPOINT_PENDING; } static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) { @@ -407,7 +398,7 @@ static char *win_get_peer(grpc_endpoint *ep) { } static grpc_endpoint_vtable vtable = { - win_notify_on_read, win_write, win_add_to_pollset, win_add_to_pollset_set, + win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, win_shutdown, win_destroy, win_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { @@ -416,7 +407,6 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { tcp->base.vtable = &vtable; tcp->socket = socket; gpr_mu_init(&tcp->mu); - gpr_slice_buffer_init(&tcp->write_slices); gpr_ref_init(&tcp->refcount, 1); tcp->peer_string = gpr_strdup(peer_string); return &tcp->base; diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h index 0e7f94be385..a1b773b1cad 100644 --- a/src/core/transport/chttp2/internal.h +++ b/src/core/transport/chttp2/internal.h @@ -331,8 +331,6 @@ struct grpc_chttp2_transport { /** closure to execute writing */ grpc_iomgr_closure writing_action; - /** closure to start reading from the endpoint */ - grpc_iomgr_closure reading_action; /** closure to finish reading from the endpoint */ grpc_iomgr_closure recv_data; diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index 3d3d708e2f9..46ab0a585f9 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -84,7 +84,6 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t); /* forward declarations of various callbacks that we'll build closures around */ static void writing_action(void *t, int iomgr_success_ignored); -static void reading_action(void *t, int iomgr_success_ignored); /** Set a transport level setting, and push it to our peer */ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, @@ -249,7 +248,6 @@ static void init_transport(grpc_chttp2_transport *t, gpr_slice_buffer_init(&t->writing.outbuf); grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx); grpc_iomgr_closure_init(&t->writing_action, writing_action, t); - grpc_iomgr_closure_init(&t->reading_action, reading_action, t); gpr_slice_buffer_init(&t->parsing.qbuf); grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser); @@ -1065,10 +1063,9 @@ static void read_error_locked(grpc_chttp2_transport *t) { } /* tcp read callback */ -static void recv_data(void *tp, int success) { - grpc_chttp2_transport *t = tp; +static int recv_data_loop(grpc_chttp2_transport *t, int *success) { size_t i; - int unref = 0; + int keep_reading = 0; lock(t); i = 0; @@ -1077,12 +1074,12 @@ static void recv_data(void *tp, int success) { t->parsing_active = 1; /* merge stream lists */ grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); + &t->parsing_stream_map); grpc_chttp2_prepare_to_read(&t->global, &t->parsing); gpr_mu_unlock(&t->mu); for (; i < t->read_buffer.count && - grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); - i++) + grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); + i++) ; gpr_mu_lock(&t->mu); if (i != t->read_buffer.count) { @@ -1090,48 +1087,53 @@ static void recv_data(void *tp, int success) { } /* merge stream lists */ grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); + &t->parsing_stream_map); 
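/*
 * Editorial aside (not part of the original patch): recv_data_loop above is
 * the fix for the infinite recursion mentioned in this commit message.
 * Rather than recv_data() re-entering itself whenever grpc_endpoint_read()
 * completes inline, the per-read work is factored into a function that
 * reports whether another inline completion occurred, and a small driver
 * loops until the read goes pending or the transport stops reading. A hedged
 * sketch of that shape, using the hypothetical names process_once/driver:
 *
 *   // Returns 1 if another read completed inline and must be processed,
 *   // 0 once the read is pending or reading has stopped.
 *   static int process_once(grpc_chttp2_transport *t, int *success);
 *
 *   static void driver(void *tp, int success) {
 *     grpc_chttp2_transport *t = tp;
 *     while (process_once(t, &success))  // iterate instead of recursing
 *       ;
 *   }
 *
 * Bounding the stack this way matters on Windows, where reads frequently
 * complete synchronously from the IOCP fast path.
 */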
t->global.concurrent_stream_count = - grpc_chttp2_stream_map_size(&t->parsing_stream_map); + grpc_chttp2_stream_map_size(&t->parsing_stream_map); if (t->parsing.initial_window_update != 0) { grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, - update_global_window, t); + update_global_window, t); t->parsing.initial_window_update = 0; } /* handle higher level things */ grpc_chttp2_publish_reads(&t->global, &t->parsing); t->parsing_active = 0; } - if (!success) { + if (!*success || i != t->read_buffer.count) { drop_connection(t); read_error_locked(t); - unref = 1; - } else if (i == t->read_buffer.count) { - grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1); - } else { - read_error_locked(t); - unref = 1; + } + else { + keep_reading = 1; } gpr_slice_buffer_reset_and_unref(&t->read_buffer); unlock(t); - if (unref) { - UNREF_TRANSPORT(t, "recv_data"); - } -} - -static void reading_action(void *pt, int iomgr_success_ignored) { - grpc_chttp2_transport *t = pt; - switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) { + if (keep_reading) { + switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) { case GRPC_ENDPOINT_DONE: - recv_data(t, 1); - break; + *success = 1; + return 1; case GRPC_ENDPOINT_ERROR: - recv_data(t, 0); - break; + *success = 0; + return 1; case GRPC_ENDPOINT_PENDING: - break; + return 0; + } + } + else { + UNREF_TRANSPORT(t, "recv_data"); + return 0; } + + gpr_log(GPR_ERROR, "should never reach here"); + abort(); +} + +static void recv_data(void *tp, int success) { + grpc_chttp2_transport *t = tp; + + while (recv_data_loop(t, &success)); } /* From 1c090699da0ba779e001cc9bc13af42d7716f7ce Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 21 Aug 2015 13:07:01 -0700 Subject: [PATCH 06/29] clang-format --- src/core/iomgr/tcp_windows.c | 12 +++++---- src/core/transport/chttp2_transport.c | 37 +++++++++++++-------------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index a63123c6c9d..1cf2ca2131e 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -169,7 +169,9 @@ static void on_read_cb(void *tcpp, int from_iocp) { cb->cb(cb->cb_arg, success); } -static grpc_endpoint_op_status win_read(grpc_endpoint *ep, gpr_slice_buffer *read_slices, grpc_iomgr_closure *cb) { +static grpc_endpoint_op_status win_read(grpc_endpoint *ep, + gpr_slice_buffer *read_slices, + grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->read_info; @@ -275,8 +277,8 @@ static void on_write(void *tcpp, int from_iocp) { /* Initiates a write. 
*/ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { + gpr_slice_buffer *slices, + grpc_iomgr_closure *cb) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *socket = tcp->socket; grpc_winsocket_callback_info *info = &socket->write_info; @@ -398,8 +400,8 @@ static char *win_get_peer(grpc_endpoint *ep) { } static grpc_endpoint_vtable vtable = { - win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, - win_shutdown, win_destroy, win_get_peer}; + win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, + win_shutdown, win_destroy, win_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index 46ab0a585f9..8caa10c9389 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -1074,12 +1074,12 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) { t->parsing_active = 1; /* merge stream lists */ grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); + &t->parsing_stream_map); grpc_chttp2_prepare_to_read(&t->global, &t->parsing); gpr_mu_unlock(&t->mu); for (; i < t->read_buffer.count && - grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); - i++) + grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); + i++) ; gpr_mu_lock(&t->mu); if (i != t->read_buffer.count) { @@ -1087,12 +1087,12 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) { } /* merge stream lists */ grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); + &t->parsing_stream_map); t->global.concurrent_stream_count = - grpc_chttp2_stream_map_size(&t->parsing_stream_map); + grpc_chttp2_stream_map_size(&t->parsing_stream_map); if (t->parsing.initial_window_update != 0) { grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, - update_global_window, t); + update_global_window, t); t->parsing.initial_window_update = 0; } /* handle higher level things */ @@ -1102,8 +1102,7 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) { if (!*success || i != t->read_buffer.count) { drop_connection(t); read_error_locked(t); - } - else { + } else { keep_reading = 1; } gpr_slice_buffer_reset_and_unref(&t->read_buffer); @@ -1111,17 +1110,16 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) { if (keep_reading) { switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) { - case GRPC_ENDPOINT_DONE: - *success = 1; - return 1; - case GRPC_ENDPOINT_ERROR: - *success = 0; - return 1; - case GRPC_ENDPOINT_PENDING: - return 0; + case GRPC_ENDPOINT_DONE: + *success = 1; + return 1; + case GRPC_ENDPOINT_ERROR: + *success = 0; + return 1; + case GRPC_ENDPOINT_PENDING: + return 0; } - } - else { + } else { UNREF_TRANSPORT(t, "recv_data"); return 0; } @@ -1133,7 +1131,8 @@ static int recv_data_loop(grpc_chttp2_transport *t, int *success) { static void recv_data(void *tp, int success) { grpc_chttp2_transport *t = tp; - while (recv_data_loop(t, &success)); + while (recv_data_loop(t, &success)) + ; } /* From eb95b11bc1ce2a9ef6fd6fee728c7af43cf5536e Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Mon, 24 Aug 2015 07:51:35 -0700 Subject: [PATCH 07/29] Turn off refcount debugging --- src/core/iomgr/tcp_posix.c | 4 ++-- src/core/security/secure_endpoint.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff 
--git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c index 36ba3a76063..03be462960b 100644 --- a/src/core/iomgr/tcp_posix.c +++ b/src/core/iomgr/tcp_posix.c @@ -104,7 +104,8 @@ static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); } -#define GRPC_TCP_REFCOUNT_DEBUG +/*#define GRPC_TCP_REFCOUNT_DEBUG*/ +#ifdef GRPC_TCP_REFCOUNT_DEBUG #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, @@ -122,7 +123,6 @@ static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, reason, tcp->refcount.count, tcp->refcount.count + 1); gpr_ref(&tcp->refcount); } -#ifdef GRPC_TCP_REFCOUNT_DEBUG #else #define TCP_UNREF(tcp, reason) tcp_unref((tcp)) #define TCP_REF(tcp, reason) tcp_ref((tcp)) diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c index 4206c393187..b696e384fca 100644 --- a/src/core/security/secure_endpoint.c +++ b/src/core/security/secure_endpoint.c @@ -80,7 +80,8 @@ static void destroy(secure_endpoint *secure_ep) { gpr_free(ep); } -#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG +/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/ +#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG #define SECURE_ENDPOINT_UNREF(ep, reason) \ secure_endpoint_unref((ep), (reason), __FILE__, __LINE__) #define SECURE_ENDPOINT_REF(ep, reason) \ @@ -100,7 +101,6 @@ static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, ep, reason, ep->ref.count, ep->ref.count + 1); gpr_ref(&ep->ref); } -#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG #else #define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep)) #define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) From d1ed567866e600955b49bfe87d5ee03570aad85a Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Mon, 24 Aug 2015 14:29:54 -0700 Subject: [PATCH 08/29] Update comments --- src/core/iomgr/endpoint.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h index 38f1e46d670..d14d52d5616 100644 --- a/src/core/iomgr/endpoint.h +++ b/src/core/iomgr/endpoint.h @@ -77,15 +77,19 @@ char *grpc_endpoint_get_peer(grpc_endpoint *ep); /* Write slices out to the socket. If the connection is ready for more data after the end of the call, it - returns GRPC_ENDPOINT_WRITE_DONE. - Otherwise it returns GRPC_ENDPOINT_WRITE_PENDING and calls cb when the - connection is ready for more data. */ + returns GRPC_ENDPOINT_DONE. + Otherwise it returns GRPC_ENDPOINT_PENDING and calls cb when the + connection is ready for more data. + \a slices may be mutated at will by the endpoint until cb is called. + No guarantee is made to the content of slices after a write EXCEPT that + it is a valid slice buffer. + */ grpc_endpoint_op_status grpc_endpoint_write( grpc_endpoint *ep, gpr_slice_buffer *slices, grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT; /* Causes any pending read/write callbacks to run immediately with - GRPC_ENDPOINT_CB_SHUTDOWN status */ + success==0 */ void grpc_endpoint_shutdown(grpc_endpoint *ep); void grpc_endpoint_destroy(grpc_endpoint *ep); From 815604fdcdb06757efb79b456a811374656c976d Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Mon, 24 Aug 2015 21:44:55 +0000 Subject: [PATCH 09/29] Add code and message to base.NoSuchMethodError This is part of support for applications being able to respond to RPCs with unrecognized names with specific codes and messages. 
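As a rough illustration (not part of this patch), here is a minimal sketch of how an application-supplied base.Servicer could use the new constructor arguments; the servicer class name, the details string, and the None code are hypothetical placeholders, and a real application would substitute its own code value:

    from grpc.framework.interfaces.base import base

    class _RejectingServicer(base.Servicer):
      """Hypothetical servicer that recognizes no methods."""

      def service(self, group, method, context, output_operator):
        # Raising NoSuchMethodError with a code and a details string lets the
        # framework abort the operation and communicate both values to the
        # invocation side, rather than reporting a bare failure.
        raise base.NoSuchMethodError(
            None, 'unrecognized method %s/%s' % (group, method))

The ingestion changes below catch this exception and pass its code and message along when aborting the operation with Outcome.REMOTE_FAILURE.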
--- .../grpcio/grpc/framework/core/_context.py | 2 +- .../grpcio/grpc/framework/core/_emission.py | 3 +- .../grpcio/grpc/framework/core/_expiration.py | 2 +- .../grpcio/grpc/framework/core/_ingestion.py | 66 ++++++++++++------- .../grpcio/grpc/framework/core/_interfaces.py | 8 ++- .../grpcio/grpc/framework/core/_operation.py | 2 +- .../grpcio/grpc/framework/core/_reception.py | 2 +- .../grpc/framework/core/_transmission.py | 14 +++- .../grpc/framework/interfaces/base/base.py | 21 +++++- .../framework/interfaces/base/test_cases.py | 2 +- 10 files changed, 86 insertions(+), 36 deletions(-) diff --git a/src/python/grpcio/grpc/framework/core/_context.py b/src/python/grpcio/grpc/framework/core/_context.py index 24a12b612e5..76b35345308 100644 --- a/src/python/grpcio/grpc/framework/core/_context.py +++ b/src/python/grpcio/grpc/framework/core/_context.py @@ -60,7 +60,7 @@ class OperationContext(base.OperationContext): with self._lock: if self._termination_manager.outcome is None: self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) + self._transmission_manager.abort(outcome, None, None) self._expiration_manager.terminate() def outcome(self): diff --git a/src/python/grpcio/grpc/framework/core/_emission.py b/src/python/grpcio/grpc/framework/core/_emission.py index 7c702ab2ce0..2d7b2e2f100 100644 --- a/src/python/grpcio/grpc/framework/core/_emission.py +++ b/src/python/grpcio/grpc/framework/core/_emission.py @@ -82,7 +82,8 @@ class EmissionManager(_interfaces.EmissionManager): completion_present and self._completion_seen or allowance_present and allowance <= 0): self._termination_manager.abort(base.Outcome.LOCAL_FAILURE) - self._transmission_manager.abort(base.Outcome.LOCAL_FAILURE) + self._transmission_manager.abort( + base.Outcome.LOCAL_FAILURE, None, None) self._expiration_manager.terminate() else: self._initial_metadata_seen |= initial_metadata_present diff --git a/src/python/grpcio/grpc/framework/core/_expiration.py b/src/python/grpcio/grpc/framework/core/_expiration.py index d94bdf2d2b7..d8690b3a02a 100644 --- a/src/python/grpcio/grpc/framework/core/_expiration.py +++ b/src/python/grpcio/grpc/framework/core/_expiration.py @@ -73,7 +73,7 @@ class _ExpirationManager(_interfaces.ExpirationManager): if self._future is not None and index == self._index: self._future = None self._termination_manager.expire() - self._transmission_manager.abort(base.Outcome.EXPIRED) + self._transmission_manager.abort(base.Outcome.EXPIRED, None, None) return expire def start(self): diff --git a/src/python/grpcio/grpc/framework/core/_ingestion.py b/src/python/grpcio/grpc/framework/core/_ingestion.py index 59f7f8adc86..7b8127f3fce 100644 --- a/src/python/grpcio/grpc/framework/core/_ingestion.py +++ b/src/python/grpcio/grpc/framework/core/_ingestion.py @@ -31,6 +31,7 @@ import abc import collections +import enum from grpc.framework.core import _constants from grpc.framework.core import _interfaces @@ -42,21 +43,31 @@ _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE = 'Exception initializing ingestion!' _INGESTION_EXCEPTION_LOG_MESSAGE = 'Exception during ingestion!' -class _SubscriptionCreation(collections.namedtuple( - '_SubscriptionCreation', ('subscription', 'remote_error', 'abandoned'))): +class _SubscriptionCreation( + collections.namedtuple( + '_SubscriptionCreation', + ('kind', 'subscription', 'code', 'message',))): """A sum type for the outcome of ingestion initialization. - Either subscription will be non-None, remote_error will be True, or abandoned - will be True. 
- Attributes: - subscription: A base.Subscription describing the customer's interest in - operation values from the other side. - remote_error: A boolean indicating that the subscription could not be - created due to an error on the remote side of the operation. - abandoned: A boolean indicating that subscription creation was abandoned. + kind: A Kind value coarsely indicating how subscription creation completed. + subscription: The created subscription. Only present if kind is + Kind.SUBSCRIPTION. + code: A code value to be sent to the other side of the operation along with + an indication that the operation is being aborted due to an error on the + remote side of the operation. Only present if kind is Kind.REMOTE_ERROR. + message: A message value to be sent to the other side of the operation + along with an indication that the operation is being aborted due to an + error on the remote side of the operation. Only present if kind is + Kind.REMOTE_ERROR. """ + @enum.unique + class Kind(enum.Enum): + SUBSCRIPTION = 'subscription' + REMOTE_ERROR = 'remote error' + ABANDONED = 'abandoned' + class _SubscriptionCreator(object): """Common specification of subscription-creating behavior.""" @@ -101,12 +112,15 @@ class _ServiceSubscriptionCreator(_SubscriptionCreator): try: subscription = self._servicer.service( group, method, self._operation_context, self._output_operator) - except base.NoSuchMethodError: - return _SubscriptionCreation(None, True, False) + except base.NoSuchMethodError as e: + return _SubscriptionCreation( + _SubscriptionCreation.Kind.REMOTE_ERROR, None, e.code, e.message) except abandonment.Abandoned: - return _SubscriptionCreation(None, False, True) + return _SubscriptionCreation( + _SubscriptionCreation.Kind.ABANDONED, None, None, None) else: - return _SubscriptionCreation(subscription, False, False) + return _SubscriptionCreation( + _SubscriptionCreation.Kind.SUBSCRIPTION, subscription, None, None) def _wrap(behavior): @@ -176,10 +190,10 @@ class _IngestionManager(_interfaces.IngestionManager): self._pending_payloads = None self._pending_completion = None - def _abort_and_notify(self, outcome): + def _abort_and_notify(self, outcome, code, message): self._abort_internal_only() self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) + self._transmission_manager.abort(outcome, code, message) self._expiration_manager.terminate() def _operator_next(self): @@ -236,12 +250,12 @@ class _IngestionManager(_interfaces.IngestionManager): else: with self._lock: if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.LOCAL_FAILURE) + self._abort_and_notify(base.Outcome.LOCAL_FAILURE, None, None) return else: with self._lock: if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.LOCAL_FAILURE) + self._abort_and_notify(base.Outcome.LOCAL_FAILURE, None, None) return def _operator_post_create(self, subscription): @@ -260,20 +274,22 @@ class _IngestionManager(_interfaces.IngestionManager): def _create(self, subscription_creator, group, name): outcome = callable_util.call_logging_exceptions( - subscription_creator.create, _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, - group, name) + subscription_creator.create, + _CREATE_SUBSCRIPTION_EXCEPTION_LOG_MESSAGE, group, name) if outcome.return_value is None: with self._lock: if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.LOCAL_FAILURE) - elif outcome.return_value.abandoned: + self._abort_and_notify(base.Outcome.LOCAL_FAILURE, None, 
None) + elif outcome.return_value.kind is _SubscriptionCreation.Kind.ABANDONED: with self._lock: if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.LOCAL_FAILURE) - elif outcome.return_value.remote_error: + self._abort_and_notify(base.Outcome.LOCAL_FAILURE, None, None) + elif outcome.return_value.kind is _SubscriptionCreation.Kind.REMOTE_ERROR: + code = outcome.return_value.code + message = outcome.return_value.message with self._lock: if self._termination_manager.outcome is None: - self._abort_and_notify(base.Outcome.REMOTE_FAILURE) + self._abort_and_notify(base.Outcome.REMOTE_FAILURE, code, message) elif outcome.return_value.subscription.kind is base.Subscription.Kind.FULL: self._operator_post_create(outcome.return_value.subscription) else: diff --git a/src/python/grpcio/grpc/framework/core/_interfaces.py b/src/python/grpcio/grpc/framework/core/_interfaces.py index a626b9f7679..deb5f34f9b4 100644 --- a/src/python/grpcio/grpc/framework/core/_interfaces.py +++ b/src/python/grpcio/grpc/framework/core/_interfaces.py @@ -155,13 +155,19 @@ class TransmissionManager(object): raise NotImplementedError() @abc.abstractmethod - def abort(self, outcome): + def abort(self, outcome, code, message): """Indicates that the operation has aborted. Args: outcome: An interfaces.Outcome for the operation. If None, indicates that the operation abortion should not be communicated to the other side of the operation. + code: A code value to communicate to the other side of the operation + along with indication of operation abortion. May be None, and has no + effect if outcome is None. + message: A message value to communicate to the other side of the + operation along with indication of operation abortion. May be None, and + has no effect if outcome is None. 
""" raise NotImplementedError() diff --git a/src/python/grpcio/grpc/framework/core/_operation.py b/src/python/grpcio/grpc/framework/core/_operation.py index d20e40a53da..cc873c03f93 100644 --- a/src/python/grpcio/grpc/framework/core/_operation.py +++ b/src/python/grpcio/grpc/framework/core/_operation.py @@ -79,7 +79,7 @@ class _EasyOperation(_interfaces.Operation): with self._lock: if self._termination_manager.outcome is None: self._termination_manager.abort(outcome) - self._transmission_manager.abort(outcome) + self._transmission_manager.abort(outcome, None, None) self._expiration_manager.terminate() diff --git a/src/python/grpcio/grpc/framework/core/_reception.py b/src/python/grpcio/grpc/framework/core/_reception.py index 0858f64ff6b..1cebe3874ba 100644 --- a/src/python/grpcio/grpc/framework/core/_reception.py +++ b/src/python/grpcio/grpc/framework/core/_reception.py @@ -73,7 +73,7 @@ class ReceptionManager(_interfaces.ReceptionManager): self._aborted = True if self._termination_manager.outcome is None: self._termination_manager.abort(outcome) - self._transmission_manager.abort(None) + self._transmission_manager.abort(None, None, None) self._expiration_manager.terminate() def _sequence_failure(self, ticket): diff --git a/src/python/grpcio/grpc/framework/core/_transmission.py b/src/python/grpcio/grpc/framework/core/_transmission.py index 03644f4d491..efef87dd4c6 100644 --- a/src/python/grpcio/grpc/framework/core/_transmission.py +++ b/src/python/grpcio/grpc/framework/core/_transmission.py @@ -104,9 +104,13 @@ class TransmissionManager(_interfaces.TransmissionManager): return None else: self._abortion_outcome = None + if self._completion is None: + code, message = None, None + else: + code, message = self._completion.code, self._completion.message return links.Ticket( self._operation_id, self._lowest_unused_sequence_number, None, - None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, code, message, termination, None) action = False @@ -277,7 +281,7 @@ class TransmissionManager(_interfaces.TransmissionManager): self._remote_complete = True self._local_allowance = 0 - def abort(self, outcome): + def abort(self, outcome, code, message): """See _interfaces.TransmissionManager.abort for specification.""" if self._transmitting: self._aborted, self._abortion_outcome = True, outcome @@ -287,8 +291,12 @@ class TransmissionManager(_interfaces.TransmissionManager): termination = _constants.ABORTION_OUTCOME_TO_TICKET_TERMINATION[ outcome] if termination is not None: + if self._completion is None: + code, message = None, None + else: + code, message = self._completion.code, self._completion.message ticket = links.Ticket( self._operation_id, self._lowest_unused_sequence_number, None, - None, None, None, None, None, None, None, None, None, + None, None, None, None, None, None, None, code, message, termination, None) self._transmit(ticket) diff --git a/src/python/grpcio/grpc/framework/interfaces/base/base.py b/src/python/grpcio/grpc/framework/interfaces/base/base.py index 76e0a5bdaea..bc52efb4c5a 100644 --- a/src/python/grpcio/grpc/framework/interfaces/base/base.py +++ b/src/python/grpcio/grpc/framework/interfaces/base/base.py @@ -47,7 +47,26 @@ from grpc.framework.foundation import abandonment # pylint: disable=unused-impo class NoSuchMethodError(Exception): - """Indicates that an unrecognized operation has been called.""" + """Indicates that an unrecognized operation has been called. 
+ + Attributes: + code: A code value to communicate to the other side of the operation along + with indication of operation termination. May be None. + details: A details value to communicate to the other side of the operation + along with indication of operation termination. May be None. + """ + + def __init__(self, code, details): + """Constructor. + + Args: + code: A code value to communicate to the other side of the operation + along with indication of operation termination. May be None. + details: A details value to communicate to the other side of the + operation along with indication of operation termination. May be None. + """ + self.code = code + self.details = details @enum.unique diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/base/test_cases.py b/src/python/grpcio_test/grpc_test/framework/interfaces/base/test_cases.py index 5c8b176da4f..87332cf612e 100644 --- a/src/python/grpcio_test/grpc_test/framework/interfaces/base/test_cases.py +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/base/test_cases.py @@ -134,7 +134,7 @@ class _Servicer(base.Servicer): if group != self._group or method != self._method: controller.fail( '%s != %s or %s != %s' % (group, self._group, method, self._method)) - raise base.NoSuchMethodError() + raise base.NoSuchMethodError(None, None) else: operator = _Operator( controller, controller.on_service_advance, self._pool, From 1dc323b45e1a109c860806cb25e7eca21c29a3c0 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 25 Aug 2015 11:33:26 -0700 Subject: [PATCH 10/29] Spam cleanup --- src/core/iomgr/tcp_windows.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index 1cf2ca2131e..469c3822187 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -201,7 +201,6 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, /* Did we get data immediately ? Yay. */ if (info->wsa_error != WSAEWOULDBLOCK) { info->bytes_transfered = bytes_read; - gpr_log(GPR_DEBUG, "immread: %d bytes", bytes_read); return on_read(tcp, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } @@ -214,7 +213,6 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { info->wsa_error = wsa_error; - gpr_log(GPR_DEBUG, "immread: err=%d", wsa_error); return on_read(tcp, 1) ? 
GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } } From 4c8288ec010ed79bb50659fb6010b92a385e24ad Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Tue, 25 Aug 2015 01:51:49 +0000 Subject: [PATCH 11/29] The RPC Framework crust package --- .../grpcio/grpc/framework/crust/__init__.py | 30 + .../grpcio/grpc/framework/crust/_calls.py | 204 +++++++ .../grpcio/grpc/framework/crust/_control.py | 545 ++++++++++++++++++ .../grpcio/grpc/framework/crust/_service.py | 166 ++++++ .../grpc/framework/crust/implementations.py | 352 +++++++++++ .../_core_over_links_base_interface_test.py | 2 +- ...ver_core_over_links_face_interface_test.py | 160 +++++ .../_crust_over_core_face_interface_test.py | 111 ++++ .../interfaces/face/_3069_test_constant.py | 37 ++ .../_blocking_invocation_inline_service.py | 9 +- ...nt_invocation_synchronous_event_service.py | 11 +- ...e_invocation_asynchronous_event_service.py | 17 +- .../interfaces/face/_stock_service.py | 2 +- tools/run_tests/run_python.sh | 2 + 14 files changed, 1630 insertions(+), 18 deletions(-) create mode 100644 src/python/grpcio/grpc/framework/crust/__init__.py create mode 100644 src/python/grpcio/grpc/framework/crust/_calls.py create mode 100644 src/python/grpcio/grpc/framework/crust/_control.py create mode 100644 src/python/grpcio/grpc/framework/crust/_service.py create mode 100644 src/python/grpcio/grpc/framework/crust/implementations.py create mode 100644 src/python/grpcio_test/grpc_test/_crust_over_core_over_links_face_interface_test.py create mode 100644 src/python/grpcio_test/grpc_test/framework/_crust_over_core_face_interface_test.py create mode 100644 src/python/grpcio_test/grpc_test/framework/interfaces/face/_3069_test_constant.py diff --git a/src/python/grpcio/grpc/framework/crust/__init__.py b/src/python/grpcio/grpc/framework/crust/__init__.py new file mode 100644 index 00000000000..70865191060 --- /dev/null +++ b/src/python/grpcio/grpc/framework/crust/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + diff --git a/src/python/grpcio/grpc/framework/crust/_calls.py b/src/python/grpcio/grpc/framework/crust/_calls.py new file mode 100644 index 00000000000..f9077bedfe1 --- /dev/null +++ b/src/python/grpcio/grpc/framework/crust/_calls.py @@ -0,0 +1,204 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Utility functions for invoking RPCs.""" + +from grpc.framework.crust import _control +from grpc.framework.interfaces.base import utilities +from grpc.framework.interfaces.face import face + +_ITERATOR_EXCEPTION_LOG_MESSAGE = 'Exception iterating over requests!' 
+ +_EMPTY_COMPLETION = utilities.completion(None, None, None) + + +def _invoke(end, group, method, timeout, initial_metadata, payload, complete): + rendezvous = _control.Rendezvous(None, None) + operation_context, operator = end.operate( + group, method, utilities.full_subscription(rendezvous), timeout, + initial_metadata=initial_metadata, payload=payload, + completion=_EMPTY_COMPLETION if complete else None) + rendezvous.set_operator_and_context(operator, operation_context) + outcome = operation_context.add_termination_callback(rendezvous.set_outcome) + if outcome is not None: + rendezvous.set_outcome(outcome) + return rendezvous, operation_context, outcome + + +def _event_return_unary( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool): + if outcome is None: + def in_pool(): + abortion = rendezvous.add_abortion_callback(abortion_callback) + if abortion is None: + try: + receiver.initial_metadata(rendezvous.initial_metadata()) + receiver.response(next(rendezvous)) + receiver.complete( + rendezvous.terminal_metadata(), rendezvous.code(), + rendezvous.details()) + except face.AbortionError: + pass + else: + abortion_callback(abortion) + pool.submit(_control.pool_wrap(in_pool, operation_context)) + return rendezvous + + +def _event_return_stream( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool): + if outcome is None: + def in_pool(): + abortion = rendezvous.add_abortion_callback(abortion_callback) + if abortion is None: + try: + receiver.initial_metadata(rendezvous.initial_metadata()) + for response in rendezvous: + receiver.response(response) + receiver.complete( + rendezvous.terminal_metadata(), rendezvous.code(), + rendezvous.details()) + except face.AbortionError: + pass + else: + abortion_callback(abortion) + pool.submit(_control.pool_wrap(in_pool, operation_context)) + return rendezvous + + +def blocking_unary_unary( + end, group, method, timeout, with_call, initial_metadata, payload): + """Services in a blocking fashion a unary-unary servicer method.""" + rendezvous, unused_operation_context, unused_outcome = _invoke( + end, group, method, timeout, initial_metadata, payload, True) + if with_call: + return next(rendezvous, rendezvous) + else: + return next(rendezvous) + + +def future_unary_unary(end, group, method, timeout, initial_metadata, payload): + """Services a value-in value-out servicer method by returning a Future.""" + rendezvous, unused_operation_context, unused_outcome = _invoke( + end, group, method, timeout, initial_metadata, payload, True) + return rendezvous + + +def inline_unary_stream(end, group, method, timeout, initial_metadata, payload): + """Services a value-in stream-out servicer method.""" + rendezvous, unused_operation_context, unused_outcome = _invoke( + end, group, method, timeout, initial_metadata, payload, True) + return rendezvous + + +def blocking_stream_unary( + end, group, method, timeout, with_call, initial_metadata, payload_iterator, + pool): + """Services in a blocking fashion a stream-in value-out servicer method.""" + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, None, False) + if outcome is None: + def in_pool(): + for payload in payload_iterator: + rendezvous.consume(payload) + rendezvous.terminate() + pool.submit(_control.pool_wrap(in_pool, operation_context)) + if with_call: + return next(rendezvous), rendezvous + else: + return next(rendezvous) + else: + if with_call: + return next(rendezvous), rendezvous + else: + return next(rendezvous) 
+ + +def future_stream_unary( + end, group, method, timeout, initial_metadata, payload_iterator, pool): + """Services a stream-in value-out servicer method by returning a Future.""" + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, None, False) + if outcome is None: + def in_pool(): + for payload in payload_iterator: + rendezvous.consume(payload) + rendezvous.terminate() + pool.submit(_control.pool_wrap(in_pool, operation_context)) + return rendezvous + + +def inline_stream_stream( + end, group, method, timeout, initial_metadata, payload_iterator, pool): + """Services a stream-in stream-out servicer method.""" + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, None, False) + if outcome is None: + def in_pool(): + for payload in payload_iterator: + rendezvous.consume(payload) + rendezvous.terminate() + pool.submit(_control.pool_wrap(in_pool, operation_context)) + return rendezvous + + +def event_unary_unary( + end, group, method, timeout, initial_metadata, payload, receiver, + abortion_callback, pool): + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, payload, True) + return _event_return_unary( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool) + + +def event_unary_stream( + end, group, method, timeout, initial_metadata, payload, + receiver, abortion_callback, pool): + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, payload, True) + return _event_return_stream( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool) + + +def event_stream_unary( + end, group, method, timeout, initial_metadata, receiver, abortion_callback, + pool): + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, None, False) + return _event_return_unary( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool) + + +def event_stream_stream( + end, group, method, timeout, initial_metadata, receiver, abortion_callback, + pool): + rendezvous, operation_context, outcome = _invoke( + end, group, method, timeout, initial_metadata, None, False) + return _event_return_stream( + receiver, abortion_callback, rendezvous, operation_context, outcome, pool) diff --git a/src/python/grpcio/grpc/framework/crust/_control.py b/src/python/grpcio/grpc/framework/crust/_control.py new file mode 100644 index 00000000000..01de3c15bd1 --- /dev/null +++ b/src/python/grpcio/grpc/framework/crust/_control.py @@ -0,0 +1,545 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""State and behavior for translating between sync and async control flow.""" + +import collections +import enum +import sys +import threading +import time + +from grpc.framework.foundation import abandonment +from grpc.framework.foundation import callable_util +from grpc.framework.foundation import future +from grpc.framework.foundation import stream +from grpc.framework.interfaces.base import base +from grpc.framework.interfaces.base import utilities +from grpc.framework.interfaces.face import face + +_DONE_CALLBACK_LOG_MESSAGE = 'Exception calling Future "done" callback!' +_INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Crust) Internal Error! )-:' + +_CANNOT_SET_INITIAL_METADATA = ( + 'Could not set initial metadata - has it already been set, or has a ' + + 'payload already been sent?') +_CANNOT_SET_TERMINAL_METADATA = ( + 'Could not set terminal metadata - has it already been set, or has RPC ' + + 'completion already been indicated?') +_CANNOT_SET_CODE = ( + 'Could not set code - has it already been set, or has RPC completion ' + + 'already been indicated?') +_CANNOT_SET_DETAILS = ( + 'Could not set details - has it already been set, or has RPC completion ' + + 'already been indicated?') + + +class _DummyOperator(base.Operator): + + def advance( + self, initial_metadata=None, payload=None, completion=None, + allowance=None): + pass + +_DUMMY_OPERATOR = _DummyOperator() + + +class _Awaited( + collections.namedtuple('_Awaited', ('kind', 'value',))): + + @enum.unique + class Kind(enum.Enum): + NOT_YET_ARRIVED = 'not yet arrived' + ARRIVED = 'arrived' + +_NOT_YET_ARRIVED = _Awaited(_Awaited.Kind.NOT_YET_ARRIVED, None) +_ARRIVED_AND_NONE = _Awaited(_Awaited.Kind.ARRIVED, None) + + +class _Transitory( + collections.namedtuple('_Transitory', ('kind', 'value',))): + + @enum.unique + class Kind(enum.Enum): + NOT_YET_SEEN = 'not yet seen' + PRESENT = 'present' + GONE = 'gone' + +_NOT_YET_SEEN = _Transitory(_Transitory.Kind.NOT_YET_SEEN, None) +_GONE = _Transitory(_Transitory.Kind.GONE, None) + + +class _Termination( + collections.namedtuple( + '_Termination', ('terminated', 'abortion', 'abortion_error',))): + """Values indicating whether and how an RPC has terminated. + + Attributes: + terminated: A boolean indicating whether or not the RPC has terminated. + abortion: A face.Abortion value describing the RPC's abortion or None if the + RPC did not abort. + abortion_error: A face.AbortionError describing the RPC's abortion or None + if the RPC did not abort. 
+ """ + +_NOT_TERMINATED = _Termination(False, None, None) + +_OPERATION_OUTCOME_TO_TERMINATION_CONSTRUCTOR = { + base.Outcome.COMPLETED: lambda *unused_args: _Termination(True, None, None), + base.Outcome.CANCELLED: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.CANCELLED, *args), + face.CancellationError(*args)), + base.Outcome.EXPIRED: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.EXPIRED, *args), + face.ExpirationError(*args)), + base.Outcome.LOCAL_SHUTDOWN: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.LOCAL_SHUTDOWN, *args), + face.LocalShutdownError(*args)), + base.Outcome.REMOTE_SHUTDOWN: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.REMOTE_SHUTDOWN, *args), + face.RemoteShutdownError(*args)), + base.Outcome.RECEPTION_FAILURE: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), + face.NetworkError(*args)), + base.Outcome.TRANSMISSION_FAILURE: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.NETWORK_FAILURE, *args), + face.NetworkError(*args)), + base.Outcome.LOCAL_FAILURE: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.LOCAL_FAILURE, *args), + face.LocalError(*args)), + base.Outcome.REMOTE_FAILURE: lambda *args: _Termination( + True, face.Abortion(face.Abortion.Kind.REMOTE_FAILURE, *args), + face.RemoteError(*args)), +} + + +def _wait_once_until(condition, until): + if until is None: + condition.wait() + else: + remaining = until - time.time() + if remaining < 0: + raise future.TimeoutError() + else: + condition.wait(timeout=remaining) + + +def _done_callback_as_operation_termination_callback( + done_callback, rendezvous): + def operation_termination_callback(operation_outcome): + rendezvous.set_outcome(operation_outcome) + done_callback(rendezvous) + return operation_termination_callback + + +def _abortion_callback_as_operation_termination_callback( + rpc_abortion_callback, rendezvous_set_outcome): + def operation_termination_callback(operation_outcome): + termination = rendezvous_set_outcome(operation_outcome) + if termination.abortion is not None: + rpc_abortion_callback(termination.abortion) + return operation_termination_callback + + +class Rendezvous(base.Operator, future.Future, stream.Consumer, face.Call): + """A rendez-vous for the threads of an operation. + + Instances of this object present iterator and stream.Consumer interfaces for + interacting with application code and present a base.Operator interface and + maintain a base.Operator internally for interacting with base interface code. + """ + + def __init__(self, operator, operation_context): + self._condition = threading.Condition() + + self._operator = operator + self._operation_context = operation_context + + self._up_initial_metadata = _NOT_YET_ARRIVED + self._up_payload = None + self._up_allowance = 1 + self._up_completion = _NOT_YET_ARRIVED + self._down_initial_metadata = _NOT_YET_SEEN + self._down_payload = None + self._down_allowance = 1 + self._down_terminal_metadata = _NOT_YET_SEEN + self._down_code = _NOT_YET_SEEN + self._down_details = _NOT_YET_SEEN + + self._termination = _NOT_TERMINATED + + # The semantics of future.Future.cancel and future.Future.cancelled are + # slightly wonky, so they have to be tracked separately from the rest of the + # result of the RPC. 
This field tracks whether cancellation was requested + # prior to termination of the RPC + self._cancelled = False + + def set_operator_and_context(self, operator, operation_context): + with self._condition: + self._operator = operator + self._operation_context = operation_context + + def _down_completion(self): + if self._down_terminal_metadata.kind is _Transitory.Kind.NOT_YET_SEEN: + terminal_metadata = None + self._down_terminal_metadata = _GONE + elif self._down_terminal_metadata.kind is _Transitory.Kind.PRESENT: + terminal_metadata = self._down_terminal_metadata.value + self._down_terminal_metadata = _GONE + else: + terminal_metadata = None + if self._down_code.kind is _Transitory.Kind.NOT_YET_SEEN: + code = None + self._down_code = _GONE + elif self._down_code.kind is _Transitory.Kind.PRESENT: + code = self._down_code.value + self._down_code = _GONE + else: + code = None + if self._down_details.kind is _Transitory.Kind.NOT_YET_SEEN: + details = None + self._down_details = _GONE + elif self._down_details.kind is _Transitory.Kind.PRESENT: + details = self._down_details.value + self._down_details = _GONE + else: + details = None + return utilities.completion(terminal_metadata, code, details) + + def _set_outcome(self, outcome): + if not self._termination.terminated: + self._operator = _DUMMY_OPERATOR + self._operation_context = None + self._down_initial_metadata = _GONE + self._down_payload = None + self._down_terminal_metadata = _GONE + self._down_code = _GONE + self._down_details = _GONE + + if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: + initial_metadata = None + else: + initial_metadata = self._up_initial_metadata.value + if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: + terminal_metadata, code, details = None, None, None + else: + terminal_metadata = self._up_completion.value.terminal_metadata + code = self._up_completion.value.code + details = self._up_completion.value.message + self._termination = _OPERATION_OUTCOME_TO_TERMINATION_CONSTRUCTOR[ + outcome](initial_metadata, terminal_metadata, code, details) + + self._condition.notify_all() + + return self._termination + + def advance( + self, initial_metadata=None, payload=None, completion=None, + allowance=None): + with self._condition: + if initial_metadata is not None: + self._up_initial_metadata = _Awaited( + _Awaited.Kind.ARRIVED, initial_metadata) + if payload is not None: + if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: + self._up_initial_metadata = _ARRIVED_AND_NONE + self._up_payload = payload + self._up_allowance -= 1 + if completion is not None: + if self._up_initial_metadata.kind is _Awaited.Kind.NOT_YET_ARRIVED: + self._up_initial_metadata = _ARRIVED_AND_NONE + self._up_completion = _Awaited( + _Awaited.Kind.ARRIVED, completion) + if allowance is not None: + if self._down_payload is not None: + self._operator.advance(payload=self._down_payload) + self._down_payload = None + self._down_allowance += allowance - 1 + else: + self._down_allowance += allowance + self._condition.notify_all() + + def cancel(self): + with self._condition: + if self._operation_context is not None: + self._operation_context.cancel() + self._cancelled = True + return False + + def cancelled(self): + with self._condition: + return self._cancelled + + def running(self): + with self._condition: + return not self._termination.terminated + + def done(self): + with self._condition: + return self._termination.terminated + + def result(self, timeout=None): + until = None if timeout is None else 
time.time() + timeout + with self._condition: + while True: + if self._termination.terminated: + if self._termination.abortion is None: + return self._up_payload + elif self._termination.abortion.kind is face.Abortion.Kind.CANCELLED: + raise future.CancelledError() + else: + raise self._termination.abortion_error # pylint: disable=raising-bad-type + else: + _wait_once_until(self._condition, until) + + def exception(self, timeout=None): + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._termination.terminated: + if self._termination.abortion is None: + return None + else: + return self._termination.abortion_error + else: + _wait_once_until(self._condition, until) + + def traceback(self, timeout=None): + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._termination.terminated: + if self._termination.abortion_error is None: + return None + else: + abortion_error = self._termination.abortion_error + break + else: + _wait_once_until(self._condition, until) + + try: + raise abortion_error + except face.AbortionError: + return sys.exc_info()[2] + + def add_done_callback(self, fn): + with self._condition: + if self._operation_context is not None: + outcome = self._operation_context.add_termination_callback( + _done_callback_as_operation_termination_callback(fn, self)) + if outcome is None: + return + else: + self._set_outcome(outcome) + + fn(self) + + def consume(self, value): + with self._condition: + while True: + if self._termination.terminated: + return + elif 0 < self._down_allowance: + self._operator.advance(payload=value) + self._down_allowance -= 1 + return + else: + self._condition.wait() + + def terminate(self): + with self._condition: + if self._termination.terminated: + return + elif self._down_code.kind is _Transitory.Kind.GONE: + # Conform to specified idempotence of terminate by ignoring extra calls. 
+ return + else: + completion = self._down_completion() + self._operator.advance(completion=completion) + + def consume_and_terminate(self, value): + with self._condition: + while True: + if self._termination.terminated: + return + elif 0 < self._down_allowance: + completion = self._down_completion() + self._operator.advance(payload=value, completion=completion) + return + else: + self._condition.wait() + + def __iter__(self): + return self + + def next(self): + with self._condition: + while True: + if self._termination.abortion_error is not None: + raise self._termination.abortion_error + elif self._up_payload is not None: + payload = self._up_payload + self._up_payload = None + if self._up_completion.kind is _Awaited.Kind.NOT_YET_ARRIVED: + self._operator.advance(allowance=1) + return payload + elif self._up_completion.kind is _Awaited.Kind.ARRIVED: + raise StopIteration() + else: + self._condition.wait() + + def is_active(self): + with self._condition: + return not self._termination.terminated + + def time_remaining(self): + if self._operation_context is None: + return 0 + else: + return self._operation_context.time_remaining() + + def add_abortion_callback(self, abortion_callback): + with self._condition: + if self._operation_context is None: + return self._termination.abortion + else: + outcome = self._operation_context.add_termination_callback( + _abortion_callback_as_operation_termination_callback( + abortion_callback, self.set_outcome)) + if outcome is not None: + return self._set_outcome(outcome).abortion + else: + return self._termination.abortion + + def initial_metadata(self): + with self._condition: + while True: + if self._up_initial_metadata.kind is _Awaited.Kind.ARRIVED: + return self._up_initial_metadata.value + elif self._termination.terminated: + return None + else: + self._condition.wait() + + def terminal_metadata(self): + with self._condition: + while True: + if self._up_completion.kind is _Awaited.Kind.ARRIVED: + return self._up_completion.value.terminal_metadata + elif self._termination.terminated: + return None + else: + self._condition.wait() + + def code(self): + with self._condition: + while True: + if self._up_completion.kind is _Awaited.Kind.ARRIVED: + return self._up_completion.value.code + elif self._termination.terminated: + return None + else: + self._condition.wait() + + def details(self): + with self._condition: + while True: + if self._up_completion.kind is _Awaited.Kind.ARRIVED: + return self._up_completion.value.message + elif self._termination.terminated: + return None + else: + self._condition.wait() + + def set_initial_metadata(self, initial_metadata): + with self._condition: + if (self._down_initial_metadata.kind is not + _Transitory.Kind.NOT_YET_SEEN): + raise ValueError(_CANNOT_SET_INITIAL_METADATA) + else: + self._down_initial_metadata = _GONE + self._operator.advance(initial_metadata=initial_metadata) + + def set_terminal_metadata(self, terminal_metadata): + with self._condition: + if (self._down_terminal_metadata.kind is not + _Transitory.Kind.NOT_YET_SEEN): + raise ValueError(_CANNOT_SET_TERMINAL_METADATA) + else: + self._down_terminal_metadata = _Transitory( + _Transitory.Kind.PRESENT, terminal_metadata) + + def set_code(self, code): + with self._condition: + if self._down_code.kind is not _Transitory.Kind.NOT_YET_SEEN: + raise ValueError(_CANNOT_SET_CODE) + else: + self._down_code = _Transitory(_Transitory.Kind.PRESENT, code) + + def set_details(self, details): + with self._condition: + if self._down_details.kind is not 
_Transitory.Kind.NOT_YET_SEEN: + raise ValueError(_CANNOT_SET_DETAILS) + else: + self._down_details = _Transitory(_Transitory.Kind.PRESENT, details) + + def set_outcome(self, outcome): + with self._condition: + return self._set_outcome(outcome) + + +def pool_wrap(behavior, operation_context): + """Wraps an operation-related behavior so that it may be called in a pool. + + Args: + behavior: A callable related to carrying out an operation. + operation_context: A base_interfaces.OperationContext for the operation. + + Returns: + A callable that when called carries out the behavior of the given callable + and handles whatever exceptions it raises appropriately. + """ + def translation(*args): + try: + behavior(*args) + except ( + abandonment.Abandoned, + face.CancellationError, + face.ExpirationError, + face.LocalShutdownError, + face.RemoteShutdownError, + face.NetworkError, + face.RemoteError, + ) as e: + if operation_context.outcome() is None: + operation_context.fail(e) + except Exception as e: + operation_context.fail(e) + return callable_util.with_exceptions_logged( + translation, _INTERNAL_ERROR_LOG_MESSAGE) diff --git a/src/python/grpcio/grpc/framework/crust/_service.py b/src/python/grpcio/grpc/framework/crust/_service.py new file mode 100644 index 00000000000..2455a58f59a --- /dev/null +++ b/src/python/grpcio/grpc/framework/crust/_service.py @@ -0,0 +1,166 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Behaviors for servicing RPCs.""" + +from grpc.framework.crust import _control +from grpc.framework.foundation import abandonment +from grpc.framework.interfaces.base import utilities +from grpc.framework.interfaces.face import face + + +class _ServicerContext(face.ServicerContext): + + def __init__(self, rendezvous): + self._rendezvous = rendezvous + + def is_active(self): + return self._rendezvous.is_active() + + def time_remaining(self): + return self._rendezvous.time_remaining() + + def add_abortion_callback(self, abortion_callback): + return self._rendezvous.add_abortion_callback(abortion_callback) + + def cancel(self): + self._rendezvous.cancel() + + def invocation_metadata(self): + return self._rendezvous.initial_metadata() + + def initial_metadata(self, initial_metadata): + self._rendezvous.set_initial_metadata(initial_metadata) + + def terminal_metadata(self, terminal_metadata): + self._rendezvous.set_terminal_metadata(terminal_metadata) + + def code(self, code): + self._rendezvous.set_code(code) + + def details(self, details): + self._rendezvous.set_details(details) + + +def _adaptation(pool, in_pool): + def adaptation(operator, operation_context): + rendezvous = _control.Rendezvous(operator, operation_context) + outcome = operation_context.add_termination_callback(rendezvous.set_outcome) + if outcome is None: + pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) + return utilities.full_subscription(rendezvous) + else: + raise abandonment.Abandoned() + return adaptation + + +def adapt_inline_unary_unary(method, pool): + def in_pool(rendezvous): + request = next(rendezvous) + response = method(request, _ServicerContext(rendezvous)) + rendezvous.consume_and_terminate(response) + return _adaptation(pool, in_pool) + + +def adapt_inline_unary_stream(method, pool): + def in_pool(rendezvous): + request = next(rendezvous) + response_iterator = method(request, _ServicerContext(rendezvous)) + for response in response_iterator: + rendezvous.consume(response) + rendezvous.terminate() + return _adaptation(pool, in_pool) + + +def adapt_inline_stream_unary(method, pool): + def in_pool(rendezvous): + response = method(rendezvous, _ServicerContext(rendezvous)) + rendezvous.consume_and_terminate(response) + return _adaptation(pool, in_pool) + + +def adapt_inline_stream_stream(method, pool): + def in_pool(rendezvous): + response_iterator = method(rendezvous, _ServicerContext(rendezvous)) + for response in response_iterator: + rendezvous.consume(response) + rendezvous.terminate() + return _adaptation(pool, in_pool) + + +def adapt_event_unary_unary(method, pool): + def in_pool(rendezvous): + request = next(rendezvous) + method( + request, rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) + return _adaptation(pool, in_pool) + + +def adapt_event_unary_stream(method, pool): + def in_pool(rendezvous): + request = next(rendezvous) + method(request, rendezvous, _ServicerContext(rendezvous)) + return _adaptation(pool, in_pool) + + +def adapt_event_stream_unary(method, pool): + def in_pool(rendezvous): + request_consumer = method( + rendezvous.consume_and_terminate, _ServicerContext(rendezvous)) + for request in rendezvous: + request_consumer.consume(request) + request_consumer.terminate() + return _adaptation(pool, in_pool) + + +def adapt_event_stream_stream(method, pool): + def in_pool(rendezvous): + request_consumer = method(rendezvous, _ServicerContext(rendezvous)) + for request in rendezvous: + request_consumer.consume(request) + request_consumer.terminate() + 
return _adaptation(pool, in_pool) + + +def adapt_multi_method(multi_method, pool): + def adaptation(group, method, operator, operation_context): + rendezvous = _control.Rendezvous(operator, operation_context) + outcome = operation_context.add_termination_callback(rendezvous.set_outcome) + if outcome is None: + def in_pool(): + request_consumer = multi_method( + group, method, rendezvous, _ServicerContext(rendezvous)) + for request in rendezvous: + request_consumer.consume(request) + request_consumer.terminate() + pool.submit(_control.pool_wrap(in_pool, operation_context), rendezvous) + return utilities.full_subscription(rendezvous) + else: + raise abandonment.Abandoned() + return adaptation diff --git a/src/python/grpcio/grpc/framework/crust/implementations.py b/src/python/grpcio/grpc/framework/crust/implementations.py new file mode 100644 index 00000000000..12f7e796419 --- /dev/null +++ b/src/python/grpcio/grpc/framework/crust/implementations.py @@ -0,0 +1,352 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Entry points into the Crust layer of RPC Framework.""" + +from grpc.framework.common import cardinality +from grpc.framework.common import style +from grpc.framework.crust import _calls +from grpc.framework.crust import _service +from grpc.framework.interfaces.base import base +from grpc.framework.interfaces.face import face + + +class _BaseServicer(base.Servicer): + + def __init__(self, adapted_methods, adapted_multi_method): + self._adapted_methods = adapted_methods + self._adapted_multi_method = adapted_multi_method + + def service(self, group, method, context, output_operator): + adapted_method = self._adapted_methods.get((group, method), None) + if adapted_method is not None: + return adapted_method(output_operator, context) + elif self._adapted_multi_method is not None: + try: + return self._adapted_multi_method.service( + group, method, output_operator, context) + except face.NoSuchMethodError: + raise base.NoSuchMethodError() + else: + raise base.NoSuchMethodError() + + +class _UnaryUnaryMultiCallable(face.UnaryUnaryMultiCallable): + + def __init__(self, end, group, method, pool): + self._end = end + self._group = group + self._method = method + self._pool = pool + + def __call__( + self, request, timeout, metadata=None, with_call=False): + return _calls.blocking_unary_unary( + self._end, self._group, self._method, timeout, with_call, + metadata, request) + + def future(self, request, timeout, metadata=None): + return _calls.future_unary_unary( + self._end, self._group, self._method, timeout, metadata, + request) + + def event( + self, request, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_unary_unary( + self._end, self._group, self._method, timeout, metadata, + request, receiver, abortion_callback, self._pool) + + +class _UnaryStreamMultiCallable(face.UnaryStreamMultiCallable): + + def __init__(self, end, group, method, pool): + self._end = end + self._group = group + self._method = method + self._pool = pool + + def __call__(self, request, timeout, metadata=None): + return _calls.inline_unary_stream( + self._end, self._group, self._method, timeout, metadata, + request) + + def event( + self, request, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_unary_stream( + self._end, self._group, self._method, timeout, metadata, + request, receiver, abortion_callback, self._pool) + + +class _StreamUnaryMultiCallable(face.StreamUnaryMultiCallable): + + def __init__(self, end, group, method, pool): + self._end = end + self._group = group + self._method = method + self._pool = pool + + def __call__( + self, request_iterator, timeout, metadata=None, + with_call=False): + return _calls.blocking_stream_unary( + self._end, self._group, self._method, timeout, with_call, + metadata, request_iterator, self._pool) + + def future(self, request_iterator, timeout, metadata=None): + return _calls.future_stream_unary( + self._end, self._group, self._method, timeout, metadata, + request_iterator, self._pool) + + def event( + self, receiver, abortion_callback, timeout, metadata=None): + return _calls.event_stream_unary( + self._end, self._group, self._method, timeout, metadata, + receiver, abortion_callback, self._pool) + + +class _StreamStreamMultiCallable(face.StreamStreamMultiCallable): + + def __init__(self, end, group, method, pool): + self._end = end + self._group = group + self._method = method + self._pool = pool + + def __call__(self, request_iterator, timeout, metadata=None): + return _calls.inline_stream_stream( + self._end, 
self._group, self._method, timeout, metadata, + request_iterator, self._pool) + + def event( + self, receiver, abortion_callback, timeout, metadata=None): + return _calls.event_stream_stream( + self._end, self._group, self._method, timeout, metadata, + receiver, abortion_callback, self._pool) + + +class _GenericStub(face.GenericStub): + """An face.GenericStub implementation.""" + + def __init__(self, end, pool): + self._end = end + self._pool = pool + + def blocking_unary_unary( + self, group, method, request, timeout, metadata=None, + with_call=None): + return _calls.blocking_unary_unary( + self._end, group, method, timeout, with_call, metadata, + request) + + def future_unary_unary( + self, group, method, request, timeout, metadata=None): + return _calls.future_unary_unary( + self._end, group, method, timeout, metadata, request) + + def inline_unary_stream( + self, group, method, request, timeout, metadata=None): + return _calls.inline_unary_stream( + self._end, group, method, timeout, metadata, request) + + def blocking_stream_unary( + self, group, method, request_iterator, timeout, metadata=None, + with_call=None): + return _calls.blocking_stream_unary( + self._end, group, method, timeout, with_call, metadata, + request_iterator, self._pool) + + def future_stream_unary( + self, group, method, request_iterator, timeout, metadata=None): + return _calls.future_stream_unary( + self._end, group, method, timeout, metadata, + request_iterator, self._pool) + + def inline_stream_stream( + self, group, method, request_iterator, timeout, metadata=None): + return _calls.inline_stream_stream( + self._end, group, method, timeout, metadata, + request_iterator, self._pool) + + def event_unary_unary( + self, group, method, request, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_unary_unary( + self._end, group, method, timeout, metadata, request, + receiver, abortion_callback, self._pool) + + def event_unary_stream( + self, group, method, request, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_unary_stream( + self._end, group, method, timeout, metadata, request, + receiver, abortion_callback, self._pool) + + def event_stream_unary( + self, group, method, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_stream_unary( + self._end, group, method, timeout, metadata, receiver, + abortion_callback, self._pool) + + def event_stream_stream( + self, group, method, receiver, abortion_callback, timeout, + metadata=None): + return _calls.event_stream_stream( + self._end, group, method, timeout, metadata, receiver, + abortion_callback, self._pool) + + def unary_unary(self, group, method): + return _UnaryUnaryMultiCallable(self._end, group, method, self._pool) + + def unary_stream(self, group, method): + return _UnaryStreamMultiCallable(self._end, group, method, self._pool) + + def stream_unary(self, group, method): + return _StreamUnaryMultiCallable(self._end, group, method, self._pool) + + def stream_stream(self, group, method): + return _StreamStreamMultiCallable(self._end, group, method, self._pool) + + +class _DynamicStub(face.DynamicStub): + """An face.DynamicStub implementation.""" + + def __init__(self, end, group, cardinalities, pool): + self._end = end + self._group = group + self._cardinalities = cardinalities + self._pool = pool + + def __getattr__(self, attr): + method_cardinality = self._cardinalities.get(attr) + if method_cardinality is cardinality.Cardinality.UNARY_UNARY: + return 
_UnaryUnaryMultiCallable(self._end, self._group, attr, self._pool) + elif method_cardinality is cardinality.Cardinality.UNARY_STREAM: + return _UnaryStreamMultiCallable(self._end, self._group, attr, self._pool) + elif method_cardinality is cardinality.Cardinality.STREAM_UNARY: + return _StreamUnaryMultiCallable(self._end, self._group, attr, self._pool) + elif method_cardinality is cardinality.Cardinality.STREAM_STREAM: + return _StreamStreamMultiCallable( + self._end, self._group, attr, self._pool) + else: + raise AttributeError('_DynamicStub object has no attribute "%s"!' % attr) + + +def _adapt_method_implementations(method_implementations, pool): + adapted_implementations = {} + for name, method_implementation in method_implementations.iteritems(): + if method_implementation.style is style.Service.INLINE: + if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + adapted_implementations[name] = _service.adapt_inline_unary_unary( + method_implementation.unary_unary_inline, pool) + elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + adapted_implementations[name] = _service.adapt_inline_unary_stream( + method_implementation.unary_stream_inline, pool) + elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + adapted_implementations[name] = _service.adapt_inline_stream_unary( + method_implementation.stream_unary_inline, pool) + elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: + adapted_implementations[name] = _service.adapt_inline_stream_stream( + method_implementation.stream_stream_inline, pool) + elif method_implementation.style is style.Service.EVENT: + if method_implementation.cardinality is cardinality.Cardinality.UNARY_UNARY: + adapted_implementations[name] = _service.adapt_event_unary_unary( + method_implementation.unary_unary_event, pool) + elif method_implementation.cardinality is cardinality.Cardinality.UNARY_STREAM: + adapted_implementations[name] = _service.adapt_event_unary_stream( + method_implementation.unary_stream_event, pool) + elif method_implementation.cardinality is cardinality.Cardinality.STREAM_UNARY: + adapted_implementations[name] = _service.adapt_event_stream_unary( + method_implementation.stream_unary_event, pool) + elif method_implementation.cardinality is cardinality.Cardinality.STREAM_STREAM: + adapted_implementations[name] = _service.adapt_event_stream_stream( + method_implementation.stream_stream_event, pool) + return adapted_implementations + + +def servicer(method_implementations, multi_method_implementation, pool): + """Creates a base.Servicer. + + It is guaranteed that any passed face.MultiMethodImplementation will + only be called to service an RPC if there is no + face.MethodImplementation for the RPC method in the passed + method_implementations dictionary. + + Args: + method_implementations: A dictionary from RPC method name to + face.MethodImplementation object to be used to service the named + RPC method. + multi_method_implementation: An face.MultiMethodImplementation to be + used to service any RPCs not serviced by the + face.MethodImplementations given in the method_implementations + dictionary, or None. + pool: A thread pool. + + Returns: + A base.Servicer that services RPCs via the given implementations. 
+ """ + adapted_implementations = _adapt_method_implementations( + method_implementations, pool) + adapted_multi_method_implementation = _service.adapt_multi_method( + multi_method_implementation, pool) + return _BaseServicer( + adapted_implementations, adapted_multi_method_implementation) + + +def generic_stub(end, pool): + """Creates an face.GenericStub. + + Args: + end: A base.End. + pool: A futures.ThreadPoolExecutor. + + Returns: + A face.GenericStub that performs RPCs via the given base.End. + """ + return _GenericStub(end, pool) + + +def dynamic_stub(end, group, cardinalities, pool): + """Creates an face.DynamicStub. + + Args: + end: A base.End. + group: The group identifier for all RPCs to be made with the created + face.DynamicStub. + cardinalities: A dict from method identifier to cardinality.Cardinality + value identifying the cardinality of every RPC method to be supported by + the created face.DynamicStub. + pool: A futures.ThreadPoolExecutor. + + Returns: + A face.DynamicStub that performs RPCs via the given base.End. + """ + return _DynamicStub(end, group, cardinalities, pool) diff --git a/src/python/grpcio_test/grpc_test/_core_over_links_base_interface_test.py b/src/python/grpcio_test/grpc_test/_core_over_links_base_interface_test.py index 72b1ae56426..7fa90fe35f0 100644 --- a/src/python/grpcio_test/grpc_test/_core_over_links_base_interface_test.py +++ b/src/python/grpcio_test/grpc_test/_core_over_links_base_interface_test.py @@ -27,7 +27,7 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Tests the RPC Framework Core's implementation of the Base interface.""" +"""Tests Base interface compliance of the core-over-gRPC-links stack.""" import collections import logging diff --git a/src/python/grpcio_test/grpc_test/_crust_over_core_over_links_face_interface_test.py b/src/python/grpcio_test/grpc_test/_crust_over_core_over_links_face_interface_test.py new file mode 100644 index 00000000000..25b99cbbaf5 --- /dev/null +++ b/src/python/grpcio_test/grpc_test/_crust_over_core_over_links_face_interface_test.py @@ -0,0 +1,160 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests Face compliance of the crust-over-core-over-gRPC-links stack.""" + +import collections +import unittest + +from grpc._adapter import _intermediary_low +from grpc._links import invocation +from grpc._links import service +from grpc.framework.core import implementations as core_implementations +from grpc.framework.crust import implementations as crust_implementations +from grpc.framework.foundation import logging_pool +from grpc.framework.interfaces.links import utilities +from grpc_test import test_common +from grpc_test.framework.common import test_constants +from grpc_test.framework.interfaces.face import test_cases +from grpc_test.framework.interfaces.face import test_interfaces +from grpc_test.framework.interfaces.links import test_utilities + + +class _SerializationBehaviors( + collections.namedtuple( + '_SerializationBehaviors', + ('request_serializers', 'request_deserializers', 'response_serializers', + 'response_deserializers',))): + pass + + +def _serialization_behaviors_from_test_methods(test_methods): + request_serializers = {} + request_deserializers = {} + response_serializers = {} + response_deserializers = {} + for (group, method), test_method in test_methods.iteritems(): + request_serializers[group, method] = test_method.serialize_request + request_deserializers[group, method] = test_method.deserialize_request + response_serializers[group, method] = test_method.serialize_response + response_deserializers[group, method] = test_method.deserialize_response + return _SerializationBehaviors( + request_serializers, request_deserializers, response_serializers, + response_deserializers) + + +class _Implementation(test_interfaces.Implementation): + + def instantiate( + self, methods, method_implementations, multi_method_implementation): + pool = logging_pool.pool(test_constants.POOL_SIZE) + servicer = crust_implementations.servicer( + method_implementations, multi_method_implementation, pool) + serialization_behaviors = _serialization_behaviors_from_test_methods( + methods) + invocation_end_link = core_implementations.invocation_end_link() + service_end_link = core_implementations.service_end_link( + servicer, test_constants.DEFAULT_TIMEOUT, + test_constants.MAXIMUM_TIMEOUT) + service_grpc_link = service.service_link( + serialization_behaviors.request_deserializers, + serialization_behaviors.response_serializers) + port = service_grpc_link.add_port(0, None) + channel = _intermediary_low.Channel('localhost:%d' % port, None) + invocation_grpc_link = invocation.invocation_link( + channel, b'localhost', + serialization_behaviors.request_serializers, + serialization_behaviors.response_deserializers) + + invocation_end_link.join_link(invocation_grpc_link) + invocation_grpc_link.join_link(invocation_end_link) + service_grpc_link.join_link(service_end_link) + service_end_link.join_link(service_grpc_link) + service_end_link.start() + invocation_end_link.start() + invocation_grpc_link.start() + service_grpc_link.start() + + generic_stub = 
crust_implementations.generic_stub(invocation_end_link, pool) + # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest. + group = next(iter(methods))[0] + # TODO(nathaniel): Add a "cardinalities_by_group" attribute to + # _digest.TestServiceDigest. + cardinalities = { + method: method_object.cardinality() + for (group, method), method_object in methods.iteritems()} + dynamic_stub = crust_implementations.dynamic_stub( + invocation_end_link, group, cardinalities, pool) + + return generic_stub, {group: dynamic_stub}, ( + invocation_end_link, invocation_grpc_link, service_grpc_link, + service_end_link, pool) + + def destantiate(self, memo): + (invocation_end_link, invocation_grpc_link, service_grpc_link, + service_end_link, pool) = memo + invocation_end_link.stop(0).wait() + invocation_grpc_link.stop() + service_grpc_link.stop_gracefully() + service_end_link.stop(0).wait() + invocation_end_link.join_link(utilities.NULL_LINK) + invocation_grpc_link.join_link(utilities.NULL_LINK) + service_grpc_link.join_link(utilities.NULL_LINK) + service_end_link.join_link(utilities.NULL_LINK) + pool.shutdown(wait=True) + + def invocation_metadata(self): + return test_common.INVOCATION_INITIAL_METADATA + + def initial_metadata(self): + return test_common.SERVICE_INITIAL_METADATA + + def terminal_metadata(self): + return test_common.SERVICE_TERMINAL_METADATA + + def code(self): + return _intermediary_low.Code.OK + + def details(self): + return test_common.DETAILS + + def metadata_transmitted(self, original_metadata, transmitted_metadata): + return original_metadata is None or grpc_test_common.metadata_transmitted( + original_metadata, transmitted_metadata) + + +def load_tests(loader, tests, pattern): + return unittest.TestSuite( + tests=tuple( + loader.loadTestsFromTestCase(test_case_class) + for test_case_class in test_cases.test_cases(_Implementation()))) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_test/grpc_test/framework/_crust_over_core_face_interface_test.py b/src/python/grpcio_test/grpc_test/framework/_crust_over_core_face_interface_test.py new file mode 100644 index 00000000000..30bb85f6c3b --- /dev/null +++ b/src/python/grpcio_test/grpc_test/framework/_crust_over_core_face_interface_test.py @@ -0,0 +1,111 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests Face interface compliance of the crust-over-core stack.""" + +import collections +import unittest + +from grpc.framework.core import implementations as core_implementations +from grpc.framework.crust import implementations as crust_implementations +from grpc.framework.foundation import logging_pool +from grpc.framework.interfaces.links import utilities +from grpc_test.framework.common import test_constants +from grpc_test.framework.interfaces.face import test_cases +from grpc_test.framework.interfaces.face import test_interfaces +from grpc_test.framework.interfaces.links import test_utilities + + +class _Implementation(test_interfaces.Implementation): + + def instantiate( + self, methods, method_implementations, multi_method_implementation): + pool = logging_pool.pool(test_constants.POOL_SIZE) + servicer = crust_implementations.servicer( + method_implementations, multi_method_implementation, pool) + + service_end_link = core_implementations.service_end_link( + servicer, test_constants.DEFAULT_TIMEOUT, + test_constants.MAXIMUM_TIMEOUT) + invocation_end_link = core_implementations.invocation_end_link() + invocation_end_link.join_link(service_end_link) + service_end_link.join_link(invocation_end_link) + service_end_link.start() + invocation_end_link.start() + + generic_stub = crust_implementations.generic_stub(invocation_end_link, pool) + # TODO(nathaniel): Add a "groups" attribute to _digest.TestServiceDigest. + group = next(iter(methods))[0] + # TODO(nathaniel): Add a "cardinalities_by_group" attribute to + # _digest.TestServiceDigest. 
+ cardinalities = { + method: method_object.cardinality() + for (group, method), method_object in methods.iteritems()} + dynamic_stub = crust_implementations.dynamic_stub( + invocation_end_link, group, cardinalities, pool) + + return generic_stub, {group: dynamic_stub}, ( + invocation_end_link, service_end_link, pool) + + def destantiate(self, memo): + invocation_end_link, service_end_link, pool = memo + invocation_end_link.stop(0).wait() + service_end_link.stop(0).wait() + invocation_end_link.join_link(utilities.NULL_LINK) + service_end_link.join_link(utilities.NULL_LINK) + pool.shutdown(wait=True) + + def invocation_metadata(self): + return object() + + def initial_metadata(self): + return object() + + def terminal_metadata(self): + return object() + + def code(self): + return object() + + def details(self): + return object() + + def metadata_transmitted(self, original_metadata, transmitted_metadata): + return original_metadata is transmitted_metadata + + +def load_tests(loader, tests, pattern): + return unittest.TestSuite( + tests=tuple( + loader.loadTestsFromTestCase(test_case_class) + for test_case_class in test_cases.test_cases(_Implementation()))) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_3069_test_constant.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_3069_test_constant.py new file mode 100644 index 00000000000..363d9ce8f1c --- /dev/null +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_3069_test_constant.py @@ -0,0 +1,37 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A test constant working around issue 3069.""" + +# test_constants is referenced from specification in this module. +from grpc_test.framework.common import test_constants # pylint: disable=unused-import + +# TODO(issue 3069): Replace uses of this constant with +# test_constants.SHORT_TIMEOUT. 
+REALLY_SHORT_TIMEOUT = 0.1 diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py index 857ad5cf3e0..8804f3f2233 100644 --- a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_blocking_invocation_inline_service.py @@ -37,6 +37,7 @@ from grpc.framework.interfaces.face import face from grpc_test.framework.common import test_constants from grpc_test.framework.common import test_control from grpc_test.framework.common import test_coverage +from grpc_test.framework.interfaces.face import _3069_test_constant from grpc_test.framework.interfaces.face import _digest from grpc_test.framework.interfaces.face import _stock_service from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import @@ -170,7 +171,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(), self.assertRaises( face.ExpirationError): self._invoker.blocking(group, method)( - request, test_constants.SHORT_TIMEOUT) + request, _3069_test_constant.REALLY_SHORT_TIMEOUT) def testExpiredUnaryRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -181,7 +182,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(), self.assertRaises( face.ExpirationError): response_iterator = self._invoker.blocking(group, method)( - request, test_constants.SHORT_TIMEOUT) + request, _3069_test_constant.REALLY_SHORT_TIMEOUT) list(response_iterator) def testExpiredStreamRequestUnaryResponse(self): @@ -193,7 +194,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(), self.assertRaises( face.ExpirationError): self._invoker.blocking(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) def testExpiredStreamRequestStreamResponse(self): for (group, method), test_messages_sequence in ( @@ -204,7 +205,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(), self.assertRaises( face.ExpirationError): response_iterator = self._invoker.blocking(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) list(response_iterator) def testFailedUnaryRequestUnaryResponse(self): diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py index ea5cdeaea30..5a78b4bed24 100644 --- a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_event_invocation_synchronous_event_service.py @@ -37,6 +37,7 @@ from grpc.framework.interfaces.face import face from grpc_test.framework.common import test_constants from grpc_test.framework.common import test_control from grpc_test.framework.common import test_coverage +from grpc_test.framework.interfaces.face import _3069_test_constant from grpc_test.framework.interfaces.face import _digest from grpc_test.framework.interfaces.face import _receiver from grpc_test.framework.interfaces.face import _stock_service @@ -264,7 +265,8 @@ class TestCase(test_coverage.Coverage, 
unittest.TestCase): with self._control.pause(): self._invoker.event(group, method)( - request, receiver, receiver.abort, test_constants.SHORT_TIMEOUT) + request, receiver, receiver.abort, + _3069_test_constant.REALLY_SHORT_TIMEOUT) receiver.block_until_terminated() self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind) @@ -278,7 +280,8 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(): self._invoker.event(group, method)( - request, receiver, receiver.abort, test_constants.SHORT_TIMEOUT) + request, receiver, receiver.abort, + _3069_test_constant.REALLY_SHORT_TIMEOUT) receiver.block_until_terminated() self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind) @@ -290,7 +293,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): receiver = _receiver.Receiver() self._invoker.event(group, method)( - receiver, receiver.abort, test_constants.SHORT_TIMEOUT) + receiver, receiver.abort, _3069_test_constant.REALLY_SHORT_TIMEOUT) receiver.block_until_terminated() self.assertIs(face.Abortion.Kind.EXPIRED, receiver.abortion().kind) @@ -303,7 +306,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): receiver = _receiver.Receiver() call_consumer = self._invoker.event(group, method)( - receiver, receiver.abort, test_constants.SHORT_TIMEOUT) + receiver, receiver.abort, _3069_test_constant.REALLY_SHORT_TIMEOUT) for request in requests: call_consumer.consume(request) receiver.block_until_terminated() diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py index a649362cef8..d1107e1576d 100644 --- a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_future_invocation_asynchronous_event_service.py @@ -40,6 +40,7 @@ from grpc.framework.interfaces.face import face from grpc_test.framework.common import test_constants from grpc_test.framework.common import test_control from grpc_test.framework.common import test_coverage +from grpc_test.framework.interfaces.face import _3069_test_constant from grpc_test.framework.interfaces.face import _digest from grpc_test.framework.interfaces.face import _stock_service from grpc_test.framework.interfaces.face import test_interfaces # pylint: disable=unused-import @@ -265,7 +266,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(): response_future = self._invoker.future( - group, method)(request, test_constants.SHORT_TIMEOUT) + group, method)(request, _3069_test_constant.REALLY_SHORT_TIMEOUT) self.assertIsInstance( response_future.exception(), face.ExpirationError) with self.assertRaises(face.ExpirationError): @@ -279,7 +280,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(): response_iterator = self._invoker.future(group, method)( - request, test_constants.SHORT_TIMEOUT) + request, _3069_test_constant.REALLY_SHORT_TIMEOUT) with self.assertRaises(face.ExpirationError): list(response_iterator) @@ -291,7 +292,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(): response_future = self._invoker.future(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) self.assertIsInstance( response_future.exception(), face.ExpirationError) 
with self.assertRaises(face.ExpirationError): @@ -305,7 +306,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.pause(): response_iterator = self._invoker.future(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) with self.assertRaises(face.ExpirationError): list(response_iterator) @@ -317,7 +318,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.fail(): response_future = self._invoker.future(group, method)( - request, test_constants.SHORT_TIMEOUT) + request, _3069_test_constant.REALLY_SHORT_TIMEOUT) # Because the servicer fails outside of the thread from which the # servicer-side runtime called into it its failure is @@ -340,7 +341,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): # expiration of the RPC. with self._control.fail(), self.assertRaises(face.ExpirationError): response_iterator = self._invoker.future(group, method)( - request, test_constants.SHORT_TIMEOUT) + request, _3069_test_constant.REALLY_SHORT_TIMEOUT) list(response_iterator) def testFailedStreamRequestUnaryResponse(self): @@ -351,7 +352,7 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): with self._control.fail(): response_future = self._invoker.future(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) # Because the servicer fails outside of the thread from which the # servicer-side runtime called into it its failure is @@ -374,5 +375,5 @@ class TestCase(test_coverage.Coverage, unittest.TestCase): # expiration of the RPC. with self._control.fail(), self.assertRaises(face.ExpirationError): response_iterator = self._invoker.future(group, method)( - iter(requests), test_constants.SHORT_TIMEOUT) + iter(requests), _3069_test_constant.REALLY_SHORT_TIMEOUT) list(response_iterator) diff --git a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py index 1dd2ec36331..808e2c4e36a 100644 --- a/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py +++ b/src/python/grpcio_test/grpc_test/framework/interfaces/face/_stock_service.py @@ -1,4 +1,4 @@ -B# Copyright 2015, Google Inc. +# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without diff --git a/tools/run_tests/run_python.sh b/tools/run_tests/run_python.sh index 6fdca93fd5f..8a111268531 100755 --- a/tools/run_tests/run_python.sh +++ b/tools/run_tests/run_python.sh @@ -45,6 +45,8 @@ source "python"$PYVER"_virtual_environment"/bin/activate # py.test (or find another tool or *something*) that's acceptable to the rest of # the team... 
"python"$PYVER -m grpc_test._core_over_links_base_interface_test +"python"$PYVER -m grpc_test._crust_over_core_over_links_face_interface_test +"python"$PYVER -m grpc_test.framework._crust_over_core_face_interface_test "python"$PYVER -m grpc_test.framework.core._base_interface_test "python"$PYVER $GRPCIO_TEST/setup.py test -a "-n8 --cov=grpc --junitxml=./report.xml" From d090fe1379932d7d785fa805339110e70b96bd87 Mon Sep 17 00:00:00 2001 From: yang-g Date: Tue, 25 Aug 2015 16:53:07 -0700 Subject: [PATCH 12/29] auth context api change for string_ref --- Makefile | 12 ++++----- build.json | 6 +++++ include/grpc++/support/auth_context.h | 7 +++--- src/cpp/common/auth_property_iterator.cc | 6 ++--- src/cpp/common/secure_auth_context.cc | 17 +++++++------ src/cpp/common/secure_auth_context.h | 6 ++--- .../cpp/common/auth_property_iterator_test.cc | 15 ++++++----- test/cpp/common/secure_auth_context_test.cc | 25 +++++++++++-------- test/cpp/end2end/end2end_test.cc | 15 +++++------ tools/run_tests/sources_and_headers.json | 10 ++++++-- vsprojects/Grpc.mak | 8 +++--- 11 files changed, 74 insertions(+), 53 deletions(-) diff --git a/Makefile b/Makefile index 77cb62086b1..e872da981b6 100644 --- a/Makefile +++ b/Makefile @@ -8945,16 +8945,16 @@ $(BINDIR)/$(CONFIG)/auth_property_iterator_test: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/auth_property_iterator_test: $(PROTOBUF_DEP) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/auth_property_iterator_test: $(PROTOBUF_DEP) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(E) "[LD] Linking $@" $(Q) mkdir -p `dirname $@` - $(Q) $(LDXX) $(LDFLAGS) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/auth_property_iterator_test + $(Q) $(LDXX) $(LDFLAGS) $(AUTH_PROPERTY_ITERATOR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/auth_property_iterator_test endif endif -$(OBJDIR)/$(CONFIG)/test/cpp/common/auth_property_iterator_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(OBJDIR)/$(CONFIG)/test/cpp/common/auth_property_iterator_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a deps_auth_property_iterator_test: $(AUTH_PROPERTY_ITERATOR_TEST_OBJS:.o=.dep) ifneq ($(NO_SECURE),true) @@ -10155,16 +10155,16 @@ $(BINDIR)/$(CONFIG)/secure_auth_context_test: protobuf_dep_error else -$(BINDIR)/$(CONFIG)/secure_auth_context_test: $(PROTOBUF_DEP) $(SECURE_AUTH_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a +$(BINDIR)/$(CONFIG)/secure_auth_context_test: $(PROTOBUF_DEP) $(SECURE_AUTH_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a 
$(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
 	$(E) "[LD] Linking $@"
 	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LDXX) $(LDFLAGS) $(SECURE_AUTH_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/secure_auth_context_test
+	$(Q) $(LDXX) $(LDFLAGS) $(SECURE_AUTH_CONTEXT_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/secure_auth_context_test
 endif
 endif
-$(OBJDIR)/$(CONFIG)/test/cpp/common/secure_auth_context_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a
+$(OBJDIR)/$(CONFIG)/test/cpp/common/secure_auth_context_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
 deps_secure_auth_context_test: $(SECURE_AUTH_CONTEXT_TEST_OBJS:.o=.dep)
 ifneq ($(NO_SECURE),true)
diff --git a/build.json b/build.json
index c974250d7cd..56f14c6590f 100644
--- a/build.json
+++ b/build.json
@@ -2020,8 +2020,11 @@
       "test/cpp/common/auth_property_iterator_test.cc"
     ],
     "deps": [
+      "grpc++_test_util",
+      "grpc_test_util",
       "grpc++",
       "grpc",
+      "gpr_test_util",
       "gpr"
     ]
   },
@@ -2572,8 +2575,11 @@
       "test/cpp/common/secure_auth_context_test.cc"
     ],
     "deps": [
+      "grpc++_test_util",
+      "grpc_test_util",
       "grpc++",
       "grpc",
+      "gpr_test_util",
       "gpr"
     ]
   },
diff --git a/include/grpc++/support/auth_context.h b/include/grpc++/support/auth_context.h
index f4f2dcf5bbd..67e3e66c05b 100644
--- a/include/grpc++/support/auth_context.h
+++ b/include/grpc++/support/auth_context.h
@@ -38,6 +38,7 @@
 #include
 #include
+#include <grpc++/support/string_ref.h>
 struct grpc_auth_context;
 struct grpc_auth_property;
@@ -46,7 +47,7 @@ struct grpc_auth_property_iterator;
 namespace grpc {
 class SecureAuthContext;
-typedef std::pair<grpc::string, grpc::string> AuthProperty;
+typedef std::pair<grpc::string_ref, grpc::string_ref> AuthProperty;
 class AuthPropertyIterator
     : public std::iterator<std::input_iterator_tag, const AuthProperty> {
@@ -78,11 +79,11 @@ class AuthContext {
   // A peer identity, in general is one or more properties (in which case they
   // have the same name).
-  virtual std::vector<grpc::string> GetPeerIdentity() const = 0;
+  virtual std::vector<grpc::string_ref> GetPeerIdentity() const = 0;
   virtual grpc::string GetPeerIdentityPropertyName() const = 0;
   // Returns all the property values with the given name.
-  virtual std::vector<grpc::string> FindPropertyValues(
+  virtual std::vector<grpc::string_ref> FindPropertyValues(
       const grpc::string& name) const = 0;
   // Iteration over all the properties.
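For illustration only (this snippet is not part of the patch): the auth_context.h change above swaps owning grpc::string values for non-owning grpc::string_ref views, so AuthProperty and the results of GetPeerIdentity() and FindPropertyValues() now point into memory held by the underlying grpc_auth_context. Code that keeps a value past the lifetime of the AuthContext should copy it out, as the updated tests do with the grpc::testing::ToString helper from test/cpp/util/string_ref_helper.h. A minimal sketch of reading a property under the new signatures follows; the ClientContext variable named context and the copy loop are assumptions made for the example, not code from the patch.

    // Sketch: read auth properties through the string_ref-based API and copy
    // them, since a string_ref does not own its bytes and must not outlive
    // the AuthContext it came from.
    std::shared_ptr<const grpc::AuthContext> auth_ctx = context.auth_context();
    std::vector<grpc::string_ref> types =
        auth_ctx->FindPropertyValues("transport_security_type");
    std::vector<grpc::string> owned;
    for (size_t i = 0; i < types.size(); ++i) {
      owned.push_back(grpc::string(types[i].data(), types[i].size()));
    }
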
diff --git a/src/cpp/common/auth_property_iterator.cc b/src/cpp/common/auth_property_iterator.cc index 5ccf8cf72c1..fa6da9d7a8d 100644 --- a/src/cpp/common/auth_property_iterator.cc +++ b/src/cpp/common/auth_property_iterator.cc @@ -77,9 +77,9 @@ bool AuthPropertyIterator::operator!=(const AuthPropertyIterator& rhs) const { } const AuthProperty AuthPropertyIterator::operator*() { - return std::make_pair( - grpc::string(property_->name), - grpc::string(property_->value, property_->value_length)); + return std::pair( + property_->name, + grpc::string_ref(property_->value, property_->value_length)); } } // namespace grpc diff --git a/src/cpp/common/secure_auth_context.cc b/src/cpp/common/secure_auth_context.cc index 87d7bab75c6..b18a8537c99 100644 --- a/src/cpp/common/secure_auth_context.cc +++ b/src/cpp/common/secure_auth_context.cc @@ -41,15 +41,16 @@ SecureAuthContext::SecureAuthContext(grpc_auth_context* ctx) : ctx_(ctx) {} SecureAuthContext::~SecureAuthContext() { grpc_auth_context_release(ctx_); } -std::vector SecureAuthContext::GetPeerIdentity() const { +std::vector SecureAuthContext::GetPeerIdentity() const { if (!ctx_) { - return std::vector(); + return std::vector(); } grpc_auth_property_iterator iter = grpc_auth_context_peer_identity(ctx_); - std::vector identity; + std::vector identity; const grpc_auth_property* property = nullptr; while ((property = grpc_auth_property_iterator_next(&iter))) { - identity.push_back(grpc::string(property->value, property->value_length)); + identity.push_back( + grpc::string_ref(property->value, property->value_length)); } return identity; } @@ -62,17 +63,17 @@ grpc::string SecureAuthContext::GetPeerIdentityPropertyName() const { return name == nullptr ? "" : name; } -std::vector SecureAuthContext::FindPropertyValues( +std::vector SecureAuthContext::FindPropertyValues( const grpc::string& name) const { if (!ctx_) { - return std::vector(); + return std::vector(); } grpc_auth_property_iterator iter = grpc_auth_context_find_properties_by_name(ctx_, name.c_str()); const grpc_auth_property* property = nullptr; - std::vector values; + std::vector values; while ((property = grpc_auth_property_iterator_next(&iter))) { - values.push_back(grpc::string(property->value, property->value_length)); + values.push_back(grpc::string_ref(property->value, property->value_length)); } return values; } diff --git a/src/cpp/common/secure_auth_context.h b/src/cpp/common/secure_auth_context.h index 01b71261898..7f622b890b4 100644 --- a/src/cpp/common/secure_auth_context.h +++ b/src/cpp/common/secure_auth_context.h @@ -46,12 +46,12 @@ class SecureAuthContext GRPC_FINAL : public AuthContext { ~SecureAuthContext() GRPC_OVERRIDE; - std::vector GetPeerIdentity() const GRPC_OVERRIDE; + std::vector GetPeerIdentity() const GRPC_OVERRIDE; grpc::string GetPeerIdentityPropertyName() const GRPC_OVERRIDE; - std::vector FindPropertyValues(const grpc::string& name) const - GRPC_OVERRIDE; + std::vector FindPropertyValues( + const grpc::string& name) const GRPC_OVERRIDE; AuthPropertyIterator begin() const GRPC_OVERRIDE; diff --git a/test/cpp/common/auth_property_iterator_test.cc b/test/cpp/common/auth_property_iterator_test.cc index 630c38c7f67..e6226d6a093 100644 --- a/test/cpp/common/auth_property_iterator_test.cc +++ b/test/cpp/common/auth_property_iterator_test.cc @@ -35,11 +35,14 @@ #include #include #include "src/cpp/common/secure_auth_context.h" +#include "test/cpp/util/string_ref_helper.h" extern "C" { #include "src/core/security/security_context.h" } +using ::grpc::testing::ToString; 
+ namespace grpc { namespace { @@ -84,12 +87,12 @@ TEST_F(AuthPropertyIteratorTest, GeneralTest) { AuthProperty p1 = *iter; iter++; AuthProperty p2 = *iter; - EXPECT_EQ("name", p0.first); - EXPECT_EQ("chapi", p0.second); - EXPECT_EQ("name", p1.first); - EXPECT_EQ("chapo", p1.second); - EXPECT_EQ("foo", p2.first); - EXPECT_EQ("bar", p2.second); + EXPECT_EQ("name", ToString(p0.first)); + EXPECT_EQ("chapi", ToString(p0.second)); + EXPECT_EQ("name", ToString(p1.first)); + EXPECT_EQ("chapo", ToString(p1.second)); + EXPECT_EQ("foo", ToString(p2.first)); + EXPECT_EQ("bar", ToString(p2.second)); ++iter; EXPECT_EQ(empty_iter, iter); } diff --git a/test/cpp/common/secure_auth_context_test.cc b/test/cpp/common/secure_auth_context_test.cc index c71ef58023f..25538c18537 100644 --- a/test/cpp/common/secure_auth_context_test.cc +++ b/test/cpp/common/secure_auth_context_test.cc @@ -35,11 +35,14 @@ #include #include #include "src/cpp/common/secure_auth_context.h" +#include "test/cpp/util/string_ref_helper.h" extern "C" { #include "src/core/security/security_context.h" } +using grpc::testing::ToString; + namespace grpc { namespace { @@ -63,14 +66,14 @@ TEST_F(SecureAuthContextTest, Properties) { EXPECT_EQ(1, grpc_auth_context_set_peer_identity_property_name(ctx, "name")); SecureAuthContext context(ctx); - std::vector peer_identity = context.GetPeerIdentity(); + std::vector peer_identity = context.GetPeerIdentity(); EXPECT_EQ(2u, peer_identity.size()); - EXPECT_EQ("chapi", peer_identity[0]); - EXPECT_EQ("chapo", peer_identity[1]); + EXPECT_EQ("chapi", ToString(peer_identity[0])); + EXPECT_EQ("chapo", ToString(peer_identity[1])); EXPECT_EQ("name", context.GetPeerIdentityPropertyName()); - std::vector bar = context.FindPropertyValues("foo"); + std::vector bar = context.FindPropertyValues("foo"); EXPECT_EQ(1u, bar.size()); - EXPECT_EQ("bar", bar[0]); + EXPECT_EQ("bar", ToString(bar[0])); } TEST_F(SecureAuthContextTest, Iterators) { @@ -88,12 +91,12 @@ TEST_F(SecureAuthContextTest, Iterators) { AuthProperty p1 = *iter; iter++; AuthProperty p2 = *iter; - EXPECT_EQ("name", p0.first); - EXPECT_EQ("chapi", p0.second); - EXPECT_EQ("name", p1.first); - EXPECT_EQ("chapo", p1.second); - EXPECT_EQ("foo", p2.first); - EXPECT_EQ("bar", p2.second); + EXPECT_EQ("name", ToString(p0.first)); + EXPECT_EQ("chapi", ToString(p0.second)); + EXPECT_EQ("name", ToString(p1.first)); + EXPECT_EQ("chapo", ToString(p1.second)); + EXPECT_EQ("foo", ToString(p2.first)); + EXPECT_EQ("bar", ToString(p2.second)); ++iter; EXPECT_EQ(context.end(), iter); } diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index 0d5bf36df72..9826837c608 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -81,10 +81,10 @@ void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request, void CheckServerAuthContext(const ServerContext* context) { std::shared_ptr auth_ctx = context->auth_context(); - std::vector ssl = + std::vector ssl = auth_ctx->FindPropertyValues("transport_security_type"); EXPECT_EQ(1u, ssl.size()); - EXPECT_EQ("ssl", ssl[0]); + EXPECT_EQ("ssl", ToString(ssl[0])); EXPECT_TRUE(auth_ctx->GetPeerIdentityPropertyName().empty()); EXPECT_TRUE(auth_ctx->GetPeerIdentity().empty()); } @@ -840,16 +840,17 @@ TEST_F(End2endTest, ClientAuthContext) { EXPECT_TRUE(s.ok()); std::shared_ptr auth_ctx = context.auth_context(); - std::vector ssl = + std::vector ssl = auth_ctx->FindPropertyValues("transport_security_type"); EXPECT_EQ(1u, ssl.size()); - EXPECT_EQ("ssl", ssl[0]); + 
EXPECT_EQ("ssl", ToString(ssl[0])); EXPECT_EQ("x509_subject_alternative_name", auth_ctx->GetPeerIdentityPropertyName()); EXPECT_EQ(3u, auth_ctx->GetPeerIdentity().size()); - EXPECT_EQ("*.test.google.fr", auth_ctx->GetPeerIdentity()[0]); - EXPECT_EQ("waterzooi.test.google.be", auth_ctx->GetPeerIdentity()[1]); - EXPECT_EQ("*.test.youtube.com", auth_ctx->GetPeerIdentity()[2]); + EXPECT_EQ("*.test.google.fr", ToString(auth_ctx->GetPeerIdentity()[0])); + EXPECT_EQ("waterzooi.test.google.be", + ToString(auth_ctx->GetPeerIdentity()[1])); + EXPECT_EQ("*.test.youtube.com", ToString(auth_ctx->GetPeerIdentity()[2])); } // Make the response larger than the flow control window. diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index a89e5340ed8..5facf582ce9 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -1068,8 +1068,11 @@ { "deps": [ "gpr", + "gpr_test_util", "grpc", - "grpc++" + "grpc++", + "grpc++_test_util", + "grpc_test_util" ], "headers": [], "language": "c++", @@ -1573,8 +1576,11 @@ { "deps": [ "gpr", + "gpr_test_util", "grpc", - "grpc++" + "grpc++", + "grpc++_test_util", + "grpc_test_util" ], "headers": [], "language": "c++", diff --git a/vsprojects/Grpc.mak b/vsprojects/Grpc.mak index 19cb39741e0..ad390e7cdab 100644 --- a/vsprojects/Grpc.mak +++ b/vsprojects/Grpc.mak @@ -615,10 +615,10 @@ async_end2end_test: async_end2end_test.exe echo Running async_end2end_test $(OUT_DIR)\async_end2end_test.exe -auth_property_iterator_test.exe: build_grpc++ build_grpc build_gpr $(OUT_DIR) +auth_property_iterator_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR) echo Building auth_property_iterator_test $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\common\auth_property_iterator_test.cc - $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\auth_property_iterator_test.exe" Debug\grpc++.lib Debug\grpc.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\auth_property_iterator_test.obj + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\auth_property_iterator_test.exe" Debug\grpc++_test_util.lib Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\auth_property_iterator_test.obj auth_property_iterator_test: auth_property_iterator_test.exe echo Running auth_property_iterator_test $(OUT_DIR)\auth_property_iterator_test.exe @@ -751,10 +751,10 @@ reconnect_interop_server: reconnect_interop_server.exe echo Running reconnect_interop_server $(OUT_DIR)\reconnect_interop_server.exe -secure_auth_context_test.exe: build_grpc++ build_grpc build_gpr $(OUT_DIR) +secure_auth_context_test.exe: Debug\grpc++_test_util.lib build_grpc_test_util build_grpc++ build_grpc build_gpr_test_util build_gpr $(OUT_DIR) echo Building secure_auth_context_test $(CC) $(CXXFLAGS) /Fo:$(OUT_DIR)\ $(REPO_ROOT)\test\cpp\common\secure_auth_context_test.cc - $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\secure_auth_context_test.exe" Debug\grpc++.lib Debug\grpc.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\secure_auth_context_test.obj + $(LINK) $(LFLAGS) /OUT:"$(OUT_DIR)\secure_auth_context_test.exe" Debug\grpc++_test_util.lib Debug\grpc_test_util.lib Debug\grpc++.lib Debug\grpc.lib Debug\gpr_test_util.lib Debug\gpr.lib $(CXX_LIBS) $(LIBS) $(OUT_DIR)\secure_auth_context_test.obj secure_auth_context_test: secure_auth_context_test.exe echo Running secure_auth_context_test $(OUT_DIR)\secure_auth_context_test.exe From 
18b3ccb2f21ebb79d5045943f09660c1c623a18c Mon Sep 17 00:00:00 2001 From: Masood Malekghassemi Date: Wed, 26 Aug 2015 12:08:16 -0700 Subject: [PATCH 13/29] Temporary fix on Python doc to alleviate confusion cc grpc/grpc-common#274, #2896 --- src/python/README.md | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/python/README.md b/src/python/README.md index de0142db05e..affce648843 100644 --- a/src/python/README.md +++ b/src/python/README.md @@ -52,9 +52,19 @@ BUILDING FROM SOURCE --------------------- - Clone this repository +- Initialize the git submodules +``` +$ git submodule update --init +``` + +- Make the libraries +``` +$ make +``` + - Use build_python.sh to build the Python code and install it into a virtual environment ``` -$ tools/run_tests/build_python.sh +$ CONFIG=opt tools/run_tests/build_python.sh 2.7 ``` TESTING @@ -62,7 +72,7 @@ TESTING - Use run_python.sh to run gRPC as it was installed into the virtual environment ``` -$ tools/run_tests/run_python.sh +$ CONFIG=opt PYVER=2.7 tools/run_tests/run_python.sh ``` PACKAGING From b2ea0b9f4808f4ce19538cd26e92c508c13b6552 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 13:06:53 -0700 Subject: [PATCH 14/29] Up asan/tsan timeouts - they need it --- tools/run_tests/jobset.py | 1 - tools/run_tests/run_tests.py | 8 +++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/run_tests/jobset.py b/tools/run_tests/jobset.py index 79dde55d6f2..2a863191259 100755 --- a/tools/run_tests/jobset.py +++ b/tools/run_tests/jobset.py @@ -174,7 +174,6 @@ class Job(object): for k, v in add_env.iteritems(): env[k] = v self._start = time.time() - print spec.cmdline self._process = subprocess.Popen(args=spec.cmdline, stderr=subprocess.STDOUT, stdout=self._tempfile, diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 80854001d39..977d9c8aeaf 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -70,13 +70,14 @@ def platform_string(): # SimpleConfig: just compile with CONFIG=config, and run the binary to test class SimpleConfig(object): - def __init__(self, config, environ=None): + def __init__(self, config, environ=None, timeout_seconds=5*60): if environ is None: environ = {} self.build_config = config self.allow_hashing = (config != 'gcov') self.environ = environ self.environ['CONFIG'] = config + self.timeout_seconds = timeout_seconds def job_spec(self, cmdline, hash_targets, shortname=None, environ={}): """Construct a jobset.JobSpec for a test under this config @@ -96,6 +97,7 @@ class SimpleConfig(object): return jobset.JobSpec(cmdline=cmdline, shortname=shortname, environ=actual_environ, + timeout_seconds=self.timeout_seconds, hash_targets=hash_targets if self.allow_hashing else None) @@ -354,11 +356,11 @@ class Build(object): _CONFIGS = { 'dbg': SimpleConfig('dbg'), 'opt': SimpleConfig('opt'), - 'tsan': SimpleConfig('tsan', environ={ + 'tsan': SimpleConfig('tsan', timeout_seconds=10*60, environ={ 'TSAN_OPTIONS': 'suppressions=tools/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1'}), 'msan': SimpleConfig('msan'), 'ubsan': SimpleConfig('ubsan'), - 'asan': SimpleConfig('asan', environ={ + 'asan': SimpleConfig('asan', timeout_seconds=7*60, environ={ 'ASAN_OPTIONS': 'detect_leaks=1:color=always:suppressions=tools/tsan_suppressions.txt', 'LSAN_OPTIONS': 'report_objects=1'}), 'asan-noleaks': SimpleConfig('asan', environ={ From 0882a353d54e99276e7f63d49c43743e6a38d4d2 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 
Aug 2015 13:21:14 -0700 Subject: [PATCH 15/29] Fix refcounting --- src/core/iomgr/tcp_windows.c | 73 ++++++++++++++++++++++++++---------- 1 file changed, 53 insertions(+), 20 deletions(-) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index e3abe1bebc6..60c1ab3edd6 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -96,17 +96,44 @@ typedef struct grpc_tcp { char *peer_string; } grpc_tcp; -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } +static void tcp_free(grpc_tcp *tcp) { + grpc_winsocket_orphan(tcp->socket); + gpr_mu_destroy(&tcp->mu); + gpr_free(tcp->peer_string); + gpr_free(tcp); +} +#define GRPC_TCP_REFCOUNT_DEBUG +#ifdef GRPC_TCP_REFCOUNT_DEBUG +#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) +#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) +static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp, + reason, tcp->refcount.count, tcp->refcount.count - 1); + if (gpr_unref(&tcp->refcount)) { + tcp_free(tcp); + } +} + +static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, + int line) { + gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp, + reason, tcp->refcount.count, tcp->refcount.count + 1); + gpr_ref(&tcp->refcount); +} +#else +#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) +#define TCP_REF(tcp, reason) tcp_ref((tcp)) static void tcp_unref(grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - grpc_winsocket_orphan(tcp->socket); - gpr_mu_destroy(&tcp->mu); - gpr_free(tcp->peer_string); - gpr_free(tcp); + tcp_free(tcp); } } +static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } +#endif + /* Asynchronous callback from the IOCP, or the background thread. */ static int on_read(grpc_tcp *tcp, int from_iocp) { grpc_winsocket *socket = tcp->socket; @@ -131,7 +158,6 @@ static int on_read(grpc_tcp *tcp, int from_iocp) { tcp->socket->read_info.outstanding = 0; gpr_slice_unref(tcp->read_slice); } - tcp_unref(tcp); return 0; } @@ -166,8 +192,10 @@ static void on_read_cb(void *tcpp, int from_iocp) { grpc_iomgr_closure *cb = tcp->read_cb; int success = on_read(tcp, from_iocp); tcp->read_cb = NULL; - tcp_unref(tcp); - cb->cb(cb->cb_arg, success); + TCP_UNREF(tcp, "read"); + if (cb) { + cb->cb(cb->cb_arg, success); + } } static grpc_endpoint_op_status win_read(grpc_endpoint *ep, @@ -185,6 +213,9 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, if (tcp->shutting_down) { return GRPC_ENDPOINT_ERROR; } + + TCP_REF(tcp, "read"); + tcp->socket->read_info.outstanding = 1; tcp->read_cb = cb; tcp->read_slices = read_slices; @@ -201,8 +232,11 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, /* Did we get data immediately ? Yay. */ if (info->wsa_error != WSAEWOULDBLOCK) { + int ok; info->bytes_transfered = bytes_read; - return on_read(tcp, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; + ok = on_read(tcp, 1); + TCP_UNREF(tcp, "read"); + return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } /* Otherwise, let's retry, by queuing a read. */ @@ -213,12 +247,13 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { + int ok; info->wsa_error = wsa_error; - return on_read(tcp, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; + ok = on_read(tcp, 1); + return ok ? 
GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; } } - tcp_ref(tcp); grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp); return GRPC_ENDPOINT_PENDING; } @@ -247,7 +282,7 @@ static void on_write(void *tcpp, int from_iocp) { if (from_iocp) { tcp->socket->write_info.outstanding = 0; } - tcp_unref(tcp); + TCP_UNREF(tcp, "write"); if (cb) { cb->cb(cb->cb_arg, 0); } @@ -270,7 +305,7 @@ static void on_write(void *tcpp, int from_iocp) { tcp->socket->write_info.outstanding = 0; - tcp_unref(tcp); + TCP_UNREF(tcp, "write"); cb->cb(cb->cb_arg, success); } @@ -292,7 +327,7 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, if (tcp->shutting_down) { return GRPC_ENDPOINT_ERROR; } - tcp_ref(tcp); + TCP_REF(tcp, "write"); tcp->socket->write_info.outstanding = 1; tcp->write_cb = cb; @@ -330,7 +365,7 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, } if (allocated) gpr_free(allocated); tcp->socket->write_info.outstanding = 0; - tcp_unref(tcp); + TCP_UNREF(tcp, "write"); return ret; } @@ -345,7 +380,7 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { tcp->socket->write_info.outstanding = 0; - tcp_unref(tcp); + TCP_UNREF(tcp, "write"); return GRPC_ENDPOINT_ERROR; } } @@ -378,19 +413,17 @@ static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) { concurrent access of the data structure in that regard. */ static void win_shutdown(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - int extra_refs = 0; gpr_mu_lock(&tcp->mu); /* At that point, what may happen is that we're already inside the IOCP callback. See the comments in on_read and on_write. */ tcp->shutting_down = 1; - extra_refs = grpc_winsocket_shutdown(tcp->socket); - while (extra_refs--) tcp_ref(tcp); + grpc_winsocket_shutdown(tcp->socket); gpr_mu_unlock(&tcp->mu); } static void win_destroy(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - tcp_unref(tcp); + TCP_UNREF(tcp, "destroy"); } static char *win_get_peer(grpc_endpoint *ep) { From 57c48c6845a1b7d9f708bb0e24c45c30fa8668ef Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 13:21:48 -0700 Subject: [PATCH 16/29] Turn off debug --- src/core/iomgr/tcp_windows.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index 60c1ab3edd6..cd2d95d4822 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -103,7 +103,7 @@ static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); } -#define GRPC_TCP_REFCOUNT_DEBUG +/*#define GRPC_TCP_REFCOUNT_DEBUG*/ #ifdef GRPC_TCP_REFCOUNT_DEBUG #define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) From 3fda8ed12235b61bb76920361e148edc53945077 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 13:58:27 -0700 Subject: [PATCH 17/29] Restore google test --- .gitmodules | 2 +- third_party/googletest | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 160000 third_party/googletest diff --git a/.gitmodules b/.gitmodules index 434d01b3d5f..273e83a770c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,4 +14,4 @@ url = https://github.com/gflags/gflags.git [submodule "third_party/googletest"] path = third_party/googletest - url = git://github.com/google/googletest + url = git@github.com:google/googletest diff --git a/third_party/googletest b/third_party/googletest new file mode 160000 index 00000000000..c99458533a9 --- 
/dev/null +++ b/third_party/googletest @@ -0,0 +1 @@ +Subproject commit c99458533a9b4c743ed51537e25989ea55944908 From 4ac2018fd9bb62c07554d2db0366a90d02f7ddf2 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 14:00:48 -0700 Subject: [PATCH 18/29] Fix up path --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 273e83a770c..4ab7f3ca5b6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,4 +14,4 @@ url = https://github.com/gflags/gflags.git [submodule "third_party/googletest"] path = third_party/googletest - url = git@github.com:google/googletest + url = https://github.com/google/googletest.git From d0532654b02850d103730726f9e20147c3c7d292 Mon Sep 17 00:00:00 2001 From: Stanley Cheung Date: Wed, 26 Aug 2015 14:02:33 -0700 Subject: [PATCH 19/29] php: fix build failure on mac --- tools/run_tests/build_php.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/run_tests/build_php.sh b/tools/run_tests/build_php.sh index 2d52a6e33b4..1d81779b6ac 100755 --- a/tools/run_tests/build_php.sh +++ b/tools/run_tests/build_php.sh @@ -37,6 +37,7 @@ cd $(dirname $0)/../.. root=`pwd` export GRPC_LIB_SUBDIR=libs/$CONFIG +export CFLAGS="-Wno-parentheses-equality" # build php cd src/php From 1e27e7d605e43ac6a4d7d2ace75c079f3569ddc0 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 14:20:04 -0700 Subject: [PATCH 20/29] Use port server on windows --- test/core/util/port_posix.c | 14 +++---- test/core/util/port_windows.c | 79 +++++++++++++++++++++++++++++++---- 2 files changed, 77 insertions(+), 16 deletions(-) diff --git a/test/core/util/port_posix.c b/test/core/util/port_posix.c index 836e62a5412..4781d334e23 100644 --- a/test/core/util/port_posix.c +++ b/test/core/util/port_posix.c @@ -198,14 +198,13 @@ int grpc_pick_unused_port(void) { races with other processes on kernels that want to reuse the same port numbers over and over. */ - /* In alternating iterations we try UDP ports before TCP ports UDP + /* In alternating iterations we trial UDP ports before TCP ports UDP ports -- it could be the case that this machine has been using up UDP ports and they are scarcer. */ /* Type of port to first pick in next iteration */ int is_tcp = 1; - int try - = 0; + int trial = 0; char *env = gpr_getenv("GRPC_TEST_PORT_SERVER"); if (env) { @@ -218,11 +217,10 @@ int grpc_pick_unused_port(void) { for (;;) { int port; - try - ++; - if (try == 1) { + trial++; + if (trial == 1) { port = getpid() % (65536 - 30000) + 30000; - } else if (try <= NUM_RANDOM_PORTS_TO_PICK) { + } else if (trial <= NUM_RANDOM_PORTS_TO_PICK) { port = rand() % (65536 - 30000) + 30000; } else { port = 0; @@ -239,7 +237,7 @@ int grpc_pick_unused_port(void) { GPR_ASSERT(port > 0); /* Check that the port # is free for the other type of socket also */ if (!is_port_available(&port, !is_tcp)) { - /* In the next iteration try to bind to the other type first + /* In the next iteration trial to bind to the other type first because perhaps it is more rare. 
*/ is_tcp = !is_tcp; continue; diff --git a/test/core/util/port_windows.c b/test/core/util/port_windows.c index 5b072f805a5..7275d565015 100644 --- a/test/core/util/port_windows.c +++ b/test/core/util/port_windows.c @@ -99,6 +99,62 @@ static int is_port_available(int *port, int is_tcp) { return 1; } +static void got_port_from_server(void *arg, + const grpc_httpcli_response *response) { + size_t i; + int port = 0; + portreq *pr = arg; + GPR_ASSERT(response); + GPR_ASSERT(response->status == 200); + for (i = 0; i < response->body_length; i++) { + GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9'); + port = port * 10 + response->body[i] - '0'; + } + GPR_ASSERT(port > 1024); + gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset)); + pr->port = port; + grpc_pollset_kick(&pr->pollset, NULL); + gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset)); +} + +static void destroy_pollset_and_shutdown(void *p) { + grpc_pollset_destroy(p); + grpc_shutdown(); +} + +static int pick_port_using_server(char *server) { + grpc_httpcli_context context; + grpc_httpcli_request req; + portreq pr; + + grpc_init(); + + memset(&pr, 0, sizeof(pr)); + memset(&req, 0, sizeof(req)); + grpc_pollset_init(&pr.pollset); + pr.port = -1; + + req.host = server; + req.path = "/get"; + + grpc_httpcli_context_init(&context); + grpc_httpcli_get(&context, &pr.pollset, &req, + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server, + &pr); + gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset)); + while (pr.port == -1) { + grpc_pollset_worker worker; + grpc_pollset_work(&pr.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), + GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset)); + + grpc_httpcli_context_destroy(&context); + grpc_pollset_shutdown(&pr.pollset, destroy_pollset_and_shutdown, &pr.pollset); + + return pr.port; +} + int grpc_pick_unused_port(void) { /* We repeatedly pick a port and then see whether or not it is available for use both as a TCP socket and a UDP socket. First, we @@ -108,22 +164,29 @@ int grpc_pick_unused_port(void) { races with other processes on kernels that want to reuse the same port numbers over and over. */ - /* In alternating iterations we try UDP ports before TCP ports UDP + /* In alternating iterations we trial UDP ports before TCP ports UDP ports -- it could be the case that this machine has been using up UDP ports and they are scarcer. */ /* Type of port to first pick in next iteration */ int is_tcp = 1; - int try - = 0; + int trial = 0; + + char *env = gpr_getenv("GRPC_TEST_PORT_SERVER"); + if (env) { + int port = pick_port_using_server(env); + gpr_free(env); + if (port != 0) { + return port; + } + } for (;;) { int port; - try - ++; - if (try == 1) { + trial++; + if (trial == 1) { port = _getpid() % (65536 - 30000) + 30000; - } else if (try <= NUM_RANDOM_PORTS_TO_PICK) { + } else if (trial <= NUM_RANDOM_PORTS_TO_PICK) { port = rand() % (65536 - 30000) + 30000; } else { port = 0; @@ -136,7 +199,7 @@ int grpc_pick_unused_port(void) { GPR_ASSERT(port > 0); /* Check that the port # is free for the other type of socket also */ if (!is_port_available(&port, !is_tcp)) { - /* In the next iteration try to bind to the other type first + /* In the next iteration trial to bind to the other type first because perhaps it is more rare. 
*/ is_tcp = !is_tcp; continue; From 3d5ba2f499907ae6857eb2cad4aca3ede8a98cbf Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 14:35:52 -0700 Subject: [PATCH 21/29] Make port_windows compile again --- test/core/util/port_windows.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/test/core/util/port_windows.c b/test/core/util/port_windows.c index 7275d565015..2f64626cf3e 100644 --- a/test/core/util/port_windows.c +++ b/test/core/util/port_windows.c @@ -35,7 +35,6 @@ #include "test/core/util/test_config.h" #if defined(GPR_WINSOCK_SOCKET) && defined(GRPC_TEST_PICK_PORT) -#include "src/core/iomgr/sockaddr_utils.h" #include "test/core/util/port.h" #include @@ -43,8 +42,14 @@ #include #include +#include +#include #include +#include "src/core/support/env.h" +#include "src/core/httpcli/httpcli.h" +#include "src/core/iomgr/sockaddr_utils.h" + #define NUM_RANDOM_PORTS_TO_PICK 100 static int is_port_available(int *port, int is_tcp) { @@ -99,6 +104,11 @@ static int is_port_available(int *port, int is_tcp) { return 1; } +typedef struct portreq { + grpc_pollset pollset; + int port; +} portreq; + static void got_port_from_server(void *arg, const grpc_httpcli_response *response) { size_t i; From 2a7df7db4df92c31798808f5901535bdc91b0472 Mon Sep 17 00:00:00 2001 From: James Crasta Date: Wed, 26 Aug 2015 15:40:26 -0600 Subject: [PATCH 22/29] Use https like all the other submodules --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 434d01b3d5f..4ab7f3ca5b6 100644 --- a/.gitmodules +++ b/.gitmodules @@ -14,4 +14,4 @@ url = https://github.com/gflags/gflags.git [submodule "third_party/googletest"] path = third_party/googletest - url = git://github.com/google/googletest + url = https://github.com/google/googletest.git From 8bf34083e2cf7336a75e39bc97d7aaeb26299512 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 15:37:47 -0700 Subject: [PATCH 23/29] Fix uninitialized data --- src/core/iomgr/tcp_posix.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c index 03be462960b..0db7cd9f0e1 100644 --- a/src/core/iomgr/tcp_posix.c +++ b/src/core/iomgr/tcp_posix.c @@ -85,8 +85,6 @@ typedef struct { grpc_iomgr_closure read_closure; grpc_iomgr_closure write_closure; - grpc_iomgr_closure handle_read_closure; - char *peer_string; } grpc_tcp; @@ -235,6 +233,7 @@ static void tcp_handle_read(void *arg /* grpc_tcp */, int success) { GPR_ASSERT(!tcp->finished_edge); if (!success) { + gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); call_read_cb(tcp, 0); TCP_UNREF(tcp, "read"); } else { @@ -255,8 +254,7 @@ static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep, tcp->finished_edge = 0; grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); } else { - tcp->handle_read_closure.cb_arg = tcp; - grpc_iomgr_add_delayed_callback(&tcp->handle_read_closure, 1); + grpc_iomgr_add_delayed_callback(&tcp->read_closure, 1); } /* TODO(ctiller): immediate return */ return GRPC_ENDPOINT_PENDING; @@ -447,7 +445,6 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, tcp->write_closure.cb = tcp_handle_write; tcp->write_closure.cb_arg = tcp; - tcp->handle_read_closure.cb = tcp_handle_read; return &tcp->base; } From 70964fca29eaace42a63619c211803f8891f9d87 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 15:41:53 -0700 Subject: [PATCH 24/29] Properly reset incoming buffer --- src/core/iomgr/tcp_windows.c | 1 + 
1 file changed, 1 insertion(+) diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index cd2d95d4822..58f9160ef97 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -219,6 +219,7 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, tcp->socket->read_info.outstanding = 1; tcp->read_cb = cb; tcp->read_slices = read_slices; + gpr_slice_buffer_reset_and_unref(read_slices); tcp->read_slice = gpr_slice_malloc(8192); From 4a71ce263324c6e10f62d50ed7610a34c0594cd8 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 26 Aug 2015 15:47:55 -0700 Subject: [PATCH 25/29] Increase msan timeout also --- tools/run_tests/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 977d9c8aeaf..64900b62008 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -358,7 +358,7 @@ _CONFIGS = { 'opt': SimpleConfig('opt'), 'tsan': SimpleConfig('tsan', timeout_seconds=10*60, environ={ 'TSAN_OPTIONS': 'suppressions=tools/tsan_suppressions.txt:halt_on_error=1:second_deadlock_stack=1'}), - 'msan': SimpleConfig('msan'), + 'msan': SimpleConfig('msan', timeout_seconds=7*60), 'ubsan': SimpleConfig('ubsan'), 'asan': SimpleConfig('asan', timeout_seconds=7*60, environ={ 'ASAN_OPTIONS': 'detect_leaks=1:color=always:suppressions=tools/tsan_suppressions.txt', From 2845c86958fbb2a30b06f88106583fb1e1131cc3 Mon Sep 17 00:00:00 2001 From: Hongyu Chen Date: Wed, 26 Aug 2015 18:06:52 -0700 Subject: [PATCH 26/29] Fix include guard of census/grpc_filter.h --- src/core/census/grpc_filter.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/census/grpc_filter.h b/src/core/census/grpc_filter.h index 1453c05d286..b3de3adc947 100644 --- a/src/core/census/grpc_filter.h +++ b/src/core/census/grpc_filter.h @@ -31,8 +31,8 @@ * */ -#ifndef GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H -#define GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H +#ifndef GRPC_INTERNAL_CORE_CENSUS_GRPC_FILTER_H +#define GRPC_INTERNAL_CORE_CENSUS_GRPC_FILTER_H #include "src/core/channel/channel_stack.h" @@ -41,4 +41,4 @@ extern const grpc_channel_filter grpc_client_census_filter; extern const grpc_channel_filter grpc_server_census_filter; -#endif /* GRPC_INTERNAL_CORE_CHANNEL_CENSUS_FILTER_H */ +#endif /* GRPC_INTERNAL_CORE_CENSUS_GRPC_FILTER_H */ From 8e3dc00d93232b8cc4f3760fa252320fe1ded860 Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Thu, 27 Aug 2015 03:13:41 +0000 Subject: [PATCH 27/29] The Beta API Channel --- .../grpcio/grpc/_adapter/_c/types/channel.c | 2 +- src/python/grpcio/grpc/beta/__init__.py | 28 +++ .../grpcio/grpc/beta/_connectivity_channel.py | 148 ++++++++++++++ src/python/grpcio/grpc/beta/beta.py | 114 +++++++++++ src/python/grpcio/grpc/beta/utilities.py | 161 ++++++++++++++++ .../grpcio_test/grpc_test/beta/__init__.py | 30 +++ .../beta/_connectivity_channel_test.py | 180 ++++++++++++++++++ .../grpc_test/beta/_utilities_test.py | 123 ++++++++++++ 8 files changed, 785 insertions(+), 1 deletion(-) create mode 100644 src/python/grpcio/grpc/beta/__init__.py create mode 100644 src/python/grpcio/grpc/beta/_connectivity_channel.py create mode 100644 src/python/grpcio/grpc/beta/beta.py create mode 100644 src/python/grpcio/grpc/beta/utilities.py create mode 100644 src/python/grpcio_test/grpc_test/beta/__init__.py create mode 100644 src/python/grpcio_test/grpc_test/beta/_connectivity_channel_test.py create mode 100644 
src/python/grpcio_test/grpc_test/beta/_utilities_test.py diff --git a/src/python/grpcio/grpc/_adapter/_c/types/channel.c b/src/python/grpcio/grpc/_adapter/_c/types/channel.c index c577ac05eb0..cf866dd80c6 100644 --- a/src/python/grpcio/grpc/_adapter/_c/types/channel.c +++ b/src/python/grpcio/grpc/_adapter/_c/types/channel.c @@ -164,7 +164,7 @@ PyObject *pygrpc_Channel_watch_connectivity_state( int last_observed_state; CompletionQueue *completion_queue; char *keywords[] = {"last_observed_state", "deadline", - "completion_queue", "tag"}; + "completion_queue", "tag", NULL}; if (!PyArg_ParseTupleAndKeywords( args, kwargs, "idO!O:watch_connectivity_state", keywords, &last_observed_state, &deadline, &pygrpc_CompletionQueue_type, diff --git a/src/python/grpcio/grpc/beta/__init__.py b/src/python/grpcio/grpc/beta/__init__.py new file mode 100644 index 00000000000..b89398809fa --- /dev/null +++ b/src/python/grpcio/grpc/beta/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/python/grpcio/grpc/beta/_connectivity_channel.py b/src/python/grpcio/grpc/beta/_connectivity_channel.py new file mode 100644 index 00000000000..457ede79f2e --- /dev/null +++ b/src/python/grpcio/grpc/beta/_connectivity_channel.py @@ -0,0 +1,148 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Affords a connectivity-state-listenable channel.""" + +import threading +import time + +from grpc._adapter import _low +from grpc.framework.foundation import callable_util + +_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = ( + 'Exception calling channel subscription callback!') + + +class ConnectivityChannel(object): + + def __init__(self, low_channel, mapping): + self._lock = threading.Lock() + self._low_channel = low_channel + self._mapping = mapping + + self._polling = False + self._connectivity = None + self._try_to_connect = False + self._callbacks_and_connectivities = [] + self._delivering = False + + def _deliveries(self, connectivity): + callbacks_needing_update = [] + for callback_and_connectivity in self._callbacks_and_connectivities: + callback, callback_connectivity = callback_and_connectivity + if callback_connectivity is not connectivity: + callbacks_needing_update.append(callback) + callback_and_connectivity[1] = connectivity + return callbacks_needing_update + + def _deliver(self, initial_connectivity, initial_callbacks): + connectivity = initial_connectivity + callbacks = initial_callbacks + while True: + for callback in callbacks: + callable_util.call_logging_exceptions( + callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE, + connectivity) + with self._lock: + callbacks = self._deliveries(self._connectivity) + if callbacks: + connectivity = self._connectivity + else: + self._delivering = False + return + + def _spawn_delivery(self, connectivity, callbacks): + delivering_thread = threading.Thread( + target=self._deliver, args=(connectivity, callbacks,)) + delivering_thread.start() + self._delivering = True + + # TODO(issue 3064): Don't poll. 
+ def _poll_connectivity(self, low_channel, initial_try_to_connect): + try_to_connect = initial_try_to_connect + low_connectivity = low_channel.check_connectivity_state(try_to_connect) + with self._lock: + self._connectivity = self._mapping[low_connectivity] + callbacks = tuple( + callback for callback, unused_but_known_to_be_none_connectivity + in self._callbacks_and_connectivities) + for callback_and_connectivity in self._callbacks_and_connectivities: + callback_and_connectivity[1] = self._connectivity + if callbacks: + self._spawn_delivery(self._connectivity, callbacks) + completion_queue = _low.CompletionQueue() + while True: + low_channel.watch_connectivity_state( + low_connectivity, time.time() + 0.2, completion_queue, None) + event = completion_queue.next() + with self._lock: + if not self._callbacks_and_connectivities and not self._try_to_connect: + self._polling = False + self._connectivity = None + completion_queue.shutdown() + break + try_to_connect = self._try_to_connect + self._try_to_connect = False + if event.success or try_to_connect: + low_connectivity = low_channel.check_connectivity_state(try_to_connect) + with self._lock: + self._connectivity = self._mapping[low_connectivity] + if not self._delivering: + callbacks = self._deliveries(self._connectivity) + if callbacks: + self._spawn_delivery(self._connectivity, callbacks) + + def subscribe(self, callback, try_to_connect): + with self._lock: + if not self._callbacks_and_connectivities and not self._polling: + polling_thread = threading.Thread( + target=self._poll_connectivity, + args=(self._low_channel, bool(try_to_connect))) + polling_thread.start() + self._polling = True + self._callbacks_and_connectivities.append([callback, None]) + elif not self._delivering and self._connectivity is not None: + self._spawn_delivery(self._connectivity, (callback,)) + self._try_to_connect |= bool(try_to_connect) + self._callbacks_and_connectivities.append( + [callback, self._connectivity]) + else: + self._try_to_connect |= bool(try_to_connect) + self._callbacks_and_connectivities.append([callback, None]) + + def unsubscribe(self, callback): + with self._lock: + for index, (subscribed_callback, unused_connectivity) in enumerate( + self._callbacks_and_connectivities): + if callback == subscribed_callback: + self._callbacks_and_connectivities.pop(index) + break + + def low_channel(self): + return self._low_channel diff --git a/src/python/grpcio/grpc/beta/beta.py b/src/python/grpcio/grpc/beta/beta.py new file mode 100644 index 00000000000..40cad5e4868 --- /dev/null +++ b/src/python/grpcio/grpc/beta/beta.py @@ -0,0 +1,114 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Entry points into gRPC Python Beta.""" + +import enum + +from grpc._adapter import _low +from grpc._adapter import _types +from grpc.beta import _connectivity_channel + +_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = ( + 'Exception calling channel subscription callback!') + + +@enum.unique +class ChannelConnectivity(enum.Enum): + """Mirrors grpc_connectivity_state in the gRPC Core. + + Attributes: + IDLE: The channel is idle. + CONNECTING: The channel is connecting. + READY: The channel is ready to conduct RPCs. + TRANSIENT_FAILURE: The channel has seen a failure from which it expects to + recover. + FATAL_FAILURE: The channel has seen a failure from which it cannot recover. + """ + + IDLE = (_types.ConnectivityState.IDLE, 'idle',) + CONNECTING = (_types.ConnectivityState.CONNECTING, 'connecting',) + READY = (_types.ConnectivityState.READY, 'ready',) + TRANSIENT_FAILURE = ( + _types.ConnectivityState.TRANSIENT_FAILURE, 'transient failure',) + FATAL_FAILURE = (_types.ConnectivityState.FATAL_FAILURE, 'fatal failure',) + +_LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = { + state: connectivity for state, connectivity in zip( + _types.ConnectivityState, ChannelConnectivity) +} + + +class Channel(object): + """A channel to a remote host through which RPCs may be conducted. + + Only the "subscribe" and "unsubscribe" methods are supported for application + use. This class' instance constructor and all other attributes are + unsupported. + """ + + def __init__(self, low_channel): + self._connectivity_channel = _connectivity_channel.ConnectivityChannel( + low_channel, _LOW_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY) + + def subscribe(self, callback, try_to_connect=None): + """Subscribes to this Channel's connectivity. + + Args: + callback: A callable to be invoked and passed this Channel's connectivity. + The callable will be invoked immediately upon subscription and again for + every change to this Channel's connectivity thereafter until it is + unsubscribed. + try_to_connect: A boolean indicating whether or not this Channel should + attempt to connect if it is not already connected and ready to conduct + RPCs. + """ + self._connectivity_channel.subscribe(callback, try_to_connect) + + def unsubscribe(self, callback): + """Unsubscribes a callback from this Channel's connectivity. + + Args: + callback: A callable previously registered with this Channel from having + been passed to its "subscribe" method. + """ + self._connectivity_channel.unsubscribe(callback) + + +def create_insecure_channel(host, port): + """Creates an insecure Channel to a remote host. + + Args: + host: The name of the remote host to which to connect. + port: The port of the remote host to which to connect. 
+ + Returns: + A Channel to the remote host through which RPCs may be conducted. + """ + return Channel(_low.Channel('%s:%d' % (host, port), ())) diff --git a/src/python/grpcio/grpc/beta/utilities.py b/src/python/grpcio/grpc/beta/utilities.py new file mode 100644 index 00000000000..1b5356e3ad9 --- /dev/null +++ b/src/python/grpcio/grpc/beta/utilities.py @@ -0,0 +1,161 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Utilities for the gRPC Python Beta API.""" + +import threading +import time + +from grpc.beta import beta +from grpc.framework.foundation import callable_util +from grpc.framework.foundation import future + +_DONE_CALLBACK_EXCEPTION_LOG_MESSAGE = ( + 'Exception calling connectivity future "done" callback!') + + +class _ChannelReadyFuture(future.Future): + + def __init__(self, channel): + self._condition = threading.Condition() + self._channel = channel + + self._matured = False + self._cancelled = False + self._done_callbacks = [] + + def _block(self, timeout): + until = None if timeout is None else time.time() + timeout + with self._condition: + while True: + if self._cancelled: + raise future.CancelledError() + elif self._matured: + return + else: + if until is None: + self._condition.wait() + else: + remaining = until - time.time() + if remaining < 0: + raise future.TimeoutError() + else: + self._condition.wait(timeout=remaining) + + def _update(self, connectivity): + with self._condition: + if not self._cancelled and connectivity is beta.ChannelConnectivity.READY: + self._matured = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self) + + def cancel(self): + with self._condition: + if not self._matured: + self._cancelled = True + self._channel.unsubscribe(self._update) + self._condition.notify_all() + done_callbacks = tuple(self._done_callbacks) + self._done_callbacks = None + else: + return False + + for done_callback in done_callbacks: + callable_util.call_logging_exceptions( + done_callback, _DONE_CALLBACK_EXCEPTION_LOG_MESSAGE, self) + + def cancelled(self): + with self._condition: + return self._cancelled + + def running(self): + with self._condition: + return not self._cancelled and not self._matured + + def done(self): + with self._condition: + return self._cancelled or self._matured + + def result(self, timeout=None): + self._block(timeout) + return None + + def exception(self, timeout=None): + self._block(timeout) + return None + + def traceback(self, timeout=None): + self._block(timeout) + return None + + def add_done_callback(self, fn): + with self._condition: + if not self._cancelled and not self._matured: + self._done_callbacks.append(fn) + return + + fn(self) + + def start(self): + with self._condition: + self._channel.subscribe(self._update, try_to_connect=True) + + def __del__(self): + with self._condition: + if not self._cancelled and not self._matured: + self._channel.unsubscribe(self._update) + + +def channel_ready_future(channel): + """Creates a future.Future that matures when a beta.Channel is ready. + + Cancelling the returned future.Future does not tell the given beta.Channel to + abandon attempts it may have been making to connect; cancelling merely + deactivates the return future.Future's subscription to the given + beta.Channel's connectivity. + + Args: + channel: A beta.Channel. + + Returns: + A future.Future that matures when the given Channel has connectivity + beta.ChannelConnectivity.READY. 
+ """ + ready_future = _ChannelReadyFuture(channel) + ready_future.start() + return ready_future + diff --git a/src/python/grpcio_test/grpc_test/beta/__init__.py b/src/python/grpcio_test/grpc_test/beta/__init__.py new file mode 100644 index 00000000000..70865191060 --- /dev/null +++ b/src/python/grpcio_test/grpc_test/beta/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/src/python/grpcio_test/grpc_test/beta/_connectivity_channel_test.py b/src/python/grpcio_test/grpc_test/beta/_connectivity_channel_test.py new file mode 100644 index 00000000000..038464889d6 --- /dev/null +++ b/src/python/grpcio_test/grpc_test/beta/_connectivity_channel_test.py @@ -0,0 +1,180 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests of grpc.beta._connectivity_channel.""" + +import threading +import time +import unittest + +from grpc._adapter import _low +from grpc._adapter import _types +from grpc.beta import _connectivity_channel +from grpc_test.framework.common import test_constants + +_MAPPING_FUNCTION = lambda integer: integer * 200 + 17 +_MAPPING = { + state: _MAPPING_FUNCTION(state) for state in _types.ConnectivityState} +_IDLE, _CONNECTING, _READY, _TRANSIENT_FAILURE, _FATAL_FAILURE = map( + _MAPPING_FUNCTION, _types.ConnectivityState) + + +def _drive_completion_queue(completion_queue): + while True: + event = completion_queue.next(time.time() + 24 * 60 * 60) + if event.type == _types.EventType.QUEUE_SHUTDOWN: + break + + +class _Callback(object): + + def __init__(self): + self._condition = threading.Condition() + self._connectivities = [] + + def update(self, connectivity): + with self._condition: + self._connectivities.append(connectivity) + self._condition.notify() + + def connectivities(self): + with self._condition: + return tuple(self._connectivities) + + def block_until_connectivities_satisfy(self, predicate): + with self._condition: + while True: + connectivities = tuple(self._connectivities) + if predicate(connectivities): + return connectivities + else: + self._condition.wait() + + +class ChannelConnectivityTest(unittest.TestCase): + + def test_lonely_channel_connectivity(self): + low_channel = _low.Channel('localhost:12345', ()) + callback = _Callback() + + connectivity_channel = _connectivity_channel.ConnectivityChannel( + low_channel, _MAPPING) + connectivity_channel.subscribe(callback.update, try_to_connect=False) + first_connectivities = callback.block_until_connectivities_satisfy(bool) + connectivity_channel.subscribe(callback.update, try_to_connect=True) + second_connectivities = callback.block_until_connectivities_satisfy( + lambda connectivities: 2 <= len(connectivities)) + # Wait for a connection that will never happen. 
+ time.sleep(test_constants.SHORT_TIMEOUT) + third_connectivities = callback.connectivities() + connectivity_channel.unsubscribe(callback.update) + fourth_connectivities = callback.connectivities() + connectivity_channel.unsubscribe(callback.update) + fifth_connectivities = callback.connectivities() + + self.assertSequenceEqual((_IDLE,), first_connectivities) + self.assertNotIn(_READY, second_connectivities) + self.assertNotIn(_READY, third_connectivities) + self.assertNotIn(_READY, fourth_connectivities) + self.assertNotIn(_READY, fifth_connectivities) + + def test_immediately_connectable_channel_connectivity(self): + server_completion_queue = _low.CompletionQueue() + server = _low.Server(server_completion_queue, []) + port = server.add_http2_port('[::]:0') + server.start() + server_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, args=(server_completion_queue,)) + server_completion_queue_thread.start() + low_channel = _low.Channel('localhost:%d' % port, ()) + first_callback = _Callback() + second_callback = _Callback() + + connectivity_channel = _connectivity_channel.ConnectivityChannel( + low_channel, _MAPPING) + connectivity_channel.subscribe(first_callback.update, try_to_connect=False) + first_connectivities = first_callback.block_until_connectivities_satisfy( + bool) + # Wait for a connection that will never happen because try_to_connect=True + # has not yet been passed. + time.sleep(test_constants.SHORT_TIMEOUT) + second_connectivities = first_callback.connectivities() + connectivity_channel.subscribe(second_callback.update, try_to_connect=True) + third_connectivities = first_callback.block_until_connectivities_satisfy( + lambda connectivities: 2 <= len(connectivities)) + fourth_connectivities = second_callback.block_until_connectivities_satisfy( + bool) + # Wait for a connection that will happen (or may already have happened). + first_callback.block_until_connectivities_satisfy( + lambda connectivities: _READY in connectivities) + second_callback.block_until_connectivities_satisfy( + lambda connectivities: _READY in connectivities) + connectivity_channel.unsubscribe(first_callback.update) + connectivity_channel.unsubscribe(second_callback.update) + + server.shutdown() + server_completion_queue.shutdown() + server_completion_queue_thread.join() + + self.assertSequenceEqual((_IDLE,), first_connectivities) + self.assertSequenceEqual((_IDLE,), second_connectivities) + self.assertNotIn(_TRANSIENT_FAILURE, third_connectivities) + self.assertNotIn(_FATAL_FAILURE, third_connectivities) + self.assertNotIn(_TRANSIENT_FAILURE, fourth_connectivities) + self.assertNotIn(_FATAL_FAILURE, fourth_connectivities) + + def test_reachable_then_unreachable_channel_connectivity(self): + server_completion_queue = _low.CompletionQueue() + server = _low.Server(server_completion_queue, []) + port = server.add_http2_port('[::]:0') + server.start() + server_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, args=(server_completion_queue,)) + server_completion_queue_thread.start() + low_channel = _low.Channel('localhost:%d' % port, ()) + callback = _Callback() + + connectivity_channel = _connectivity_channel.ConnectivityChannel( + low_channel, _MAPPING) + connectivity_channel.subscribe(callback.update, try_to_connect=True) + callback.block_until_connectivities_satisfy( + lambda connectivities: _READY in connectivities) + # Now take down the server and confirm that channel readiness is repudiated. 
+ server.shutdown() + callback.block_until_connectivities_satisfy( + lambda connectivities: connectivities[-1] is not _READY) + connectivity_channel.unsubscribe(callback.update) + + server.shutdown() + server_completion_queue.shutdown() + server_completion_queue_thread.join() + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_test/grpc_test/beta/_utilities_test.py b/src/python/grpcio_test/grpc_test/beta/_utilities_test.py new file mode 100644 index 00000000000..998e74ccf48 --- /dev/null +++ b/src/python/grpcio_test/grpc_test/beta/_utilities_test.py @@ -0,0 +1,123 @@ +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Tests of grpc.beta.utilities.""" + +import threading +import time +import unittest + +from grpc._adapter import _low +from grpc._adapter import _types +from grpc.beta import beta +from grpc.beta import utilities +from grpc.framework.foundation import future +from grpc_test.framework.common import test_constants + + +def _drive_completion_queue(completion_queue): + while True: + event = completion_queue.next(time.time() + 24 * 60 * 60) + if event.type == _types.EventType.QUEUE_SHUTDOWN: + break + + +class _Callback(object): + + def __init__(self): + self._condition = threading.Condition() + self._value = None + + def accept_value(self, value): + with self._condition: + self._value = value + self._condition.notify_all() + + def block_until_called(self): + with self._condition: + while self._value is None: + self._condition.wait() + return self._value + + +class ChannelConnectivityTest(unittest.TestCase): + + def test_lonely_channel_connectivity(self): + channel = beta.create_insecure_channel('localhost', 12345) + callback = _Callback() + + ready_future = utilities.channel_ready_future(channel) + ready_future.add_done_callback(callback.accept_value) + with self.assertRaises(future.TimeoutError): + ready_future.result(test_constants.SHORT_TIMEOUT) + self.assertFalse(ready_future.cancelled()) + self.assertFalse(ready_future.done()) + self.assertTrue(ready_future.running()) + ready_future.cancel() + value_passed_to_callback = callback.block_until_called() + self.assertIs(ready_future, value_passed_to_callback) + self.assertTrue(ready_future.cancelled()) + self.assertTrue(ready_future.done()) + self.assertFalse(ready_future.running()) + + def test_immediately_connectable_channel_connectivity(self): + server_completion_queue = _low.CompletionQueue() + server = _low.Server(server_completion_queue, []) + port = server.add_http2_port('[::]:0') + server.start() + server_completion_queue_thread = threading.Thread( + target=_drive_completion_queue, args=(server_completion_queue,)) + server_completion_queue_thread.start() + channel = beta.create_insecure_channel('localhost', port) + callback = _Callback() + + try: + ready_future = utilities.channel_ready_future(channel) + ready_future.add_done_callback(callback.accept_value) + self.assertIsNone( + ready_future.result(test_constants.SHORT_TIMEOUT)) + value_passed_to_callback = callback.block_until_called() + self.assertIs(ready_future, value_passed_to_callback) + self.assertFalse(ready_future.cancelled()) + self.assertTrue(ready_future.done()) + self.assertFalse(ready_future.running()) + # Cancellation after maturity has no effect. 
+ ready_future.cancel() + self.assertFalse(ready_future.cancelled()) + self.assertTrue(ready_future.done()) + self.assertFalse(ready_future.running()) + finally: + ready_future.cancel() + server.shutdown() + server_completion_queue.shutdown() + server_completion_queue_thread.join() + + +if __name__ == '__main__': + unittest.main(verbosity=2) From 0d9f81f741b33e23941db435b9ba7a9ed37adaec Mon Sep 17 00:00:00 2001 From: yang-g Date: Wed, 26 Aug 2015 22:04:30 -0700 Subject: [PATCH 28/29] minor fixes --- include/grpc++/support/string_ref.h | 10 +++++----- src/cpp/util/string_ref.cc | 2 +- test/cpp/util/string_ref_test.cc | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/grpc++/support/string_ref.h b/include/grpc++/support/string_ref.h index fd2b3ad8e7b..2bc1fecefeb 100644 --- a/include/grpc++/support/string_ref.h +++ b/include/grpc++/support/string_ref.h @@ -31,8 +31,8 @@ * */ -#ifndef GRPCXX_STRING_REF_H -#define GRPCXX_STRING_REF_H +#ifndef GRPCXX_SUPPORT_STRING_REF_H +#define GRPCXX_SUPPORT_STRING_REF_H #include #include @@ -44,6 +44,8 @@ namespace grpc { // This class is a non owning reference to a string. // It should be a strict subset of the upcoming std::string_ref. See: // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3442.html +// The constexpr is dropped or replaced with const for legacy compiler +// compatibility. class string_ref { public: // types @@ -115,6 +117,4 @@ std::ostream& operator<<(std::ostream& stream, const string_ref& string); } // namespace grpc -#endif // GRPCXX_STRING_REF_H - - +#endif // GRPCXX_SUPPORT_STRING_REF_H diff --git a/src/cpp/util/string_ref.cc b/src/cpp/util/string_ref.cc index eb54f65e3ac..9adc0920135 100644 --- a/src/cpp/util/string_ref.cc +++ b/src/cpp/util/string_ref.cc @@ -80,7 +80,7 @@ size_t string_ref::find(string_ref s) const { } size_t string_ref::find(char c) const { - auto it = std::find_if(cbegin(), cend(), [c](char cc) { return cc == c; }); + auto it = std::find(cbegin(), cend(), c); return it == cend() ? 
npos : std::distance(cbegin(), it); } diff --git a/test/cpp/util/string_ref_test.cc b/test/cpp/util/string_ref_test.cc index c4ca4fce848..465072d43eb 100644 --- a/test/cpp/util/string_ref_test.cc +++ b/test/cpp/util/string_ref_test.cc @@ -100,8 +100,8 @@ TEST_F(StringRefTest, Assignment) { TEST_F(StringRefTest, Iterator) { string_ref s(kTestString); size_t i = 0; - for (char c : s) { - EXPECT_EQ(kTestString[i++], c); + for (auto it = s.cbegin(); it != s.cend(); ++it) { + EXPECT_EQ(kTestString[i++], *it); } EXPECT_EQ(strlen(kTestString), i); } From 99d7b661bede39143d1be6040fb67c81b8117ae3 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Thu, 27 Aug 2015 07:36:12 -0700 Subject: [PATCH 29/29] Revert "Refactor Endpoint API" --- include/grpc/support/slice_buffer.h | 2 - src/core/httpcli/httpcli.c | 95 ++-- src/core/iomgr/endpoint.c | 17 +- src/core/iomgr/endpoint.h | 63 +-- src/core/iomgr/tcp_posix.c | 525 ++++++++++++++------- src/core/iomgr/tcp_windows.c | 192 ++++---- src/core/security/secure_endpoint.c | 188 ++++---- src/core/security/secure_transport_setup.c | 119 ++--- src/core/support/slice_buffer.c | 22 - src/core/transport/chttp2/internal.h | 12 +- src/core/transport/chttp2/writing.c | 21 +- src/core/transport/chttp2_transport.c | 140 +++--- test/core/bad_client/bad_client.c | 17 +- test/core/iomgr/endpoint_tests.c | 204 ++++---- test/core/iomgr/tcp_posix_test.c | 148 +++--- test/core/security/secure_endpoint_test.c | 55 ++- test/core/util/port_posix.c | 14 +- test/core/util/port_windows.c | 91 +--- 18 files changed, 978 insertions(+), 947 deletions(-) diff --git a/include/grpc/support/slice_buffer.h b/include/grpc/support/slice_buffer.h index 04db003ac58..ec048e8c91f 100644 --- a/include/grpc/support/slice_buffer.h +++ b/include/grpc/support/slice_buffer.h @@ -86,8 +86,6 @@ void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb); void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b); /* move all of the elements of src into dst */ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst); -/* remove n bytes from the end of a slice buffer */ -void gpr_slice_buffer_trim_end(gpr_slice_buffer *src, size_t n); #ifdef __cplusplus } diff --git a/src/core/httpcli/httpcli.c b/src/core/httpcli/httpcli.c index 1e38479eb16..9012070e8ea 100644 --- a/src/core/httpcli/httpcli.c +++ b/src/core/httpcli/httpcli.c @@ -61,10 +61,6 @@ typedef struct { grpc_httpcli_context *context; grpc_pollset *pollset; grpc_iomgr_object iomgr_obj; - gpr_slice_buffer incoming; - gpr_slice_buffer outgoing; - grpc_iomgr_closure on_read; - grpc_iomgr_closure done_write; } internal_request; static grpc_httpcli_get_override g_get_override = NULL; @@ -103,70 +99,73 @@ static void finish(internal_request *req, int success) { gpr_slice_unref(req->request_text); gpr_free(req->host); grpc_iomgr_unregister_object(&req->iomgr_obj); - gpr_slice_buffer_destroy(&req->incoming); - gpr_slice_buffer_destroy(&req->outgoing); gpr_free(req); } -static void on_read(void *user_data, int success); - -static void do_read(internal_request *req) { - switch (grpc_endpoint_read(req->ep, &req->incoming, &req->on_read)) { - case GRPC_ENDPOINT_DONE: - on_read(req, 1); - break; - case GRPC_ENDPOINT_PENDING: - break; - case GRPC_ENDPOINT_ERROR: - on_read(req, 0); - break; - } -} - -static void on_read(void *user_data, int success) { +static void on_read(void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status status) { internal_request *req = user_data; size_t i; - for (i = 0; i < 
req->incoming.count; i++) { - if (GPR_SLICE_LENGTH(req->incoming.slices[i])) { + for (i = 0; i < nslices; i++) { + if (GPR_SLICE_LENGTH(slices[i])) { req->have_read_byte = 1; - if (!grpc_httpcli_parser_parse(&req->parser, req->incoming.slices[i])) { + if (!grpc_httpcli_parser_parse(&req->parser, slices[i])) { finish(req, 0); - return; + goto done; } } } - if (success) { - do_read(req); - } else if (!req->have_read_byte) { - next_address(req); - } else { - finish(req, grpc_httpcli_parser_eof(&req->parser)); + switch (status) { + case GRPC_ENDPOINT_CB_OK: + grpc_endpoint_notify_on_read(req->ep, on_read, req); + break; + case GRPC_ENDPOINT_CB_EOF: + case GRPC_ENDPOINT_CB_ERROR: + case GRPC_ENDPOINT_CB_SHUTDOWN: + if (!req->have_read_byte) { + next_address(req); + } else { + finish(req, grpc_httpcli_parser_eof(&req->parser)); + } + break; + } + +done: + for (i = 0; i < nslices; i++) { + gpr_slice_unref(slices[i]); } } -static void on_written(internal_request *req) { do_read(req); } +static void on_written(internal_request *req) { + grpc_endpoint_notify_on_read(req->ep, on_read, req); +} -static void done_write(void *arg, int success) { +static void done_write(void *arg, grpc_endpoint_cb_status status) { internal_request *req = arg; - if (success) { - on_written(req); - } else { - next_address(req); + switch (status) { + case GRPC_ENDPOINT_CB_OK: + on_written(req); + break; + case GRPC_ENDPOINT_CB_EOF: + case GRPC_ENDPOINT_CB_SHUTDOWN: + case GRPC_ENDPOINT_CB_ERROR: + next_address(req); + break; } } static void start_write(internal_request *req) { gpr_slice_ref(req->request_text); - gpr_slice_buffer_add(&req->outgoing, req->request_text); - switch (grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write)) { - case GRPC_ENDPOINT_DONE: + switch ( + grpc_endpoint_write(req->ep, &req->request_text, 1, done_write, req)) { + case GRPC_ENDPOINT_WRITE_DONE: on_written(req); break; - case GRPC_ENDPOINT_PENDING: + case GRPC_ENDPOINT_WRITE_PENDING: break; - case GRPC_ENDPOINT_ERROR: + case GRPC_ENDPOINT_WRITE_ERROR: finish(req, 0); break; } @@ -238,10 +237,6 @@ void grpc_httpcli_get(grpc_httpcli_context *context, grpc_pollset *pollset, request->handshaker ? request->handshaker : &grpc_httpcli_plaintext; req->context = context; req->pollset = pollset; - grpc_iomgr_closure_init(&req->on_read, on_read, req); - grpc_iomgr_closure_init(&req->done_write, done_write, req); - gpr_slice_buffer_init(&req->incoming); - gpr_slice_buffer_init(&req->outgoing); gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); grpc_iomgr_register_object(&req->iomgr_obj, name); gpr_free(name); @@ -275,11 +270,7 @@ void grpc_httpcli_post(grpc_httpcli_context *context, grpc_pollset *pollset, request->handshaker ? 
request->handshaker : &grpc_httpcli_plaintext; req->context = context; req->pollset = pollset; - grpc_iomgr_closure_init(&req->on_read, on_read, req); - grpc_iomgr_closure_init(&req->done_write, done_write, req); - gpr_slice_buffer_init(&req->incoming); - gpr_slice_buffer_init(&req->outgoing); - gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->path); + gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->path); grpc_iomgr_register_object(&req->iomgr_obj, name); gpr_free(name); req->host = gpr_strdup(request->host); diff --git a/src/core/iomgr/endpoint.c b/src/core/iomgr/endpoint.c index a7878e31dd4..8ee14bce9b7 100644 --- a/src/core/iomgr/endpoint.c +++ b/src/core/iomgr/endpoint.c @@ -33,16 +33,17 @@ #include "src/core/iomgr/endpoint.h" -grpc_endpoint_op_status grpc_endpoint_read(grpc_endpoint *ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { - return ep->vtable->read(ep, slices, cb); +void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, + void *user_data) { + ep->vtable->notify_on_read(ep, cb, user_data); } -grpc_endpoint_op_status grpc_endpoint_write(grpc_endpoint *ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { - return ep->vtable->write(ep, slices, cb); +grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep, + gpr_slice *slices, + size_t nslices, + grpc_endpoint_write_cb cb, + void *user_data) { + return ep->vtable->write(ep, slices, nslices, cb, user_data); } void grpc_endpoint_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { diff --git a/src/core/iomgr/endpoint.h b/src/core/iomgr/endpoint.h index d14d52d5616..ea92a500e84 100644 --- a/src/core/iomgr/endpoint.h +++ b/src/core/iomgr/endpoint.h @@ -37,7 +37,6 @@ #include "src/core/iomgr/pollset.h" #include "src/core/iomgr/pollset_set.h" #include -#include #include /* An endpoint caps a streaming channel between two communicating processes. 
@@ -46,17 +45,31 @@ typedef struct grpc_endpoint grpc_endpoint; typedef struct grpc_endpoint_vtable grpc_endpoint_vtable; -typedef enum grpc_endpoint_op_status { - GRPC_ENDPOINT_DONE, /* completed immediately, cb won't be called */ - GRPC_ENDPOINT_PENDING, /* cb will be called when completed */ - GRPC_ENDPOINT_ERROR /* write errored out, cb won't be called */ -} grpc_endpoint_op_status; +typedef enum grpc_endpoint_cb_status { + GRPC_ENDPOINT_CB_OK = 0, /* Call completed successfully */ + GRPC_ENDPOINT_CB_EOF, /* Call completed successfully, end of file reached */ + GRPC_ENDPOINT_CB_SHUTDOWN, /* Call interrupted by shutdown */ + GRPC_ENDPOINT_CB_ERROR /* Call interrupted by socket error */ +} grpc_endpoint_cb_status; + +typedef enum grpc_endpoint_write_status { + GRPC_ENDPOINT_WRITE_DONE, /* completed immediately, cb won't be called */ + GRPC_ENDPOINT_WRITE_PENDING, /* cb will be called when completed */ + GRPC_ENDPOINT_WRITE_ERROR /* write errored out, cb won't be called */ +} grpc_endpoint_write_status; + +typedef void (*grpc_endpoint_read_cb)(void *user_data, gpr_slice *slices, + size_t nslices, + grpc_endpoint_cb_status error); +typedef void (*grpc_endpoint_write_cb)(void *user_data, + grpc_endpoint_cb_status error); struct grpc_endpoint_vtable { - grpc_endpoint_op_status (*read)(grpc_endpoint *ep, gpr_slice_buffer *slices, - grpc_iomgr_closure *cb); - grpc_endpoint_op_status (*write)(grpc_endpoint *ep, gpr_slice_buffer *slices, - grpc_iomgr_closure *cb); + void (*notify_on_read)(grpc_endpoint *ep, grpc_endpoint_read_cb cb, + void *user_data); + grpc_endpoint_write_status (*write)(grpc_endpoint *ep, gpr_slice *slices, + size_t nslices, grpc_endpoint_write_cb cb, + void *user_data); void (*add_to_pollset)(grpc_endpoint *ep, grpc_pollset *pollset); void (*add_to_pollset_set)(grpc_endpoint *ep, grpc_pollset_set *pollset); void (*shutdown)(grpc_endpoint *ep); @@ -64,32 +77,26 @@ struct grpc_endpoint_vtable { char *(*get_peer)(grpc_endpoint *ep); }; -/* When data is available on the connection, calls the callback with slices. - Callback success indicates that the endpoint can accept more reads, failure - indicates the endpoint is closed. - Valid slices may be placed into \a slices even on callback success == 0. */ -grpc_endpoint_op_status grpc_endpoint_read( - grpc_endpoint *ep, gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT; +/* When data is available on the connection, calls the callback with slices. */ +void grpc_endpoint_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, + void *user_data); char *grpc_endpoint_get_peer(grpc_endpoint *ep); /* Write slices out to the socket. If the connection is ready for more data after the end of the call, it - returns GRPC_ENDPOINT_DONE. - Otherwise it returns GRPC_ENDPOINT_PENDING and calls cb when the - connection is ready for more data. - \a slices may be mutated at will by the endpoint until cb is called. - No guarantee is made to the content of slices after a write EXCEPT that - it is a valid slice buffer. - */ -grpc_endpoint_op_status grpc_endpoint_write( - grpc_endpoint *ep, gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) GRPC_MUST_USE_RESULT; + returns GRPC_ENDPOINT_WRITE_DONE. + Otherwise it returns GRPC_ENDPOINT_WRITE_PENDING and calls cb when the + connection is ready for more data. 
*/ +grpc_endpoint_write_status grpc_endpoint_write(grpc_endpoint *ep, + gpr_slice *slices, + size_t nslices, + grpc_endpoint_write_cb cb, + void *user_data); /* Causes any pending read/write callbacks to run immediately with - success==0 */ + GRPC_ENDPOINT_CB_SHUTDOWN status */ void grpc_endpoint_shutdown(grpc_endpoint *ep); void grpc_endpoint_destroy(grpc_endpoint *ep); diff --git a/src/core/iomgr/tcp_posix.c b/src/core/iomgr/tcp_posix.c index 0db7cd9f0e1..360e6ebd8cf 100644 --- a/src/core/iomgr/tcp_posix.c +++ b/src/core/iomgr/tcp_posix.c @@ -61,8 +61,209 @@ #define SENDMSG_FLAGS 0 #endif +/* Holds a slice array and associated state. */ +typedef struct grpc_tcp_slice_state { + gpr_slice *slices; /* Array of slices */ + size_t nslices; /* Size of slices array. */ + ssize_t first_slice; /* First valid slice in array */ + ssize_t last_slice; /* Last valid slice in array */ + gpr_slice working_slice; /* pointer to original final slice */ + int working_slice_valid; /* True if there is a working slice */ + int memory_owned; /* True if slices array is owned */ +} grpc_tcp_slice_state; + int grpc_tcp_trace = 0; +static void slice_state_init(grpc_tcp_slice_state *state, gpr_slice *slices, + size_t nslices, size_t valid_slices) { + state->slices = slices; + state->nslices = nslices; + if (valid_slices == 0) { + state->first_slice = -1; + } else { + state->first_slice = 0; + } + state->last_slice = valid_slices - 1; + state->working_slice_valid = 0; + state->memory_owned = 0; +} + +/* Returns true if there is still available data */ +static int slice_state_has_available(grpc_tcp_slice_state *state) { + return state->first_slice != -1 && state->last_slice >= state->first_slice; +} + +static ssize_t slice_state_slices_allocated(grpc_tcp_slice_state *state) { + if (state->first_slice == -1) { + return 0; + } else { + return state->last_slice - state->first_slice + 1; + } +} + +static void slice_state_realloc(grpc_tcp_slice_state *state, size_t new_size) { + /* TODO(klempner): use realloc instead when first_slice is 0 */ + /* TODO(klempner): Avoid a realloc in cases where it is unnecessary */ + gpr_slice *slices = state->slices; + size_t original_size = slice_state_slices_allocated(state); + size_t i; + gpr_slice *new_slices = gpr_malloc(sizeof(gpr_slice) * new_size); + + for (i = 0; i < original_size; ++i) { + new_slices[i] = slices[i + state->first_slice]; + } + + state->slices = new_slices; + state->last_slice = original_size - 1; + if (original_size > 0) { + state->first_slice = 0; + } else { + state->first_slice = -1; + } + state->nslices = new_size; + + if (state->memory_owned) { + gpr_free(slices); + } + state->memory_owned = 1; +} + +static void slice_state_remove_prefix(grpc_tcp_slice_state *state, + size_t prefix_bytes) { + gpr_slice *current_slice = &state->slices[state->first_slice]; + size_t current_slice_size; + + while (slice_state_has_available(state)) { + current_slice_size = GPR_SLICE_LENGTH(*current_slice); + if (current_slice_size > prefix_bytes) { + /* TODO(klempner): Get rid of the extra refcount created here by adding a + native "trim the first N bytes" operation to splice */ + /* TODO(klempner): This really shouldn't be modifying the current slice + unless we own the slices array. 
*/ + gpr_slice tail; + tail = gpr_slice_split_tail(current_slice, prefix_bytes); + gpr_slice_unref(*current_slice); + *current_slice = tail; + return; + } else { + gpr_slice_unref(*current_slice); + ++state->first_slice; + ++current_slice; + prefix_bytes -= current_slice_size; + } + } +} + +static void slice_state_destroy(grpc_tcp_slice_state *state) { + while (slice_state_has_available(state)) { + gpr_slice_unref(state->slices[state->first_slice]); + ++state->first_slice; + } + + if (state->memory_owned) { + gpr_free(state->slices); + state->memory_owned = 0; + } +} + +void slice_state_transfer_ownership(grpc_tcp_slice_state *state, + gpr_slice **slices, size_t *nslices) { + *slices = state->slices + state->first_slice; + *nslices = state->last_slice - state->first_slice + 1; + + state->first_slice = -1; + state->last_slice = -1; +} + +/* Fills iov with the first min(iov_size, available) slices, returns number + filled */ +static size_t slice_state_to_iovec(grpc_tcp_slice_state *state, + struct iovec *iov, size_t iov_size) { + size_t nslices = state->last_slice - state->first_slice + 1; + gpr_slice *slices = state->slices + state->first_slice; + size_t i; + if (nslices < iov_size) { + iov_size = nslices; + } + + for (i = 0; i < iov_size; ++i) { + iov[i].iov_base = GPR_SLICE_START_PTR(slices[i]); + iov[i].iov_len = GPR_SLICE_LENGTH(slices[i]); + } + return iov_size; +} + +/* Makes n blocks available at the end of state, writes them into iov, and + returns the number of bytes allocated */ +static size_t slice_state_append_blocks_into_iovec(grpc_tcp_slice_state *state, + struct iovec *iov, size_t n, + size_t slice_size) { + size_t target_size; + size_t i; + size_t allocated_bytes; + ssize_t allocated_slices = slice_state_slices_allocated(state); + + if (n - state->working_slice_valid >= state->nslices - state->last_slice) { + /* Need to grow the slice array */ + target_size = state->nslices; + do { + target_size = target_size * 2; + } while (target_size < allocated_slices + n - state->working_slice_valid); + /* TODO(klempner): If this ever needs to support both prefix removal and + append, we should be smarter about the growth logic here */ + slice_state_realloc(state, target_size); + } + + i = 0; + allocated_bytes = 0; + + if (state->working_slice_valid) { + iov[0].iov_base = GPR_SLICE_END_PTR(state->slices[state->last_slice]); + iov[0].iov_len = GPR_SLICE_LENGTH(state->working_slice) - + GPR_SLICE_LENGTH(state->slices[state->last_slice]); + allocated_bytes += iov[0].iov_len; + ++i; + state->slices[state->last_slice] = state->working_slice; + state->working_slice_valid = 0; + } + + for (; i < n; ++i) { + ++state->last_slice; + state->slices[state->last_slice] = gpr_slice_malloc(slice_size); + iov[i].iov_base = GPR_SLICE_START_PTR(state->slices[state->last_slice]); + iov[i].iov_len = slice_size; + allocated_bytes += slice_size; + } + if (state->first_slice == -1) { + state->first_slice = 0; + } + return allocated_bytes; +} + +/* Remove the last n bytes from state */ +/* TODO(klempner): Consider having this defer actual deletion until later */ +static void slice_state_remove_last(grpc_tcp_slice_state *state, size_t bytes) { + while (bytes > 0 && slice_state_has_available(state)) { + if (GPR_SLICE_LENGTH(state->slices[state->last_slice]) > bytes) { + state->working_slice = state->slices[state->last_slice]; + state->working_slice_valid = 1; + /* TODO(klempner): Combine these into a single operation that doesn't need + to refcount */ + gpr_slice_unref(gpr_slice_split_tail( + 
&state->slices[state->last_slice], + GPR_SLICE_LENGTH(state->slices[state->last_slice]) - bytes)); + bytes = 0; + } else { + bytes -= GPR_SLICE_LENGTH(state->slices[state->last_slice]); + gpr_slice_unref(state->slices[state->last_slice]); + --state->last_slice; + if (state->last_slice == -1) { + state->first_slice = -1; + } + } + } +} + typedef struct { grpc_endpoint base; grpc_fd *em_fd; @@ -72,111 +273,80 @@ typedef struct { size_t slice_size; gpr_refcount refcount; - gpr_slice_buffer *incoming_buffer; - gpr_slice_buffer *outgoing_buffer; - /** slice within outgoing_buffer to write next */ - size_t outgoing_slice_idx; - /** byte within outgoing_buffer->slices[outgoing_slice_idx] to write next */ - size_t outgoing_byte_idx; + grpc_endpoint_read_cb read_cb; + void *read_user_data; + grpc_endpoint_write_cb write_cb; + void *write_user_data; - grpc_iomgr_closure *read_cb; - grpc_iomgr_closure *write_cb; + grpc_tcp_slice_state write_state; grpc_iomgr_closure read_closure; grpc_iomgr_closure write_closure; + grpc_iomgr_closure handle_read_closure; + char *peer_string; } grpc_tcp; -static void tcp_handle_read(void *arg /* grpc_tcp */, int success); -static void tcp_handle_write(void *arg /* grpc_tcp */, int success); +static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success); +static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success); -static void tcp_shutdown(grpc_endpoint *ep) { +static void grpc_tcp_shutdown(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_fd_shutdown(tcp->em_fd); } -static void tcp_free(grpc_tcp *tcp) { - grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); - gpr_free(tcp->peer_string); - gpr_free(tcp); -} - -/*#define GRPC_TCP_REFCOUNT_DEBUG*/ -#ifdef GRPC_TCP_REFCOUNT_DEBUG -#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) -#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp, - reason, tcp->refcount.count, tcp->refcount.count - 1); - if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); +static void grpc_tcp_unref(grpc_tcp *tcp) { + int refcount_zero = gpr_unref(&tcp->refcount); + if (refcount_zero) { + grpc_fd_orphan(tcp->em_fd, NULL, "tcp_unref_orphan"); + gpr_free(tcp->peer_string); + gpr_free(tcp); } } -static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp, - reason, tcp->refcount.count, tcp->refcount.count + 1); - gpr_ref(&tcp->refcount); -} -#else -#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) -#define TCP_REF(tcp, reason) tcp_ref((tcp)) -static void tcp_unref(grpc_tcp *tcp) { - if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); - } -} - -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } -#endif - -static void tcp_destroy(grpc_endpoint *ep) { +static void grpc_tcp_destroy(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - TCP_UNREF(tcp, "destroy"); + grpc_tcp_unref(tcp); } -static void call_read_cb(grpc_tcp *tcp, int success) { - grpc_iomgr_closure *cb = tcp->read_cb; +static void call_read_cb(grpc_tcp *tcp, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status status) { + grpc_endpoint_read_cb cb = tcp->read_cb; if (grpc_tcp_trace) { size_t i; - gpr_log(GPR_DEBUG, "read: success=%d", success); - for (i = 0; i < tcp->incoming_buffer->count; i++) { - char *dump = 
gpr_dump_slice(tcp->incoming_buffer->slices[i], - GPR_DUMP_HEX | GPR_DUMP_ASCII); + gpr_log(GPR_DEBUG, "read: status=%d", status); + for (i = 0; i < nslices; i++) { + char *dump = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "READ %p: %s", tcp, dump); gpr_free(dump); } } tcp->read_cb = NULL; - tcp->incoming_buffer = NULL; - cb->cb(cb->cb_arg, success); + cb(tcp->read_user_data, slices, nslices, status); } +#define INLINE_SLICE_BUFFER_SIZE 8 #define MAX_READ_IOVEC 4 -static void tcp_continue_read(grpc_tcp *tcp) { +static void grpc_tcp_continue_read(grpc_tcp *tcp) { + gpr_slice static_read_slices[INLINE_SLICE_BUFFER_SIZE]; struct msghdr msg; struct iovec iov[MAX_READ_IOVEC]; ssize_t read_bytes; - size_t i; + ssize_t allocated_bytes; + struct grpc_tcp_slice_state read_state; + gpr_slice *final_slices; + size_t final_nslices; GPR_ASSERT(!tcp->finished_edge); - GPR_ASSERT(tcp->iov_size <= MAX_READ_IOVEC); - GPR_ASSERT(tcp->incoming_buffer->count <= MAX_READ_IOVEC); GRPC_TIMER_BEGIN(GRPC_PTAG_HANDLE_READ, 0); + slice_state_init(&read_state, static_read_slices, INLINE_SLICE_BUFFER_SIZE, + 0); - while (tcp->incoming_buffer->count < (size_t)tcp->iov_size) { - gpr_slice_buffer_add_indexed(tcp->incoming_buffer, - gpr_slice_malloc(tcp->slice_size)); - } - for (i = 0; i < tcp->incoming_buffer->count; i++) { - iov[i].iov_base = GPR_SLICE_START_PTR(tcp->incoming_buffer->slices[i]); - iov[i].iov_len = GPR_SLICE_LENGTH(tcp->incoming_buffer->slices[i]); - } + allocated_bytes = slice_state_append_blocks_into_iovec( + &read_state, iov, tcp->iov_size, tcp->slice_size); msg.msg_name = NULL; msg.msg_namelen = 0; @@ -192,105 +362,106 @@ static void tcp_continue_read(grpc_tcp *tcp) { } while (read_bytes < 0 && errno == EINTR); GRPC_TIMER_END(GRPC_PTAG_RECVMSG, 0); + if (read_bytes < allocated_bytes) { + /* TODO(klempner): Consider a second read first, in hopes of getting a + * quick EAGAIN and saving a bunch of allocations. */ + slice_state_remove_last(&read_state, read_bytes < 0 + ? allocated_bytes + : allocated_bytes - read_bytes); + } + if (read_bytes < 0) { - /* NB: After calling call_read_cb a parallel call of the read handler may + /* NB: After calling the user_cb a parallel call of the read handler may * be running. 
*/ if (errno == EAGAIN) { if (tcp->iov_size > 1) { tcp->iov_size /= 2; } - /* We've consumed the edge, request a new one */ - grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); + if (slice_state_has_available(&read_state)) { + /* TODO(klempner): We should probably do the call into the application + without all this junk on the stack */ + /* FIXME(klempner): Refcount properly */ + slice_state_transfer_ownership(&read_state, &final_slices, + &final_nslices); + tcp->finished_edge = 1; + call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK); + slice_state_destroy(&read_state); + grpc_tcp_unref(tcp); + } else { + /* We've consumed the edge, request a new one */ + slice_state_destroy(&read_state); + grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); + } } else { /* TODO(klempner): Log interesting errors */ - gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(tcp, 0); - TCP_UNREF(tcp, "read"); + call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_ERROR); + slice_state_destroy(&read_state); + grpc_tcp_unref(tcp); } } else if (read_bytes == 0) { /* 0 read size ==> end of stream */ - gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(tcp, 0); - TCP_UNREF(tcp, "read"); + if (slice_state_has_available(&read_state)) { + /* there were bytes already read: pass them up to the application */ + slice_state_transfer_ownership(&read_state, &final_slices, + &final_nslices); + call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_EOF); + } else { + call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_EOF); + } + slice_state_destroy(&read_state); + grpc_tcp_unref(tcp); } else { - GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length); - if ((size_t)read_bytes < tcp->incoming_buffer->length) { - gpr_slice_buffer_trim_end(tcp->incoming_buffer, - tcp->incoming_buffer->length - read_bytes); - } else if (tcp->iov_size < MAX_READ_IOVEC) { + if (tcp->iov_size < MAX_READ_IOVEC) { ++tcp->iov_size; } - GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length); - call_read_cb(tcp, 1); - TCP_UNREF(tcp, "read"); + GPR_ASSERT(slice_state_has_available(&read_state)); + slice_state_transfer_ownership(&read_state, &final_slices, &final_nslices); + call_read_cb(tcp, final_slices, final_nslices, GRPC_ENDPOINT_CB_OK); + slice_state_destroy(&read_state); + grpc_tcp_unref(tcp); } GRPC_TIMER_END(GRPC_PTAG_HANDLE_READ, 0); } -static void tcp_handle_read(void *arg /* grpc_tcp */, int success) { +static void grpc_tcp_handle_read(void *arg /* grpc_tcp */, int success) { grpc_tcp *tcp = (grpc_tcp *)arg; GPR_ASSERT(!tcp->finished_edge); if (!success) { - gpr_slice_buffer_reset_and_unref(tcp->incoming_buffer); - call_read_cb(tcp, 0); - TCP_UNREF(tcp, "read"); + call_read_cb(tcp, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); + grpc_tcp_unref(tcp); } else { - tcp_continue_read(tcp); + grpc_tcp_continue_read(tcp); } } -static grpc_endpoint_op_status tcp_read(grpc_endpoint *ep, - gpr_slice_buffer *incoming_buffer, - grpc_iomgr_closure *cb) { +static void grpc_tcp_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, + void *user_data) { grpc_tcp *tcp = (grpc_tcp *)ep; GPR_ASSERT(tcp->read_cb == NULL); tcp->read_cb = cb; - tcp->incoming_buffer = incoming_buffer; - gpr_slice_buffer_reset_and_unref(incoming_buffer); - TCP_REF(tcp, "read"); + tcp->read_user_data = user_data; + gpr_ref(&tcp->refcount); if (tcp->finished_edge) { tcp->finished_edge = 0; grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure); } else { - grpc_iomgr_add_delayed_callback(&tcp->read_closure, 1); + 
tcp->handle_read_closure.cb_arg = tcp; + grpc_iomgr_add_delayed_callback(&tcp->handle_read_closure, 1); } - /* TODO(ctiller): immediate return */ - return GRPC_ENDPOINT_PENDING; } #define MAX_WRITE_IOVEC 16 -static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) { +static grpc_endpoint_write_status grpc_tcp_flush(grpc_tcp *tcp) { struct msghdr msg; struct iovec iov[MAX_WRITE_IOVEC]; int iov_size; ssize_t sent_length; - ssize_t sending_length; - ssize_t trailing; - ssize_t unwind_slice_idx; - ssize_t unwind_byte_idx; + grpc_tcp_slice_state *state = &tcp->write_state; for (;;) { - sending_length = 0; - unwind_slice_idx = tcp->outgoing_slice_idx; - unwind_byte_idx = tcp->outgoing_byte_idx; - for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count && - iov_size != MAX_WRITE_IOVEC; - iov_size++) { - iov[iov_size].iov_base = - GPR_SLICE_START_PTR( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) + - tcp->outgoing_byte_idx; - iov[iov_size].iov_len = - GPR_SLICE_LENGTH( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) - - tcp->outgoing_byte_idx; - sending_length += iov[iov_size].iov_len; - tcp->outgoing_slice_idx++; - tcp->outgoing_byte_idx = 0; - } - GPR_ASSERT(iov_size > 0); + iov_size = slice_state_to_iovec(state, iov, MAX_WRITE_IOVEC); msg.msg_name = NULL; msg.msg_namelen = 0; @@ -309,75 +480,70 @@ static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) { if (sent_length < 0) { if (errno == EAGAIN) { - tcp->outgoing_slice_idx = unwind_slice_idx; - tcp->outgoing_byte_idx = unwind_byte_idx; - return GRPC_ENDPOINT_PENDING; + return GRPC_ENDPOINT_WRITE_PENDING; } else { /* TODO(klempner): Log some of these */ - return GRPC_ENDPOINT_ERROR; + slice_state_destroy(state); + return GRPC_ENDPOINT_WRITE_ERROR; } } - GPR_ASSERT(tcp->outgoing_byte_idx == 0); - trailing = sending_length - sent_length; - while (trailing > 0) { - ssize_t slice_length; - - tcp->outgoing_slice_idx--; - slice_length = GPR_SLICE_LENGTH( - tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]); - if (slice_length > trailing) { - tcp->outgoing_byte_idx = slice_length - trailing; - break; - } else { - trailing -= slice_length; - } - } + /* TODO(klempner): Probably better to batch this after we finish flushing */ + slice_state_remove_prefix(state, sent_length); - if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) { - return GRPC_ENDPOINT_DONE; + if (!slice_state_has_available(state)) { + return GRPC_ENDPOINT_WRITE_DONE; } }; } -static void tcp_handle_write(void *arg /* grpc_tcp */, int success) { +static void grpc_tcp_handle_write(void *arg /* grpc_tcp */, int success) { grpc_tcp *tcp = (grpc_tcp *)arg; - grpc_endpoint_op_status status; - grpc_iomgr_closure *cb; + grpc_endpoint_write_status write_status; + grpc_endpoint_cb_status cb_status; + grpc_endpoint_write_cb cb; if (!success) { + slice_state_destroy(&tcp->write_state); cb = tcp->write_cb; tcp->write_cb = NULL; - cb->cb(cb->cb_arg, 0); - TCP_UNREF(tcp, "write"); + cb(tcp->write_user_data, GRPC_ENDPOINT_CB_SHUTDOWN); + grpc_tcp_unref(tcp); return; } GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_CB_WRITE, 0); - status = tcp_flush(tcp); - if (status == GRPC_ENDPOINT_PENDING) { + write_status = grpc_tcp_flush(tcp); + if (write_status == GRPC_ENDPOINT_WRITE_PENDING) { grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure); } else { + slice_state_destroy(&tcp->write_state); + if (write_status == GRPC_ENDPOINT_WRITE_DONE) { + cb_status = GRPC_ENDPOINT_CB_OK; + } else { + cb_status = GRPC_ENDPOINT_CB_ERROR; + } cb = tcp->write_cb; tcp->write_cb = NULL; - 
cb->cb(cb->cb_arg, status == GRPC_ENDPOINT_DONE); - TCP_UNREF(tcp, "write"); + cb(tcp->write_user_data, cb_status); + grpc_tcp_unref(tcp); } GRPC_TIMER_END(GRPC_PTAG_TCP_CB_WRITE, 0); } -static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep, - gpr_slice_buffer *buf, - grpc_iomgr_closure *cb) { +static grpc_endpoint_write_status grpc_tcp_write(grpc_endpoint *ep, + gpr_slice *slices, + size_t nslices, + grpc_endpoint_write_cb cb, + void *user_data) { grpc_tcp *tcp = (grpc_tcp *)ep; - grpc_endpoint_op_status status; + grpc_endpoint_write_status status; if (grpc_tcp_trace) { size_t i; - for (i = 0; i < buf->count; i++) { - char *data = - gpr_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < nslices; i++) { + char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "WRITE %p: %s", tcp, data); gpr_free(data); } @@ -385,19 +551,15 @@ static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep, GRPC_TIMER_BEGIN(GRPC_PTAG_TCP_WRITE, 0); GPR_ASSERT(tcp->write_cb == NULL); + slice_state_init(&tcp->write_state, slices, nslices, nslices); - if (buf->length == 0) { - GRPC_TIMER_END(GRPC_PTAG_TCP_WRITE, 0); - return GRPC_ENDPOINT_DONE; - } - tcp->outgoing_buffer = buf; - tcp->outgoing_slice_idx = 0; - tcp->outgoing_byte_idx = 0; - - status = tcp_flush(tcp); - if (status == GRPC_ENDPOINT_PENDING) { - TCP_REF(tcp, "write"); + status = grpc_tcp_flush(tcp); + if (status == GRPC_ENDPOINT_WRITE_PENDING) { + /* TODO(klempner): Consider inlining rather than malloc for small nslices */ + slice_state_realloc(&tcp->write_state, nslices); + gpr_ref(&tcp->refcount); tcp->write_cb = cb; + tcp->write_user_data = user_data; grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_closure); } @@ -405,25 +567,27 @@ static grpc_endpoint_op_status tcp_write(grpc_endpoint *ep, return status; } -static void tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { +static void grpc_tcp_add_to_pollset(grpc_endpoint *ep, grpc_pollset *pollset) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_pollset_add_fd(pollset, tcp->em_fd); } -static void tcp_add_to_pollset_set(grpc_endpoint *ep, - grpc_pollset_set *pollset_set) { +static void grpc_tcp_add_to_pollset_set(grpc_endpoint *ep, + grpc_pollset_set *pollset_set) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_pollset_set_add_fd(pollset_set, tcp->em_fd); } -static char *tcp_get_peer(grpc_endpoint *ep) { +static char *grpc_tcp_get_peer(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; return gpr_strdup(tcp->peer_string); } static const grpc_endpoint_vtable vtable = { - tcp_read, tcp_write, tcp_add_to_pollset, tcp_add_to_pollset_set, - tcp_shutdown, tcp_destroy, tcp_get_peer}; + grpc_tcp_notify_on_read, grpc_tcp_write, + grpc_tcp_add_to_pollset, grpc_tcp_add_to_pollset_set, + grpc_tcp_shutdown, grpc_tcp_destroy, + grpc_tcp_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, const char *peer_string) { @@ -433,18 +597,21 @@ grpc_endpoint *grpc_tcp_create(grpc_fd *em_fd, size_t slice_size, tcp->fd = em_fd->fd; tcp->read_cb = NULL; tcp->write_cb = NULL; - tcp->incoming_buffer = NULL; + tcp->read_user_data = NULL; + tcp->write_user_data = NULL; tcp->slice_size = slice_size; tcp->iov_size = 1; tcp->finished_edge = 1; + slice_state_init(&tcp->write_state, NULL, 0, 0); /* paired with unref in grpc_tcp_destroy */ gpr_ref_init(&tcp->refcount, 1); tcp->em_fd = em_fd; - tcp->read_closure.cb = tcp_handle_read; + tcp->read_closure.cb = grpc_tcp_handle_read; tcp->read_closure.cb_arg = tcp; - tcp->write_closure.cb = 
tcp_handle_write; + tcp->write_closure.cb = grpc_tcp_handle_write; tcp->write_closure.cb_arg = tcp; + tcp->handle_read_closure.cb = grpc_tcp_handle_read; return &tcp->base; } diff --git a/src/core/iomgr/tcp_windows.c b/src/core/iomgr/tcp_windows.c index 58f9160ef97..901793ec43b 100644 --- a/src/core/iomgr/tcp_windows.c +++ b/src/core/iomgr/tcp_windows.c @@ -82,11 +82,13 @@ typedef struct grpc_tcp { /* Refcounting how many operations are in progress. */ gpr_refcount refcount; - grpc_iomgr_closure *read_cb; - grpc_iomgr_closure *write_cb; + grpc_endpoint_read_cb read_cb; + void *read_user_data; gpr_slice read_slice; - gpr_slice_buffer *write_slices; - gpr_slice_buffer *read_slices; + + grpc_endpoint_write_cb write_cb; + void *write_user_data; + gpr_slice_buffer write_slices; /* The IO Completion Port runs from another thread. We need some mechanism to protect ourselves when requesting a shutdown. */ @@ -96,55 +98,34 @@ typedef struct grpc_tcp { char *peer_string; } grpc_tcp; -static void tcp_free(grpc_tcp *tcp) { - grpc_winsocket_orphan(tcp->socket); - gpr_mu_destroy(&tcp->mu); - gpr_free(tcp->peer_string); - gpr_free(tcp); -} - -/*#define GRPC_TCP_REFCOUNT_DEBUG*/ -#ifdef GRPC_TCP_REFCOUNT_DEBUG -#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__) -#define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__) -static void tcp_unref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP unref %p : %s %d -> %d", tcp, - reason, tcp->refcount.count, tcp->refcount.count - 1); - if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); - } -} +static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } -static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file, - int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "TCP ref %p : %s %d -> %d", tcp, - reason, tcp->refcount.count, tcp->refcount.count + 1); - gpr_ref(&tcp->refcount); -} -#else -#define TCP_UNREF(tcp, reason) tcp_unref((tcp)) -#define TCP_REF(tcp, reason) tcp_ref((tcp)) static void tcp_unref(grpc_tcp *tcp) { if (gpr_unref(&tcp->refcount)) { - tcp_free(tcp); + gpr_slice_buffer_destroy(&tcp->write_slices); + grpc_winsocket_orphan(tcp->socket); + gpr_mu_destroy(&tcp->mu); + gpr_free(tcp->peer_string); + gpr_free(tcp); } } -static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); } -#endif - /* Asynchronous callback from the IOCP, or the background thread. */ -static int on_read(grpc_tcp *tcp, int from_iocp) { +static void on_read(void *tcpp, int from_iocp) { + grpc_tcp *tcp = (grpc_tcp *)tcpp; grpc_winsocket *socket = tcp->socket; gpr_slice sub; gpr_slice *slice = NULL; size_t nslices = 0; - int success; + grpc_endpoint_cb_status status; + grpc_endpoint_read_cb cb; grpc_winsocket_callback_info *info = &socket->read_info; + void *opaque = tcp->read_user_data; int do_abort = 0; gpr_mu_lock(&tcp->mu); + cb = tcp->read_cb; + tcp->read_cb = NULL; if (!from_iocp || tcp->shutting_down) { /* If we are here with from_iocp set to true, it means we got raced to shutting down the endpoint. 
No actual abort callback will happen @@ -158,7 +139,9 @@ static int on_read(grpc_tcp *tcp, int from_iocp) { tcp->socket->read_info.outstanding = 0; gpr_slice_unref(tcp->read_slice); } - return 0; + tcp_unref(tcp); + if (cb) cb(opaque, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); + return; } GPR_ASSERT(tcp->socket->read_info.outstanding); @@ -169,38 +152,28 @@ static int on_read(grpc_tcp *tcp, int from_iocp) { gpr_log(GPR_ERROR, "ReadFile overlapped error: %s", utf8_message); gpr_free(utf8_message); } - success = 0; gpr_slice_unref(tcp->read_slice); + status = GRPC_ENDPOINT_CB_ERROR; } else { if (info->bytes_transfered != 0) { sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered); - gpr_slice_buffer_add(tcp->read_slices, sub); - success = 1; + status = GRPC_ENDPOINT_CB_OK; + slice = ⊂ + nslices = 1; } else { gpr_slice_unref(tcp->read_slice); - success = 0; + status = GRPC_ENDPOINT_CB_EOF; } } tcp->socket->read_info.outstanding = 0; - return success; -} - -static void on_read_cb(void *tcpp, int from_iocp) { - grpc_tcp *tcp = tcpp; - grpc_iomgr_closure *cb = tcp->read_cb; - int success = on_read(tcp, from_iocp); - tcp->read_cb = NULL; - TCP_UNREF(tcp, "read"); - if (cb) { - cb->cb(cb->cb_arg, success); - } + tcp_unref(tcp); + cb(opaque, slice, nslices, status); } -static grpc_endpoint_op_status win_read(grpc_endpoint *ep, - gpr_slice_buffer *read_slices, - grpc_iomgr_closure *cb) { +static void win_notify_on_read(grpc_endpoint *ep, grpc_endpoint_read_cb cb, + void *arg) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->read_info; @@ -211,15 +184,13 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, GPR_ASSERT(!tcp->socket->read_info.outstanding); if (tcp->shutting_down) { - return GRPC_ENDPOINT_ERROR; + cb(arg, NULL, 0, GRPC_ENDPOINT_CB_SHUTDOWN); + return; } - - TCP_REF(tcp, "read"); - + tcp_ref(tcp); tcp->socket->read_info.outstanding = 1; tcp->read_cb = cb; - tcp->read_slices = read_slices; - gpr_slice_buffer_reset_and_unref(read_slices); + tcp->read_user_data = arg; tcp->read_slice = gpr_slice_malloc(8192); @@ -233,11 +204,10 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, /* Did we get data immediately ? Yay. */ if (info->wsa_error != WSAEWOULDBLOCK) { - int ok; info->bytes_transfered = bytes_read; - ok = on_read(tcp, 1); - TCP_UNREF(tcp, "read"); - return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; + /* This might heavily recurse. */ + on_read(tcp, 1); + return; } /* Otherwise, let's retry, by queuing a read. */ @@ -248,15 +218,13 @@ static grpc_endpoint_op_status win_read(grpc_endpoint *ep, if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { - int ok; info->wsa_error = wsa_error; - ok = on_read(tcp, 1); - return ok ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; + on_read(tcp, 1); + return; } } - grpc_socket_notify_on_read(tcp->socket, on_read_cb, tcp); - return GRPC_ENDPOINT_PENDING; + grpc_socket_notify_on_read(tcp->socket, on_read, tcp); } /* Asynchronous callback from the IOCP, or the background thread. 
*/ @@ -264,8 +232,9 @@ static void on_write(void *tcpp, int from_iocp) { grpc_tcp *tcp = (grpc_tcp *)tcpp; grpc_winsocket *handle = tcp->socket; grpc_winsocket_callback_info *info = &handle->write_info; - grpc_iomgr_closure *cb; - int success; + grpc_endpoint_cb_status status = GRPC_ENDPOINT_CB_OK; + grpc_endpoint_write_cb cb; + void *opaque = tcp->write_user_data; int do_abort = 0; gpr_mu_lock(&tcp->mu); @@ -282,11 +251,10 @@ static void on_write(void *tcpp, int from_iocp) { if (do_abort) { if (from_iocp) { tcp->socket->write_info.outstanding = 0; + gpr_slice_buffer_reset_and_unref(&tcp->write_slices); } - TCP_UNREF(tcp, "write"); - if (cb) { - cb->cb(cb->cb_arg, 0); - } + tcp_unref(tcp); + if (cb) cb(opaque, GRPC_ENDPOINT_CB_SHUTDOWN); return; } @@ -298,22 +266,23 @@ static void on_write(void *tcpp, int from_iocp) { gpr_log(GPR_ERROR, "WSASend overlapped error: %s", utf8_message); gpr_free(utf8_message); } - success = 0; + status = GRPC_ENDPOINT_CB_ERROR; } else { - GPR_ASSERT(info->bytes_transfered == tcp->write_slices->length); - success = 1; + GPR_ASSERT(info->bytes_transfered == tcp->write_slices.length); } + gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; - TCP_UNREF(tcp, "write"); - cb->cb(cb->cb_arg, success); + tcp_unref(tcp); + cb(opaque, status); } /* Initiates a write. */ -static grpc_endpoint_op_status win_write(grpc_endpoint *ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { +static grpc_endpoint_write_status win_write(grpc_endpoint *ep, + gpr_slice *slices, size_t nslices, + grpc_endpoint_write_cb cb, + void *arg) { grpc_tcp *tcp = (grpc_tcp *)ep; grpc_winsocket *socket = tcp->socket; grpc_winsocket_callback_info *info = &socket->write_info; @@ -326,26 +295,28 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, GPR_ASSERT(!tcp->socket->write_info.outstanding); if (tcp->shutting_down) { - return GRPC_ENDPOINT_ERROR; + return GRPC_ENDPOINT_WRITE_ERROR; } - TCP_REF(tcp, "write"); + tcp_ref(tcp); tcp->socket->write_info.outstanding = 1; tcp->write_cb = cb; - tcp->write_slices = slices; + tcp->write_user_data = arg; + + gpr_slice_buffer_addn(&tcp->write_slices, slices, nslices); - if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) { - buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count); + if (tcp->write_slices.count > GPR_ARRAY_SIZE(local_buffers)) { + buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices.count); allocated = buffers; } - for (i = 0; i < tcp->write_slices->count; i++) { - buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]); - buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]); + for (i = 0; i < tcp->write_slices.count; i++) { + buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices.slices[i]); + buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices.slices[i]); } /* First, let's try a synchronous, non-blocking write. */ - status = WSASend(socket->socket, buffers, tcp->write_slices->count, + status = WSASend(socket->socket, buffers, tcp->write_slices.count, &bytes_sent, 0, NULL, NULL); info->wsa_error = status == 0 ? 0 : WSAGetLastError(); @@ -353,10 +324,10 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, connection that has its send queue filled up. But if we don't, then we can avoid doing an async write operation at all. 
*/ if (info->wsa_error != WSAEWOULDBLOCK) { - grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR; + grpc_endpoint_write_status ret = GRPC_ENDPOINT_WRITE_ERROR; if (status == 0) { - ret = GRPC_ENDPOINT_DONE; - GPR_ASSERT(bytes_sent == tcp->write_slices->length); + ret = GRPC_ENDPOINT_WRITE_DONE; + GPR_ASSERT(bytes_sent == tcp->write_slices.length); } else { if (socket->read_info.wsa_error != WSAECONNRESET) { char *utf8_message = gpr_format_message(info->wsa_error); @@ -365,31 +336,33 @@ static grpc_endpoint_op_status win_write(grpc_endpoint *ep, } } if (allocated) gpr_free(allocated); + gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; - TCP_UNREF(tcp, "write"); + tcp_unref(tcp); return ret; } /* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same operation, this time asynchronously. */ memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED)); - status = WSASend(socket->socket, buffers, tcp->write_slices->count, + status = WSASend(socket->socket, buffers, tcp->write_slices.count, &bytes_sent, 0, &socket->write_info.overlapped, NULL); if (allocated) gpr_free(allocated); if (status != 0) { int wsa_error = WSAGetLastError(); if (wsa_error != WSA_IO_PENDING) { + gpr_slice_buffer_reset_and_unref(&tcp->write_slices); tcp->socket->write_info.outstanding = 0; - TCP_UNREF(tcp, "write"); - return GRPC_ENDPOINT_ERROR; + tcp_unref(tcp); + return GRPC_ENDPOINT_WRITE_ERROR; } } /* As all is now setup, we can now ask for the IOCP notification. It may trigger the callback immediately however, but no matter. */ grpc_socket_notify_on_write(socket, on_write, tcp); - return GRPC_ENDPOINT_PENDING; + return GRPC_ENDPOINT_WRITE_PENDING; } static void win_add_to_pollset(grpc_endpoint *ep, grpc_pollset *ps) { @@ -414,17 +387,19 @@ static void win_add_to_pollset_set(grpc_endpoint *ep, grpc_pollset_set *pss) { concurrent access of the data structure in that regard. */ static void win_shutdown(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; + int extra_refs = 0; gpr_mu_lock(&tcp->mu); /* At that point, what may happen is that we're already inside the IOCP callback. See the comments in on_read and on_write. 
*/ tcp->shutting_down = 1; - grpc_winsocket_shutdown(tcp->socket); + extra_refs = grpc_winsocket_shutdown(tcp->socket); + while (extra_refs--) tcp_ref(tcp); gpr_mu_unlock(&tcp->mu); } static void win_destroy(grpc_endpoint *ep) { grpc_tcp *tcp = (grpc_tcp *)ep; - TCP_UNREF(tcp, "destroy"); + tcp_unref(tcp); } static char *win_get_peer(grpc_endpoint *ep) { @@ -433,8 +408,8 @@ static char *win_get_peer(grpc_endpoint *ep) { } static grpc_endpoint_vtable vtable = { - win_read, win_write, win_add_to_pollset, win_add_to_pollset_set, - win_shutdown, win_destroy, win_get_peer}; + win_notify_on_read, win_write, win_add_to_pollset, win_add_to_pollset_set, + win_shutdown, win_destroy, win_get_peer}; grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp)); @@ -442,6 +417,7 @@ grpc_endpoint *grpc_tcp_create(grpc_winsocket *socket, char *peer_string) { tcp->base.vtable = &vtable; tcp->socket = socket; gpr_mu_init(&tcp->mu); + gpr_slice_buffer_init(&tcp->write_slices); gpr_ref_init(&tcp->refcount, 1); tcp->peer_string = gpr_strdup(peer_string); return &tcp->base; diff --git a/src/core/security/secure_endpoint.c b/src/core/security/secure_endpoint.c index b696e384fca..81b3e33cb2c 100644 --- a/src/core/security/secure_endpoint.c +++ b/src/core/security/secure_endpoint.c @@ -49,15 +49,15 @@ typedef struct { struct tsi_frame_protector *protector; gpr_mu protector_mu; /* saved upper level callbacks and user_data. */ - grpc_iomgr_closure *read_cb; - grpc_iomgr_closure *write_cb; - grpc_iomgr_closure on_read; - gpr_slice_buffer *read_buffer; - gpr_slice_buffer source_buffer; + grpc_endpoint_read_cb read_cb; + void *read_user_data; + grpc_endpoint_write_cb write_cb; + void *write_user_data; /* saved handshaker leftover data to unprotect. 
*/ gpr_slice_buffer leftover_bytes; /* buffers for read and write */ gpr_slice read_staging_buffer; + gpr_slice_buffer input_buffer; gpr_slice write_staging_buffer; gpr_slice_buffer output_buffer; @@ -67,91 +67,62 @@ typedef struct { int grpc_trace_secure_endpoint = 0; +static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } + static void destroy(secure_endpoint *secure_ep) { secure_endpoint *ep = secure_ep; grpc_endpoint_destroy(ep->wrapped_ep); tsi_frame_protector_destroy(ep->protector); gpr_slice_buffer_destroy(&ep->leftover_bytes); gpr_slice_unref(ep->read_staging_buffer); + gpr_slice_buffer_destroy(&ep->input_buffer); gpr_slice_unref(ep->write_staging_buffer); gpr_slice_buffer_destroy(&ep->output_buffer); - gpr_slice_buffer_destroy(&ep->source_buffer); gpr_mu_destroy(&ep->protector_mu); gpr_free(ep); } -/*#define GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG*/ -#ifdef GRPC_SECURE_ENDPOINT_REFCOUNT_DEBUG -#define SECURE_ENDPOINT_UNREF(ep, reason) \ - secure_endpoint_unref((ep), (reason), __FILE__, __LINE__) -#define SECURE_ENDPOINT_REF(ep, reason) \ - secure_endpoint_ref((ep), (reason), __FILE__, __LINE__) -static void secure_endpoint_unref(secure_endpoint *ep, const char *reason, - const char *file, int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP unref %p : %s %d -> %d", - ep, reason, ep->ref.count, ep->ref.count - 1); - if (gpr_unref(&ep->ref)) { - destroy(ep); - } -} - -static void secure_endpoint_ref(secure_endpoint *ep, const char *reason, - const char *file, int line) { - gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "SECENDP ref %p : %s %d -> %d", - ep, reason, ep->ref.count, ep->ref.count + 1); - gpr_ref(&ep->ref); -} -#else -#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep)) -#define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep)) static void secure_endpoint_unref(secure_endpoint *ep) { if (gpr_unref(&ep->ref)) { destroy(ep); } } -static void secure_endpoint_ref(secure_endpoint *ep) { gpr_ref(&ep->ref); } -#endif - static void flush_read_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, gpr_uint8 **end) { - gpr_slice_buffer_add(ep->read_buffer, ep->read_staging_buffer); + gpr_slice_buffer_add(&ep->input_buffer, ep->read_staging_buffer); ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer); *end = GPR_SLICE_END_PTR(ep->read_staging_buffer); } -static void call_read_cb(secure_endpoint *ep, int success) { +static void call_read_cb(secure_endpoint *ep, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { if (grpc_trace_secure_endpoint) { size_t i; - for (i = 0; i < ep->read_buffer->count; i++) { - char *data = gpr_dump_slice(ep->read_buffer->slices[i], - GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < nslices; i++) { + char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "READ %p: %s", ep, data); gpr_free(data); } } - ep->read_buffer = NULL; - ep->read_cb->cb(ep->read_cb->cb_arg, success); - SECURE_ENDPOINT_UNREF(ep, "read"); + ep->read_cb(ep->read_user_data, slices, nslices, error); + secure_endpoint_unref(ep); } -static int on_read(void *user_data, int success) { +static void on_read(void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { unsigned i; gpr_uint8 keep_looping = 0; + size_t input_buffer_count = 0; tsi_result result = TSI_OK; secure_endpoint *ep = (secure_endpoint *)user_data; gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer); gpr_uint8 *end = 
GPR_SLICE_END_PTR(ep->read_staging_buffer); - if (!success) { - gpr_slice_buffer_reset_and_unref(ep->read_buffer); - return 0; - } - /* TODO(yangg) check error, maybe bail out early */ - for (i = 0; i < ep->source_buffer.count; i++) { - gpr_slice encrypted = ep->source_buffer.slices[i]; + for (i = 0; i < nslices; i++) { + gpr_slice encrypted = slices[i]; gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted); size_t message_size = GPR_SLICE_LENGTH(encrypted); @@ -190,7 +161,7 @@ static int on_read(void *user_data, int success) { if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) { gpr_slice_buffer_add( - ep->read_buffer, + &ep->input_buffer, gpr_slice_split_head( &ep->read_staging_buffer, (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer)))); @@ -198,53 +169,38 @@ static int on_read(void *user_data, int success) { /* TODO(yangg) experiment with moving this block after read_cb to see if it helps latency */ - gpr_slice_buffer_reset_and_unref(&ep->source_buffer); + for (i = 0; i < nslices; i++) { + gpr_slice_unref(slices[i]); + } if (result != TSI_OK) { - gpr_slice_buffer_reset_and_unref(ep->read_buffer); - return 0; + gpr_slice_buffer_reset_and_unref(&ep->input_buffer); + call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR); + return; } - - return 1; -} - -static void on_read_cb(void *user_data, int success) { - call_read_cb(user_data, on_read(user_data, success)); + /* The upper level will unref the slices. */ + input_buffer_count = ep->input_buffer.count; + ep->input_buffer.count = 0; + call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error); } -static grpc_endpoint_op_status endpoint_read(grpc_endpoint *secure_ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { +static void endpoint_notify_on_read(grpc_endpoint *secure_ep, + grpc_endpoint_read_cb cb, void *user_data) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - int immediate_read_success = -1; ep->read_cb = cb; - ep->read_buffer = slices; - gpr_slice_buffer_reset_and_unref(ep->read_buffer); + ep->read_user_data = user_data; - if (ep->leftover_bytes.count) { - gpr_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer); - GPR_ASSERT(ep->leftover_bytes.count == 0); - return on_read(ep, 1) ? GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; - } + secure_endpoint_ref(ep); - SECURE_ENDPOINT_REF(ep, "read"); - - switch ( - grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read)) { - case GRPC_ENDPOINT_DONE: - immediate_read_success = on_read(ep, 1); - break; - case GRPC_ENDPOINT_PENDING: - return GRPC_ENDPOINT_PENDING; - case GRPC_ENDPOINT_ERROR: - immediate_read_success = on_read(ep, 0); - break; + if (ep->leftover_bytes.count) { + size_t leftover_nslices = ep->leftover_bytes.count; + ep->leftover_bytes.count = 0; + on_read(ep, ep->leftover_bytes.slices, leftover_nslices, + GRPC_ENDPOINT_CB_OK); + return; } - GPR_ASSERT(immediate_read_success != -1); - SECURE_ENDPOINT_UNREF(ep, "read"); - - return immediate_read_success ? 
GRPC_ENDPOINT_DONE : GRPC_ENDPOINT_ERROR; + grpc_endpoint_notify_on_read(ep->wrapped_ep, on_read, ep); } static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, @@ -255,28 +211,36 @@ static void flush_write_staging_buffer(secure_endpoint *ep, gpr_uint8 **cur, *end = GPR_SLICE_END_PTR(ep->write_staging_buffer); } -static grpc_endpoint_op_status endpoint_write(grpc_endpoint *secure_ep, - gpr_slice_buffer *slices, - grpc_iomgr_closure *cb) { +static void on_write(void *data, grpc_endpoint_cb_status error) { + secure_endpoint *ep = data; + ep->write_cb(ep->write_user_data, error); + secure_endpoint_unref(ep); +} + +static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep, + gpr_slice *slices, + size_t nslices, + grpc_endpoint_write_cb cb, + void *user_data) { unsigned i; + size_t output_buffer_count = 0; tsi_result result = TSI_OK; secure_endpoint *ep = (secure_endpoint *)secure_ep; gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer); gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer); - - gpr_slice_buffer_reset_and_unref(&ep->output_buffer); + grpc_endpoint_write_status status; + GPR_ASSERT(ep->output_buffer.count == 0); if (grpc_trace_secure_endpoint) { - for (i = 0; i < slices->count; i++) { - char *data = - gpr_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); + for (i = 0; i < nslices; i++) { + char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data); gpr_free(data); } } - for (i = 0; i < slices->count; i++) { - gpr_slice plain = slices->slices[i]; + for (i = 0; i < nslices; i++) { + gpr_slice plain = slices[i]; gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain); size_t message_size = GPR_SLICE_LENGTH(plain); while (message_size > 0) { @@ -326,13 +290,29 @@ static grpc_endpoint_op_status endpoint_write(grpc_endpoint *secure_ep, } } + for (i = 0; i < nslices; i++) { + gpr_slice_unref(slices[i]); + } + if (result != TSI_OK) { /* TODO(yangg) do different things according to the error type? */ gpr_slice_buffer_reset_and_unref(&ep->output_buffer); - return GRPC_ENDPOINT_ERROR; + return GRPC_ENDPOINT_WRITE_ERROR; } - return grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb); + /* clear output_buffer and let the lower level handle its slices. 
*/ + output_buffer_count = ep->output_buffer.count; + ep->output_buffer.count = 0; + ep->write_cb = cb; + ep->write_user_data = user_data; + /* Need to keep the endpoint alive across a transport */ + secure_endpoint_ref(ep); + status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices, + output_buffer_count, on_write, ep); + if (status != GRPC_ENDPOINT_WRITE_PENDING) { + secure_endpoint_unref(ep); + } + return status; } static void endpoint_shutdown(grpc_endpoint *secure_ep) { @@ -340,9 +320,9 @@ static void endpoint_shutdown(grpc_endpoint *secure_ep) { grpc_endpoint_shutdown(ep->wrapped_ep); } -static void endpoint_destroy(grpc_endpoint *secure_ep) { +static void endpoint_unref(grpc_endpoint *secure_ep) { secure_endpoint *ep = (secure_endpoint *)secure_ep; - SECURE_ENDPOINT_UNREF(ep, "destroy"); + secure_endpoint_unref(ep); } static void endpoint_add_to_pollset(grpc_endpoint *secure_ep, @@ -363,9 +343,9 @@ static char *endpoint_get_peer(grpc_endpoint *secure_ep) { } static const grpc_endpoint_vtable vtable = { - endpoint_read, endpoint_write, + endpoint_notify_on_read, endpoint_write, endpoint_add_to_pollset, endpoint_add_to_pollset_set, - endpoint_shutdown, endpoint_destroy, + endpoint_shutdown, endpoint_unref, endpoint_get_peer}; grpc_endpoint *grpc_secure_endpoint_create( @@ -383,10 +363,8 @@ grpc_endpoint *grpc_secure_endpoint_create( } ep->write_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); ep->read_staging_buffer = gpr_slice_malloc(STAGING_BUFFER_SIZE); + gpr_slice_buffer_init(&ep->input_buffer); gpr_slice_buffer_init(&ep->output_buffer); - gpr_slice_buffer_init(&ep->source_buffer); - ep->read_buffer = NULL; - grpc_iomgr_closure_init(&ep->on_read, on_read_cb, ep); gpr_mu_init(&ep->protector_mu); gpr_ref_init(&ep->ref, 1); return &ep->base; diff --git a/src/core/security/secure_transport_setup.c b/src/core/security/secure_transport_setup.c index bf0079577e4..0c3572b53c4 100644 --- a/src/core/security/secure_transport_setup.c +++ b/src/core/security/secure_transport_setup.c @@ -50,17 +50,16 @@ typedef struct { grpc_endpoint *wrapped_endpoint; grpc_endpoint *secure_endpoint; gpr_slice_buffer left_overs; - gpr_slice_buffer incoming; - gpr_slice_buffer outgoing; grpc_secure_transport_setup_done_cb cb; void *user_data; - grpc_iomgr_closure on_handshake_data_sent_to_peer; - grpc_iomgr_closure on_handshake_data_received_from_peer; } grpc_secure_transport_setup; -static void on_handshake_data_received_from_peer(void *setup, int success); +static void on_handshake_data_received_from_peer(void *setup, gpr_slice *slices, + size_t nslices, + grpc_endpoint_cb_status error); -static void on_handshake_data_sent_to_peer(void *setup, int success); +static void on_handshake_data_sent_to_peer(void *setup, + grpc_endpoint_cb_status error); static void secure_transport_setup_done(grpc_secure_transport_setup *s, int is_success) { @@ -79,8 +78,6 @@ static void secure_transport_setup_done(grpc_secure_transport_setup *s, if (s->handshaker != NULL) tsi_handshaker_destroy(s->handshaker); if (s->handshake_buffer != NULL) gpr_free(s->handshake_buffer); gpr_slice_buffer_destroy(&s->left_overs); - gpr_slice_buffer_destroy(&s->outgoing); - gpr_slice_buffer_destroy(&s->incoming); GRPC_SECURITY_CONNECTOR_UNREF(s->connector, "secure_transport_setup"); gpr_free(s); } @@ -105,8 +102,6 @@ static void on_peer_checked(void *user_data, grpc_security_status status) { s->secure_endpoint = grpc_secure_endpoint_create(protector, s->wrapped_endpoint, s->left_overs.slices, s->left_overs.count); - 
s->left_overs.count = 0; - s->left_overs.length = 0; secure_transport_setup_done(s, 1); return; } @@ -137,6 +132,7 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) { size_t offset = 0; tsi_result result = TSI_OK; gpr_slice to_send; + grpc_endpoint_write_status write_status; do { size_t to_send_size = s->handshake_buffer_size - offset; @@ -159,25 +155,28 @@ static void send_handshake_bytes_to_peer(grpc_secure_transport_setup *s) { to_send = gpr_slice_from_copied_buffer((const char *)s->handshake_buffer, offset); - gpr_slice_buffer_reset_and_unref(&s->outgoing); - gpr_slice_buffer_add(&s->outgoing, to_send); /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - switch (grpc_endpoint_write(s->wrapped_endpoint, &s->outgoing, - &s->on_handshake_data_sent_to_peer)) { - case GRPC_ENDPOINT_ERROR: - gpr_log(GPR_ERROR, "Could not send handshake data to peer."); - secure_transport_setup_done(s, 0); - break; - case GRPC_ENDPOINT_DONE: - on_handshake_data_sent_to_peer(s, 1); - break; - case GRPC_ENDPOINT_PENDING: - break; + write_status = grpc_endpoint_write(s->wrapped_endpoint, &to_send, 1, + on_handshake_data_sent_to_peer, s); + if (write_status == GRPC_ENDPOINT_WRITE_ERROR) { + gpr_log(GPR_ERROR, "Could not send handshake data to peer."); + secure_transport_setup_done(s, 0); + } else if (write_status == GRPC_ENDPOINT_WRITE_DONE) { + on_handshake_data_sent_to_peer(s, GRPC_ENDPOINT_CB_OK); + } +} + +static void cleanup_slices(gpr_slice *slices, size_t num_slices) { + size_t i; + for (i = 0; i < num_slices; i++) { + gpr_slice_unref(slices[i]); } } -static void on_handshake_data_received_from_peer(void *setup, int success) { +static void on_handshake_data_received_from_peer( + void *setup, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { grpc_secure_transport_setup *s = setup; size_t consumed_slice_size = 0; tsi_result result = TSI_OK; @@ -185,37 +184,32 @@ static void on_handshake_data_received_from_peer(void *setup, int success) { size_t num_left_overs; int has_left_overs_in_current_slice = 0; - if (!success) { + if (error != GRPC_ENDPOINT_CB_OK) { gpr_log(GPR_ERROR, "Read failed."); + cleanup_slices(slices, nslices); secure_transport_setup_done(s, 0); return; } - for (i = 0; i < s->incoming.count; i++) { - consumed_slice_size = GPR_SLICE_LENGTH(s->incoming.slices[i]); + for (i = 0; i < nslices; i++) { + consumed_slice_size = GPR_SLICE_LENGTH(slices[i]); result = tsi_handshaker_process_bytes_from_peer( - s->handshaker, GPR_SLICE_START_PTR(s->incoming.slices[i]), - &consumed_slice_size); + s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size); if (!tsi_handshaker_is_in_progress(s->handshaker)) break; } if (tsi_handshaker_is_in_progress(s->handshaker)) { /* We may need more data. 
*/ if (result == TSI_INCOMPLETE_DATA) { - switch (grpc_endpoint_read(s->wrapped_endpoint, &s->incoming, - &s->on_handshake_data_received_from_peer)) { - case GRPC_ENDPOINT_DONE: - on_handshake_data_received_from_peer(s, 1); - break; - case GRPC_ENDPOINT_ERROR: - on_handshake_data_received_from_peer(s, 0); - break; - case GRPC_ENDPOINT_PENDING: - break; - } + /* TODO(klempner,jboeuf): This should probably use the client setup + deadline */ + grpc_endpoint_notify_on_read(s->wrapped_endpoint, + on_handshake_data_received_from_peer, setup); + cleanup_slices(slices, nslices); return; } else { send_handshake_bytes_to_peer(s); + cleanup_slices(slices, nslices); return; } } @@ -223,40 +217,42 @@ static void on_handshake_data_received_from_peer(void *setup, int success) { if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshake failed with error %s", tsi_result_to_string(result)); + cleanup_slices(slices, nslices); secure_transport_setup_done(s, 0); return; } /* Handshake is done and successful this point. */ has_left_overs_in_current_slice = - (consumed_slice_size < GPR_SLICE_LENGTH(s->incoming.slices[i])); - num_left_overs = - (has_left_overs_in_current_slice ? 1 : 0) + s->incoming.count - i - 1; + (consumed_slice_size < GPR_SLICE_LENGTH(slices[i])); + num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1; if (num_left_overs == 0) { + cleanup_slices(slices, nslices); check_peer(s); return; } + cleanup_slices(slices, nslices - num_left_overs); + /* Put the leftovers in our buffer (ownership transfered). */ if (has_left_overs_in_current_slice) { - gpr_slice_buffer_add( - &s->left_overs, - gpr_slice_split_tail(&s->incoming.slices[i], consumed_slice_size)); - gpr_slice_unref( - s->incoming.slices[i]); /* split_tail above increments refcount. */ + gpr_slice_buffer_add(&s->left_overs, + gpr_slice_split_tail(&slices[i], consumed_slice_size)); + gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */ } gpr_slice_buffer_addn( - &s->left_overs, &s->incoming.slices[i + 1], + &s->left_overs, &slices[i + 1], num_left_overs - (size_t)has_left_overs_in_current_slice); check_peer(s); } /* If setup is NULL, the setup is done. */ -static void on_handshake_data_sent_to_peer(void *setup, int success) { +static void on_handshake_data_sent_to_peer(void *setup, + grpc_endpoint_cb_status error) { grpc_secure_transport_setup *s = setup; /* Make sure that write is OK. 
*/ - if (!success) { - gpr_log(GPR_ERROR, "Write failed."); + if (error != GRPC_ENDPOINT_CB_OK) { + gpr_log(GPR_ERROR, "Write failed with error %d.", error); if (setup != NULL) secure_transport_setup_done(s, 0); return; } @@ -265,17 +261,8 @@ static void on_handshake_data_sent_to_peer(void *setup, int success) { if (tsi_handshaker_is_in_progress(s->handshaker)) { /* TODO(klempner,jboeuf): This should probably use the client setup deadline */ - switch (grpc_endpoint_read(s->wrapped_endpoint, &s->incoming, - &s->on_handshake_data_received_from_peer)) { - case GRPC_ENDPOINT_ERROR: - on_handshake_data_received_from_peer(s, 0); - break; - case GRPC_ENDPOINT_PENDING: - break; - case GRPC_ENDPOINT_DONE: - on_handshake_data_received_from_peer(s, 1); - break; - } + grpc_endpoint_notify_on_read(s->wrapped_endpoint, + on_handshake_data_received_from_peer, setup); } else { check_peer(s); } @@ -301,12 +288,6 @@ void grpc_setup_secure_transport(grpc_security_connector *connector, s->wrapped_endpoint = nonsecure_endpoint; s->user_data = user_data; s->cb = cb; - grpc_iomgr_closure_init(&s->on_handshake_data_sent_to_peer, - on_handshake_data_sent_to_peer, s); - grpc_iomgr_closure_init(&s->on_handshake_data_received_from_peer, - on_handshake_data_received_from_peer, s); gpr_slice_buffer_init(&s->left_overs); - gpr_slice_buffer_init(&s->outgoing); - gpr_slice_buffer_init(&s->incoming); send_handshake_bytes_to_peer(s); } diff --git a/src/core/support/slice_buffer.c b/src/core/support/slice_buffer.c index 6482ef9c9fe..987d5cb9b55 100644 --- a/src/core/support/slice_buffer.c +++ b/src/core/support/slice_buffer.c @@ -207,25 +207,3 @@ void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) { src->count = 0; src->length = 0; } - -void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n) { - GPR_ASSERT(n <= sb->length); - sb->length -= n; - for (;;) { - size_t idx = sb->count - 1; - gpr_slice slice = sb->slices[idx]; - size_t slice_len = GPR_SLICE_LENGTH(slice); - if (slice_len > n) { - sb->slices[idx] = gpr_slice_sub_no_ref(slice, 0, slice_len - n); - return; - } else if (slice_len == n) { - gpr_slice_unref(slice); - sb->count = idx; - return; - } else { - gpr_slice_unref(slice); - n -= slice_len; - sb->count = idx; - } - } -} diff --git a/src/core/transport/chttp2/internal.h b/src/core/transport/chttp2/internal.h index a1b773b1cad..42cf0ecd5be 100644 --- a/src/core/transport/chttp2/internal.h +++ b/src/core/transport/chttp2/internal.h @@ -214,8 +214,6 @@ typedef struct { grpc_chttp2_hpack_compressor hpack_compressor; /** is this a client? 
*/ gpr_uint8 is_client; - /** callback for when writing is done */ - grpc_iomgr_closure done_cb; } grpc_chttp2_transport_writing; struct grpc_chttp2_transport_parsing { @@ -331,11 +329,8 @@ struct grpc_chttp2_transport { /** closure to execute writing */ grpc_iomgr_closure writing_action; - /** closure to finish reading from the endpoint */ - grpc_iomgr_closure recv_data; - - /** incoming read bytes */ - gpr_slice_buffer read_buffer; + /** closure to start reading from the endpoint */ + grpc_iomgr_closure reading_action; /** address to place a newly accepted stream - set and unset by grpc_chttp2_parsing_accept_stream; used by init_stream to @@ -468,7 +463,8 @@ int grpc_chttp2_unlocking_check_writes(grpc_chttp2_transport_global *global, grpc_chttp2_transport_writing *writing); void grpc_chttp2_perform_writes( grpc_chttp2_transport_writing *transport_writing, grpc_endpoint *endpoint); -void grpc_chttp2_terminate_writing(void *transport_writing, int success); +void grpc_chttp2_terminate_writing( + grpc_chttp2_transport_writing *transport_writing, int success); void grpc_chttp2_cleanup_writing(grpc_chttp2_transport_global *global, grpc_chttp2_transport_writing *writing); diff --git a/src/core/transport/chttp2/writing.c b/src/core/transport/chttp2/writing.c index 2c8c48f47b4..123061b3fcc 100644 --- a/src/core/transport/chttp2/writing.c +++ b/src/core/transport/chttp2/writing.c @@ -37,6 +37,7 @@ #include static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing); +static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status); int grpc_chttp2_unlocking_check_writes( grpc_chttp2_transport_global *transport_global, @@ -164,15 +165,16 @@ void grpc_chttp2_perform_writes( GPR_ASSERT(transport_writing->outbuf.count > 0); GPR_ASSERT(endpoint); - switch (grpc_endpoint_write(endpoint, &transport_writing->outbuf, - &transport_writing->done_cb)) { - case GRPC_ENDPOINT_DONE: + switch (grpc_endpoint_write(endpoint, transport_writing->outbuf.slices, + transport_writing->outbuf.count, finish_write_cb, + transport_writing)) { + case GRPC_ENDPOINT_WRITE_DONE: grpc_chttp2_terminate_writing(transport_writing, 1); break; - case GRPC_ENDPOINT_ERROR: + case GRPC_ENDPOINT_WRITE_ERROR: grpc_chttp2_terminate_writing(transport_writing, 0); break; - case GRPC_ENDPOINT_PENDING: + case GRPC_ENDPOINT_WRITE_PENDING: break; } } @@ -207,6 +209,12 @@ static void finalize_outbuf(grpc_chttp2_transport_writing *transport_writing) { } } +static void finish_write_cb(void *tw, grpc_endpoint_cb_status write_status) { + grpc_chttp2_transport_writing *transport_writing = tw; + grpc_chttp2_terminate_writing(transport_writing, + write_status == GRPC_ENDPOINT_CB_OK); +} + void grpc_chttp2_cleanup_writing( grpc_chttp2_transport_global *transport_global, grpc_chttp2_transport_writing *transport_writing) { @@ -235,5 +243,6 @@ void grpc_chttp2_cleanup_writing( grpc_chttp2_list_add_read_write_state_changed(transport_global, stream_global); } - gpr_slice_buffer_reset_and_unref(&transport_writing->outbuf); + transport_writing->outbuf.count = 0; + transport_writing->outbuf.length = 0; } diff --git a/src/core/transport/chttp2_transport.c b/src/core/transport/chttp2_transport.c index 8caa10c9389..1bbd210e466 100644 --- a/src/core/transport/chttp2_transport.c +++ b/src/core/transport/chttp2_transport.c @@ -84,13 +84,15 @@ static void unlock_check_read_write_state(grpc_chttp2_transport *t); /* forward declarations of various callbacks that we'll build closures around */ static void writing_action(void *t, int 
iomgr_success_ignored); +static void reading_action(void *t, int iomgr_success_ignored); /** Set a transport level setting, and push it to our peer */ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, gpr_uint32 value); /** Endpoint callback to process incoming data */ -static void recv_data(void *tp, int success); +static void recv_data(void *tp, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error); /** Start disconnection chain */ static void drop_connection(grpc_chttp2_transport *t); @@ -141,7 +143,6 @@ static void destruct_transport(grpc_chttp2_transport *t) { grpc_chttp2_hpack_compressor_destroy(&t->writing.hpack_compressor); gpr_slice_buffer_destroy(&t->parsing.qbuf); - gpr_slice_buffer_destroy(&t->read_buffer); grpc_chttp2_hpack_parser_destroy(&t->parsing.hpack_parser); grpc_chttp2_goaway_parser_destroy(&t->parsing.goaway_parser); @@ -248,16 +249,12 @@ static void init_transport(grpc_chttp2_transport *t, gpr_slice_buffer_init(&t->writing.outbuf); grpc_chttp2_hpack_compressor_init(&t->writing.hpack_compressor, mdctx); grpc_iomgr_closure_init(&t->writing_action, writing_action, t); + grpc_iomgr_closure_init(&t->reading_action, reading_action, t); gpr_slice_buffer_init(&t->parsing.qbuf); grpc_chttp2_goaway_parser_init(&t->parsing.goaway_parser); grpc_chttp2_hpack_parser_init(&t->parsing.hpack_parser, t->metadata_context); - grpc_iomgr_closure_init(&t->writing.done_cb, grpc_chttp2_terminate_writing, - &t->writing); - grpc_iomgr_closure_init(&t->recv_data, recv_data, t); - gpr_slice_buffer_init(&t->read_buffer); - if (is_client) { gpr_slice_buffer_add( &t->global.qbuf, @@ -505,8 +502,8 @@ static void push_setting(grpc_chttp2_transport *t, grpc_chttp2_setting_id id, } } -void grpc_chttp2_terminate_writing(void *transport_writing_ptr, int success) { - grpc_chttp2_transport_writing *transport_writing = transport_writing_ptr; +void grpc_chttp2_terminate_writing( + grpc_chttp2_transport_writing *transport_writing, int success) { grpc_chttp2_transport *t = TRANSPORT_FROM_WRITING(transport_writing); lock(t); @@ -1063,76 +1060,74 @@ static void read_error_locked(grpc_chttp2_transport *t) { } /* tcp read callback */ -static int recv_data_loop(grpc_chttp2_transport *t, int *success) { +static void recv_data(void *tp, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { + grpc_chttp2_transport *t = tp; size_t i; - int keep_reading = 0; + int unref = 0; - lock(t); - i = 0; - GPR_ASSERT(!t->parsing_active); - if (!t->closed) { - t->parsing_active = 1; - /* merge stream lists */ - grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); - grpc_chttp2_prepare_to_read(&t->global, &t->parsing); - gpr_mu_unlock(&t->mu); - for (; i < t->read_buffer.count && - grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]); - i++) - ; - gpr_mu_lock(&t->mu); - if (i != t->read_buffer.count) { + switch (error) { + case GRPC_ENDPOINT_CB_SHUTDOWN: + case GRPC_ENDPOINT_CB_EOF: + case GRPC_ENDPOINT_CB_ERROR: + lock(t); drop_connection(t); - } - /* merge stream lists */ - grpc_chttp2_stream_map_move_into(&t->new_stream_map, - &t->parsing_stream_map); - t->global.concurrent_stream_count = - grpc_chttp2_stream_map_size(&t->parsing_stream_map); - if (t->parsing.initial_window_update != 0) { - grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, - update_global_window, t); - t->parsing.initial_window_update = 0; - } - /* handle higher level things */ - grpc_chttp2_publish_reads(&t->global, &t->parsing); - t->parsing_active = 0; - } - 
if (!*success || i != t->read_buffer.count) { - drop_connection(t); - read_error_locked(t); - } else { - keep_reading = 1; + read_error_locked(t); + unlock(t); + unref = 1; + for (i = 0; i < nslices; i++) gpr_slice_unref(slices[i]); + break; + case GRPC_ENDPOINT_CB_OK: + lock(t); + i = 0; + GPR_ASSERT(!t->parsing_active); + if (!t->closed) { + t->parsing_active = 1; + /* merge stream lists */ + grpc_chttp2_stream_map_move_into(&t->new_stream_map, + &t->parsing_stream_map); + grpc_chttp2_prepare_to_read(&t->global, &t->parsing); + gpr_mu_unlock(&t->mu); + for (; i < nslices && grpc_chttp2_perform_read(&t->parsing, slices[i]); + i++) { + gpr_slice_unref(slices[i]); + } + gpr_mu_lock(&t->mu); + if (i != nslices) { + drop_connection(t); + } + /* merge stream lists */ + grpc_chttp2_stream_map_move_into(&t->new_stream_map, + &t->parsing_stream_map); + t->global.concurrent_stream_count = + grpc_chttp2_stream_map_size(&t->parsing_stream_map); + if (t->parsing.initial_window_update != 0) { + grpc_chttp2_stream_map_for_each(&t->parsing_stream_map, + update_global_window, t); + t->parsing.initial_window_update = 0; + } + /* handle higher level things */ + grpc_chttp2_publish_reads(&t->global, &t->parsing); + t->parsing_active = 0; + } + if (i == nslices) { + grpc_chttp2_schedule_closure(&t->global, &t->reading_action, 1); + } else { + read_error_locked(t); + unref = 1; + } + unlock(t); + for (; i < nslices; i++) gpr_slice_unref(slices[i]); + break; } - gpr_slice_buffer_reset_and_unref(&t->read_buffer); - unlock(t); - - if (keep_reading) { - switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) { - case GRPC_ENDPOINT_DONE: - *success = 1; - return 1; - case GRPC_ENDPOINT_ERROR: - *success = 0; - return 1; - case GRPC_ENDPOINT_PENDING: - return 0; - } - } else { + if (unref) { UNREF_TRANSPORT(t, "recv_data"); - return 0; } - - gpr_log(GPR_ERROR, "should never reach here"); - abort(); } -static void recv_data(void *tp, int success) { - grpc_chttp2_transport *t = tp; - - while (recv_data_loop(t, &success)) - ; +static void reading_action(void *pt, int iomgr_success_ignored) { + grpc_chttp2_transport *t = pt; + grpc_endpoint_notify_on_read(t->ep, recv_data, t); } /* @@ -1245,6 +1240,5 @@ void grpc_chttp2_transport_start_reading(grpc_transport *transport, gpr_slice *slices, size_t nslices) { grpc_chttp2_transport *t = (grpc_chttp2_transport *)transport; REF_TRANSPORT(t, "recv_data"); /* matches unref inside recv_data */ - gpr_slice_buffer_addn(&t->read_buffer, slices, nslices); - recv_data(t, 1); + recv_data(t, slices, nslices, GRPC_ENDPOINT_CB_OK); } diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c index 1d988796625..24bf5d3625f 100644 --- a/test/core/bad_client/bad_client.c +++ b/test/core/bad_client/bad_client.c @@ -59,7 +59,7 @@ static void thd_func(void *arg) { gpr_event_set(&a->done_thd, (void *)1); } -static void done_write(void *arg, int success) { +static void done_write(void *arg, grpc_endpoint_cb_status status) { thd_args *a = arg; gpr_event_set(&a->done_write, (void *)1); } @@ -85,8 +85,6 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, grpc_mdctx *mdctx = grpc_mdctx_create(); gpr_slice slice = gpr_slice_from_copied_buffer(client_payload, client_payload_length); - gpr_slice_buffer outgoing; - grpc_iomgr_closure done_write_closure; hex = gpr_dump(client_payload, client_payload_length, GPR_DUMP_HEX | GPR_DUMP_ASCII); @@ -124,18 +122,14 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, 
/* Start validator */ gpr_thd_new(&id, thd_func, &a, NULL); - gpr_slice_buffer_init(&outgoing); - gpr_slice_buffer_add(&outgoing, slice); - grpc_iomgr_closure_init(&done_write_closure, done_write, &a); - /* Write data */ - switch (grpc_endpoint_write(sfd.client, &outgoing, &done_write_closure)) { - case GRPC_ENDPOINT_DONE: + switch (grpc_endpoint_write(sfd.client, &slice, 1, done_write, &a)) { + case GRPC_ENDPOINT_WRITE_DONE: done_write(&a, 1); break; - case GRPC_ENDPOINT_PENDING: + case GRPC_ENDPOINT_WRITE_PENDING: break; - case GRPC_ENDPOINT_ERROR: + case GRPC_ENDPOINT_WRITE_ERROR: done_write(&a, 0); break; } @@ -161,7 +155,6 @@ void grpc_run_bad_client_test(grpc_bad_client_server_side_validator validator, .type == GRPC_OP_COMPLETE); grpc_server_destroy(a.server); grpc_completion_queue_destroy(a.cq); - gpr_slice_buffer_destroy(&outgoing); grpc_shutdown(); } diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c index ef673747a18..6ef8e9ca3bc 100644 --- a/test/core/iomgr/endpoint_tests.c +++ b/test/core/iomgr/endpoint_tests.c @@ -59,7 +59,8 @@ static grpc_pollset *g_pollset; -size_t count_slices(gpr_slice *slices, size_t nslices, int *current_data) { +size_t count_and_unref_slices(gpr_slice *slices, size_t nslices, + int *current_data) { size_t num_bytes = 0; size_t i; size_t j; @@ -71,6 +72,7 @@ size_t count_slices(gpr_slice *slices, size_t nslices, int *current_data) { *current_data = (*current_data + 1) % 256; } num_bytes += GPR_SLICE_LENGTH(slices[i]); + gpr_slice_unref(slices[i]); } return num_bytes; } @@ -119,76 +121,86 @@ struct read_and_write_test_state { int current_write_data; int read_done; int write_done; - gpr_slice_buffer incoming; - gpr_slice_buffer outgoing; - grpc_iomgr_closure done_read; - grpc_iomgr_closure done_write; }; -static void read_and_write_test_read_handler(void *data, int success) { +static void read_and_write_test_read_handler(void *data, gpr_slice *slices, + size_t nslices, + grpc_endpoint_cb_status error) { struct read_and_write_test_state *state = data; + GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); + if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { + gpr_log(GPR_INFO, "Read handler shutdown"); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + state->read_done = 1; + grpc_pollset_kick(g_pollset, NULL); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); + return; + } - state->bytes_read += count_slices( - state->incoming.slices, state->incoming.count, &state->current_read_data); - if (state->bytes_read == state->target_bytes || !success) { + state->bytes_read += + count_and_unref_slices(slices, nslices, &state->current_read_data); + if (state->bytes_read == state->target_bytes) { gpr_log(GPR_INFO, "Read handler done"); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->read_done = 1 + success; + state->read_done = 1; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); - } else if (success) { - switch (grpc_endpoint_read(state->read_ep, &state->incoming, - &state->done_read)) { - case GRPC_ENDPOINT_ERROR: - read_and_write_test_read_handler(data, 0); - break; - case GRPC_ENDPOINT_DONE: - read_and_write_test_read_handler(data, 1); - break; - case GRPC_ENDPOINT_PENDING: - break; - } + } else { + grpc_endpoint_notify_on_read(state->read_ep, + read_and_write_test_read_handler, data); } } -static void read_and_write_test_write_handler(void *data, int success) { +static void read_and_write_test_write_handler(void *data, + grpc_endpoint_cb_status error) { struct read_and_write_test_state *state = data; gpr_slice *slices = NULL; 
size_t nslices; - grpc_endpoint_op_status write_status; - - if (success) { - for (;;) { - /* Need to do inline writes until they don't succeed synchronously or we - finish writing */ - state->bytes_written += state->current_write_size; - if (state->target_bytes - state->bytes_written < - state->current_write_size) { - state->current_write_size = state->target_bytes - state->bytes_written; - } - if (state->current_write_size == 0) { - break; - } - - slices = allocate_blocks(state->current_write_size, 8192, &nslices, - &state->current_write_data); - gpr_slice_buffer_reset_and_unref(&state->outgoing); - gpr_slice_buffer_addn(&state->outgoing, slices, nslices); - write_status = grpc_endpoint_write(state->write_ep, &state->outgoing, - &state->done_write); - gpr_log(GPR_DEBUG, "write_status=%d", write_status); - GPR_ASSERT(write_status != GRPC_ENDPOINT_ERROR); - free(slices); - if (write_status == GRPC_ENDPOINT_PENDING) { - return; - } + grpc_endpoint_write_status write_status; + + GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); + + gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler", + error); + + if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { + gpr_log(GPR_INFO, "Write handler shutdown"); + gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); + state->write_done = 1; + grpc_pollset_kick(g_pollset, NULL); + gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); + return; + } + + for (;;) { + /* Need to do inline writes until they don't succeed synchronously or we + finish writing */ + state->bytes_written += state->current_write_size; + if (state->target_bytes - state->bytes_written < + state->current_write_size) { + state->current_write_size = state->target_bytes - state->bytes_written; + } + if (state->current_write_size == 0) { + break; + } + + slices = allocate_blocks(state->current_write_size, 8192, &nslices, + &state->current_write_data); + write_status = + grpc_endpoint_write(state->write_ep, slices, nslices, + read_and_write_test_write_handler, state); + gpr_log(GPR_DEBUG, "write_status=%d", write_status); + GPR_ASSERT(write_status != GRPC_ENDPOINT_WRITE_ERROR); + free(slices); + if (write_status == GRPC_ENDPOINT_WRITE_PENDING) { + return; } - GPR_ASSERT(state->bytes_written == state->target_bytes); } + GPR_ASSERT(state->bytes_written == state->target_bytes); gpr_log(GPR_INFO, "Write handler done"); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - state->write_done = 1 + success; + state->write_done = 1; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } @@ -222,31 +234,16 @@ static void read_and_write_test(grpc_endpoint_test_config config, state.write_done = 0; state.current_read_data = 0; state.current_write_data = 0; - grpc_iomgr_closure_init(&state.done_read, read_and_write_test_read_handler, - &state); - grpc_iomgr_closure_init(&state.done_write, read_and_write_test_write_handler, - &state); - gpr_slice_buffer_init(&state.outgoing); - gpr_slice_buffer_init(&state.incoming); /* Get started by pretending an initial write completed */ /* NOTE: Sets up initial conditions so we can have the same write handler for the first iteration as for later iterations. It does the right thing even when bytes_written is unsigned. 
*/ state.bytes_written -= state.current_write_size; - read_and_write_test_write_handler(&state, 1); + read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK); - switch ( - grpc_endpoint_read(state.read_ep, &state.incoming, &state.done_read)) { - case GRPC_ENDPOINT_PENDING: - break; - case GRPC_ENDPOINT_ERROR: - read_and_write_test_read_handler(&state, 0); - break; - case GRPC_ENDPOINT_DONE: - read_and_write_test_read_handler(&state, 1); - break; - } + grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler, + &state); if (shutdown) { gpr_log(GPR_DEBUG, "shutdown read"); @@ -266,8 +263,6 @@ static void read_and_write_test(grpc_endpoint_test_config config, grpc_endpoint_destroy(state.read_ep); grpc_endpoint_destroy(state.write_ep); - gpr_slice_buffer_destroy(&state.outgoing); - gpr_slice_buffer_destroy(&state.incoming); end_test(config); } @@ -278,40 +273,36 @@ struct timeout_test_state { typedef struct { int done; grpc_endpoint *ep; - gpr_slice_buffer incoming; - grpc_iomgr_closure done_read; } shutdown_during_write_test_state; -static void shutdown_during_write_test_read_handler(void *user_data, - int success) { +static void shutdown_during_write_test_read_handler( + void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { + size_t i; shutdown_during_write_test_state *st = user_data; - if (!success) { + for (i = 0; i < nslices; i++) { + gpr_slice_unref(slices[i]); + } + + if (error != GRPC_ENDPOINT_CB_OK) { grpc_endpoint_destroy(st->ep); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); - st->done = 1; + st->done = error; grpc_pollset_kick(g_pollset, NULL); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } else { - switch (grpc_endpoint_read(st->ep, &st->incoming, &st->done_read)) { - case GRPC_ENDPOINT_PENDING: - break; - case GRPC_ENDPOINT_ERROR: - shutdown_during_write_test_read_handler(user_data, 0); - break; - case GRPC_ENDPOINT_DONE: - shutdown_during_write_test_read_handler(user_data, 1); - break; - } + grpc_endpoint_notify_on_read( + st->ep, shutdown_during_write_test_read_handler, user_data); } } -static void shutdown_during_write_test_write_handler(void *user_data, - int success) { +static void shutdown_during_write_test_write_handler( + void *user_data, grpc_endpoint_cb_status error) { shutdown_during_write_test_state *st = user_data; - gpr_log(GPR_INFO, "shutdown_during_write_test_write_handler: success = %d", - success); - if (success) { + gpr_log(GPR_INFO, "shutdown_during_write_test_write_handler: error = %d", + error); + if (error == 0) { /* This happens about 0.5% of the time when run under TSAN, and is entirely legitimate, but means we aren't testing the path we think we are. 
*/ /* TODO(klempner): Change this test to retry the write in that case */ @@ -334,8 +325,6 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, shutdown_during_write_test_state read_st; shutdown_during_write_test_state write_st; gpr_slice *slices; - gpr_slice_buffer outgoing; - grpc_iomgr_closure done_write; grpc_endpoint_test_fixture f = begin_test(config, "shutdown_during_write_test", slice_size); @@ -346,26 +335,19 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, read_st.done = 0; write_st.done = 0; - grpc_iomgr_closure_init(&done_write, shutdown_during_write_test_write_handler, - &write_st); - grpc_iomgr_closure_init(&read_st.done_read, - shutdown_during_write_test_read_handler, &read_st); - gpr_slice_buffer_init(&read_st.incoming); - gpr_slice_buffer_init(&outgoing); - - GPR_ASSERT(grpc_endpoint_read(read_st.ep, &read_st.incoming, - &read_st.done_read) == GRPC_ENDPOINT_PENDING); + grpc_endpoint_notify_on_read( + read_st.ep, shutdown_during_write_test_read_handler, &read_st); for (size = 1;; size *= 2) { slices = allocate_blocks(size, 1, &nblocks, ¤t_data); - gpr_slice_buffer_reset_and_unref(&outgoing); - gpr_slice_buffer_addn(&outgoing, slices, nblocks); - switch (grpc_endpoint_write(write_st.ep, &outgoing, &done_write)) { - case GRPC_ENDPOINT_DONE: + switch (grpc_endpoint_write(write_st.ep, slices, nblocks, + shutdown_during_write_test_write_handler, + &write_st)) { + case GRPC_ENDPOINT_WRITE_DONE: break; - case GRPC_ENDPOINT_ERROR: + case GRPC_ENDPOINT_WRITE_ERROR: gpr_log(GPR_ERROR, "error writing"); abort(); - case GRPC_ENDPOINT_PENDING: + case GRPC_ENDPOINT_WRITE_PENDING: grpc_endpoint_shutdown(write_st.ep); deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); @@ -386,8 +368,6 @@ static void shutdown_during_write_test(grpc_endpoint_test_config config, } gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); gpr_free(slices); - gpr_slice_buffer_destroy(&read_st.incoming); - gpr_slice_buffer_destroy(&outgoing); end_test(config); return; } diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c index 8acaa433bb5..6ad832231f5 100644 --- a/test/core/iomgr/tcp_posix_test.c +++ b/test/core/iomgr/tcp_posix_test.c @@ -118,12 +118,10 @@ struct read_socket_state { grpc_endpoint *ep; ssize_t read_bytes; ssize_t target_read_bytes; - gpr_slice_buffer incoming; - grpc_iomgr_closure read_cb; }; -static ssize_t count_slices(gpr_slice *slices, size_t nslices, - int *current_data) { +static ssize_t count_and_unref_slices(gpr_slice *slices, size_t nslices, + int *current_data) { ssize_t num_bytes = 0; unsigned i, j; unsigned char *buf; @@ -134,41 +132,31 @@ static ssize_t count_slices(gpr_slice *slices, size_t nslices, *current_data = (*current_data + 1) % 256; } num_bytes += GPR_SLICE_LENGTH(slices[i]); + gpr_slice_unref(slices[i]); } return num_bytes; } -static void read_cb(void *user_data, int success) { +static void read_cb(void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { struct read_socket_state *state = (struct read_socket_state *)user_data; ssize_t read_bytes; int current_data; - GPR_ASSERT(success); + GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); current_data = state->read_bytes % 256; - read_bytes = count_slices(state->incoming.slices, state->incoming.count, - ¤t_data); + read_bytes = count_and_unref_slices(slices, nslices, ¤t_data); state->read_bytes += read_bytes; gpr_log(GPR_INFO, "Read %d bytes of %d", read_bytes, 
state->target_read_bytes); if (state->read_bytes >= state->target_read_bytes) { - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + /* empty */ } else { - switch (grpc_endpoint_read(state->ep, &state->incoming, &state->read_cb)) { - case GRPC_ENDPOINT_DONE: - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - read_cb(user_data, 1); - break; - case GRPC_ENDPOINT_ERROR: - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - read_cb(user_data, 0); - break; - case GRPC_ENDPOINT_PENDING: - gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - break; - } + grpc_endpoint_notify_on_read(state->ep, read_cb, state); } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); } /* Write to a socket, then read from it using the grpc_tcp API. */ @@ -193,19 +181,8 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) { state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; - gpr_slice_buffer_init(&state.incoming); - grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); - switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { - case GRPC_ENDPOINT_DONE: - read_cb(&state, 1); - break; - case GRPC_ENDPOINT_ERROR: - read_cb(&state, 0); - break; - case GRPC_ENDPOINT_PENDING: - break; - } + grpc_endpoint_notify_on_read(ep, read_cb, &state); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { @@ -216,7 +193,6 @@ static void read_test(ssize_t num_bytes, ssize_t slice_size) { GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); } @@ -243,19 +219,8 @@ static void large_read_test(ssize_t slice_size) { state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; - gpr_slice_buffer_init(&state.incoming); - grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); - switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { - case GRPC_ENDPOINT_DONE: - read_cb(&state, 1); - break; - case GRPC_ENDPOINT_ERROR: - read_cb(&state, 0); - break; - case GRPC_ENDPOINT_PENDING: - break; - } + grpc_endpoint_notify_on_read(ep, read_cb, &state); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { @@ -266,7 +231,6 @@ static void large_read_test(ssize_t slice_size) { GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); - gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); } @@ -298,7 +262,8 @@ static gpr_slice *allocate_blocks(ssize_t num_bytes, ssize_t slice_size, return slices; } -static void write_done(void *user_data /* write_socket_state */, int success) { +static void write_done(void *user_data /* write_socket_state */, + grpc_endpoint_cb_status error) { struct write_socket_state *state = (struct write_socket_state *)user_data; gpr_log(GPR_INFO, "Write done callback called"); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); @@ -374,8 +339,6 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { size_t num_blocks; gpr_slice *slices; int current_data = 0; - gpr_slice_buffer outgoing; - grpc_iomgr_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, @@ -392,21 +355,74 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { slices = allocate_blocks(num_bytes, slice_size, &num_blocks, ¤t_data); - gpr_slice_buffer_init(&outgoing); - gpr_slice_buffer_addn(&outgoing, slices, num_blocks); - 
grpc_iomgr_closure_init(&write_done_closure, write_done, &state); + if (grpc_endpoint_write(ep, slices, num_blocks, write_done, &state) == + GRPC_ENDPOINT_WRITE_DONE) { + /* Write completed immediately */ + read_bytes = drain_socket(sv[0]); + GPR_ASSERT(read_bytes == num_bytes); + } else { + drain_socket_blocking(sv[0], num_bytes, num_bytes); + gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); + for (;;) { + grpc_pollset_worker worker; + if (state.write_done) { + break; + } + grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), + deadline); + } + gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); + } + + grpc_endpoint_destroy(ep); + gpr_free(slices); +} + +static void read_done_for_write_error(void *ud, gpr_slice *slices, + size_t nslices, + grpc_endpoint_cb_status error) { + GPR_ASSERT(error != GRPC_ENDPOINT_CB_OK); + GPR_ASSERT(nslices == 0); +} + +/* Write to a socket using the grpc_tcp API, then drain it directly. + Note that if the write does not complete immediately we need to drain the + socket in parallel with the read. */ +static void write_error_test(ssize_t num_bytes, ssize_t slice_size) { + int sv[2]; + grpc_endpoint *ep; + struct write_socket_state state; + size_t num_blocks; + gpr_slice *slices; + int current_data = 0; + grpc_pollset_worker worker; + gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); + + gpr_log(GPR_INFO, "Start write error test with %d bytes, slice size %d", + num_bytes, slice_size); + + create_sockets(sv); - switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) { - case GRPC_ENDPOINT_DONE: + ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_error_test"), + GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); + grpc_endpoint_add_to_pollset(ep, &g_pollset); + + close(sv[0]); + + state.ep = ep; + state.write_done = 0; + + slices = allocate_blocks(num_bytes, slice_size, &num_blocks, ¤t_data); + + switch (grpc_endpoint_write(ep, slices, num_blocks, write_done, &state)) { + case GRPC_ENDPOINT_WRITE_DONE: + case GRPC_ENDPOINT_WRITE_ERROR: /* Write completed immediately */ - read_bytes = drain_socket(sv[0]); - GPR_ASSERT(read_bytes == num_bytes); break; - case GRPC_ENDPOINT_PENDING: - drain_socket_blocking(sv[0], num_bytes, num_bytes); + case GRPC_ENDPOINT_WRITE_PENDING: + grpc_endpoint_notify_on_read(ep, read_done_for_write_error, NULL); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { - grpc_pollset_worker worker; if (state.write_done) { break; } @@ -415,14 +431,10 @@ static void write_test(ssize_t num_bytes, ssize_t slice_size) { } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); break; - case GRPC_ENDPOINT_ERROR: - gpr_log(GPR_ERROR, "endpoint got error"); - abort(); } - gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(ep); - gpr_free(slices); + free(slices); } void run_tests(void) { @@ -441,6 +453,10 @@ void run_tests(void) { write_test(100000, 1); write_test(100000, 137); + for (i = 1; i < 1000; i = GPR_MAX(i + 1, i * 5 / 4)) { + write_error_test(40320, i); + } + for (i = 1; i < 1000; i = GPR_MAX(i + 1, i * 5 / 4)) { write_test(40320, i); } diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c index c76ddcd1940..a8368fc8426 100644 --- a/test/core/security/secure_endpoint_test.c +++ b/test/core/security/secure_endpoint_test.c @@ -135,26 +135,62 @@ static grpc_endpoint_test_config configs[] = { secure_endpoint_create_fixture_tcp_socketpair_leftover, clean_up}, }; -static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) { - grpc_endpoint_test_fixture f = 
config.create_fixture(slice_size); - gpr_slice_buffer incoming; +static void verify_leftover(void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { gpr_slice s = gpr_slice_from_copied_string("hello world 12345678900987654321"); + + GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); + GPR_ASSERT(nslices == 1); + + GPR_ASSERT(0 == gpr_slice_cmp(s, slices[0])); + gpr_slice_unref(slices[0]); + gpr_slice_unref(s); + *(int *)user_data = 1; +} + +static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) { + grpc_endpoint_test_fixture f = config.create_fixture(slice_size); + int verified = 0; gpr_log(GPR_INFO, "Start test left over"); - gpr_slice_buffer_init(&incoming); - GPR_ASSERT(grpc_endpoint_read(f.client_ep, &incoming, NULL) == - GRPC_ENDPOINT_DONE); - GPR_ASSERT(incoming.count == 1); - GPR_ASSERT(0 == gpr_slice_cmp(s, incoming.slices[0])); + grpc_endpoint_notify_on_read(f.client_ep, verify_leftover, &verified); + GPR_ASSERT(verified == 1); grpc_endpoint_shutdown(f.client_ep); grpc_endpoint_shutdown(f.server_ep); grpc_endpoint_destroy(f.client_ep); grpc_endpoint_destroy(f.server_ep); + clean_up(); +} + +static void destroy_early(void *user_data, gpr_slice *slices, size_t nslices, + grpc_endpoint_cb_status error) { + grpc_endpoint_test_fixture *f = user_data; + gpr_slice s = + gpr_slice_from_copied_string("hello world 12345678900987654321"); + + GPR_ASSERT(error == GRPC_ENDPOINT_CB_OK); + GPR_ASSERT(nslices == 1); + + grpc_endpoint_shutdown(f->client_ep); + grpc_endpoint_destroy(f->client_ep); + + GPR_ASSERT(0 == gpr_slice_cmp(s, slices[0])); + gpr_slice_unref(slices[0]); gpr_slice_unref(s); - gpr_slice_buffer_destroy(&incoming); +} +/* test which destroys the ep before finishing reading */ +static void test_destroy_ep_early(grpc_endpoint_test_config config, + size_t slice_size) { + grpc_endpoint_test_fixture f = config.create_fixture(slice_size); + gpr_log(GPR_INFO, "Start test destroy early"); + + grpc_endpoint_notify_on_read(f.client_ep, destroy_early, &f); + + grpc_endpoint_shutdown(f.server_ep); + grpc_endpoint_destroy(f.server_ep); clean_up(); } @@ -167,6 +203,7 @@ int main(int argc, char **argv) { grpc_pollset_init(&g_pollset); grpc_endpoint_tests(configs[0], &g_pollset); test_leftover(configs[1], 1); + test_destroy_ep_early(configs[1], 1); grpc_pollset_shutdown(&g_pollset, destroy_pollset, &g_pollset); grpc_iomgr_shutdown(); diff --git a/test/core/util/port_posix.c b/test/core/util/port_posix.c index 4781d334e23..836e62a5412 100644 --- a/test/core/util/port_posix.c +++ b/test/core/util/port_posix.c @@ -198,13 +198,14 @@ int grpc_pick_unused_port(void) { races with other processes on kernels that want to reuse the same port numbers over and over. */ - /* In alternating iterations we trial UDP ports before TCP ports UDP + /* In alternating iterations we try UDP ports before TCP ports UDP ports -- it could be the case that this machine has been using up UDP ports and they are scarcer. 
*/ /* Type of port to first pick in next iteration */ int is_tcp = 1; - int trial = 0; + int try + = 0; char *env = gpr_getenv("GRPC_TEST_PORT_SERVER"); if (env) { @@ -217,10 +218,11 @@ int grpc_pick_unused_port(void) { for (;;) { int port; - trial++; - if (trial == 1) { + try + ++; + if (try == 1) { port = getpid() % (65536 - 30000) + 30000; - } else if (trial <= NUM_RANDOM_PORTS_TO_PICK) { + } else if (try <= NUM_RANDOM_PORTS_TO_PICK) { port = rand() % (65536 - 30000) + 30000; } else { port = 0; @@ -237,7 +239,7 @@ int grpc_pick_unused_port(void) { GPR_ASSERT(port > 0); /* Check that the port # is free for the other type of socket also */ if (!is_port_available(&port, !is_tcp)) { - /* In the next iteration trial to bind to the other type first + /* In the next iteration try to bind to the other type first because perhaps it is more rare. */ is_tcp = !is_tcp; continue; diff --git a/test/core/util/port_windows.c b/test/core/util/port_windows.c index 2f64626cf3e..5b072f805a5 100644 --- a/test/core/util/port_windows.c +++ b/test/core/util/port_windows.c @@ -35,6 +35,7 @@ #include "test/core/util/test_config.h" #if defined(GPR_WINSOCK_SOCKET) && defined(GRPC_TEST_PICK_PORT) +#include "src/core/iomgr/sockaddr_utils.h" #include "test/core/util/port.h" #include @@ -42,14 +43,8 @@ #include #include -#include -#include #include -#include "src/core/support/env.h" -#include "src/core/httpcli/httpcli.h" -#include "src/core/iomgr/sockaddr_utils.h" - #define NUM_RANDOM_PORTS_TO_PICK 100 static int is_port_available(int *port, int is_tcp) { @@ -104,67 +99,6 @@ static int is_port_available(int *port, int is_tcp) { return 1; } -typedef struct portreq { - grpc_pollset pollset; - int port; -} portreq; - -static void got_port_from_server(void *arg, - const grpc_httpcli_response *response) { - size_t i; - int port = 0; - portreq *pr = arg; - GPR_ASSERT(response); - GPR_ASSERT(response->status == 200); - for (i = 0; i < response->body_length; i++) { - GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9'); - port = port * 10 + response->body[i] - '0'; - } - GPR_ASSERT(port > 1024); - gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset)); - pr->port = port; - grpc_pollset_kick(&pr->pollset, NULL); - gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset)); -} - -static void destroy_pollset_and_shutdown(void *p) { - grpc_pollset_destroy(p); - grpc_shutdown(); -} - -static int pick_port_using_server(char *server) { - grpc_httpcli_context context; - grpc_httpcli_request req; - portreq pr; - - grpc_init(); - - memset(&pr, 0, sizeof(pr)); - memset(&req, 0, sizeof(req)); - grpc_pollset_init(&pr.pollset); - pr.port = -1; - - req.host = server; - req.path = "/get"; - - grpc_httpcli_context_init(&context); - grpc_httpcli_get(&context, &pr.pollset, &req, - GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server, - &pr); - gpr_mu_lock(GRPC_POLLSET_MU(&pr.pollset)); - while (pr.port == -1) { - grpc_pollset_worker worker; - grpc_pollset_work(&pr.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), - GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)); - } - gpr_mu_unlock(GRPC_POLLSET_MU(&pr.pollset)); - - grpc_httpcli_context_destroy(&context); - grpc_pollset_shutdown(&pr.pollset, destroy_pollset_and_shutdown, &pr.pollset); - - return pr.port; -} - int grpc_pick_unused_port(void) { /* We repeatedly pick a port and then see whether or not it is available for use both as a TCP socket and a UDP socket. 
First, we @@ -174,29 +108,22 @@ int grpc_pick_unused_port(void) { races with other processes on kernels that want to reuse the same port numbers over and over. */ - /* In alternating iterations we trial UDP ports before TCP ports UDP + /* In alternating iterations we try UDP ports before TCP ports UDP ports -- it could be the case that this machine has been using up UDP ports and they are scarcer. */ /* Type of port to first pick in next iteration */ int is_tcp = 1; - int trial = 0; - - char *env = gpr_getenv("GRPC_TEST_PORT_SERVER"); - if (env) { - int port = pick_port_using_server(env); - gpr_free(env); - if (port != 0) { - return port; - } - } + int try + = 0; for (;;) { int port; - trial++; - if (trial == 1) { + try + ++; + if (try == 1) { port = _getpid() % (65536 - 30000) + 30000; - } else if (trial <= NUM_RANDOM_PORTS_TO_PICK) { + } else if (try <= NUM_RANDOM_PORTS_TO_PICK) { port = rand() % (65536 - 30000) + 30000; } else { port = 0; @@ -209,7 +136,7 @@ int grpc_pick_unused_port(void) { GPR_ASSERT(port > 0); /* Check that the port # is free for the other type of socket also */ if (!is_port_available(&port, !is_tcp)) { - /* In the next iteration trial to bind to the other type first + /* In the next iteration try to bind to the other type first because perhaps it is more rare. */ is_tcp = !is_tcp; continue;