From 7225408dc7fd7af82348f4e123b0906ef411caa6 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Mon, 21 Aug 2017 17:05:44 -0700 Subject: [PATCH 001/109] Add Z_FINISH flush at end of stream --- src/core/ext/transport/chttp2/transport/writing.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 80eb51ff0d5..304f5f199eb 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -325,6 +325,19 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( is_last_frame = is_last_data_frame && s->send_trailing_metadata != NULL && grpc_metadata_batch_is_empty(s->send_trailing_metadata); + if (is_last_frame && s->stream_compression_ctx) { + GPR_ASSERT(grpc_stream_compress( + s->stream_compression_ctx, &s->flow_controlled_buffer, + s->compressed_data_buffer, NULL, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_FINISH)); + grpc_stream_compression_context_destroy(s->stream_compression_ctx); + s->stream_compression_ctx = NULL; + /* After finish, bytes in s->compressed_data_buffer may be + * more than max_outgoing. Start another round of the current + * while loop so that send_bytes, is_last_data_frame and + * is_last_frame are recalculated. 
*/ + continue; + } grpc_chttp2_encode_data(s->id, s->compressed_data_buffer, send_bytes, is_last_frame, &s->stats.outgoing, &t->outbuf); @@ -345,6 +358,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->stream_compression_ctx, &s->flow_controlled_buffer, s->compressed_data_buffer, NULL, MAX_SIZE_T, GRPC_STREAM_COMPRESSION_FLUSH_SYNC)); + GPR_ASSERT(s->compressed_data_buffer->length > 0); } } } else { From 1e95aa1e3b79d3094197eb3af8e5613442a18241 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Mon, 21 Aug 2017 18:06:02 -0700 Subject: [PATCH 002/109] clang-format --- src/core/ext/transport/chttp2/transport/writing.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 304f5f199eb..50f0d8588c8 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -330,7 +330,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->stream_compression_ctx, &s->flow_controlled_buffer, s->compressed_data_buffer, NULL, MAX_SIZE_T, GRPC_STREAM_COMPRESSION_FLUSH_FINISH)); - grpc_stream_compression_context_destroy(s->stream_compression_ctx); + grpc_stream_compression_context_destroy( + s->stream_compression_ctx); s->stream_compression_ctx = NULL; /* After finish, bytes in s->compressed_data_buffer may be * more than max_outgoing. 
Start another round of the current From b02f1bf6fc3dd1943ab2e4f9e7ed7d9278a0a113 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 22 Aug 2017 10:03:16 -0700 Subject: [PATCH 003/109] Minor bug fix --- src/core/ext/transport/chttp2/transport/writing.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 50f0d8588c8..e338caf39ff 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -322,10 +322,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( (send_bytes == s->compressed_data_buffer->length && s->flow_controlled_buffer.length == 0 && s->fetching_send_message == NULL); - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - if (is_last_frame && s->stream_compression_ctx) { + if (is_last_data_frame && s->stream_compression_ctx) { GPR_ASSERT(grpc_stream_compress( s->stream_compression_ctx, &s->flow_controlled_buffer, s->compressed_data_buffer, NULL, MAX_SIZE_T, @@ -335,10 +332,13 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->stream_compression_ctx = NULL; /* After finish, bytes in s->compressed_data_buffer may be * more than max_outgoing. Start another round of the current - * while loop so that send_bytes, is_last_data_frame and - * is_last_frame are recalculated. */ + * while loop so that send_bytes and is_last_data_frame are + * recalculated. 
*/ continue; } + is_last_frame = + is_last_data_frame && s->send_trailing_metadata != NULL && + grpc_metadata_batch_is_empty(s->send_trailing_metadata); grpc_chttp2_encode_data(s->id, s->compressed_data_buffer, send_bytes, is_last_frame, &s->stats.outgoing, &t->outbuf); From 103fc3e3f45941ab471de19ff17c7a51a373b80b Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 23 Aug 2017 11:24:47 -0700 Subject: [PATCH 004/109] nit fix --- src/core/ext/transport/chttp2/transport/writing.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index e338caf39ff..0ae10ee883d 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -322,7 +322,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( (send_bytes == s->compressed_data_buffer->length && s->flow_controlled_buffer.length == 0 && s->fetching_send_message == NULL); - if (is_last_data_frame && s->stream_compression_ctx) { + if (is_last_data_frame && s->stream_compression_ctx != NULL) { GPR_ASSERT(grpc_stream_compress( s->stream_compression_ctx, &s->flow_controlled_buffer, s->compressed_data_buffer, NULL, MAX_SIZE_T, From c353643c378cecf96a580eee901a8be3308e18b9 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 23 Aug 2017 14:03:30 -0700 Subject: [PATCH 005/109] Log error when stream compression fails --- src/core/ext/transport/chttp2/transport/writing.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 0ae10ee883d..b6bf0450eea 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -323,10 +323,12 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( s->flow_controlled_buffer.length == 0 && s->fetching_send_message == NULL); if 
(is_last_data_frame && s->stream_compression_ctx != NULL) { - GPR_ASSERT(grpc_stream_compress( - s->stream_compression_ctx, &s->flow_controlled_buffer, - s->compressed_data_buffer, NULL, MAX_SIZE_T, - GRPC_STREAM_COMPRESSION_FLUSH_FINISH)); + if (!grpc_stream_compress( + s->stream_compression_ctx, &s->flow_controlled_buffer, + s->compressed_data_buffer, NULL, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_FINISH)) { + gpr_log(GPR_ERROR, "Stream compression failed."); + } grpc_stream_compression_context_destroy( s->stream_compression_ctx); s->stream_compression_ctx = NULL; From 0e3969874ae731ae2278b29a426dd83d2f95470b Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 30 Aug 2017 16:23:41 -0700 Subject: [PATCH 006/109] Allow unreachable code on iPhone --- include/grpc/impl/codegen/port_platform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index e84a75d2959..8d8dcee3b0a 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -183,7 +183,7 @@ #define _BSD_SOURCE #endif #if TARGET_OS_IPHONE -#define GPR_FORBID_UNREACHABLE_CODE 1 +#define GPR_FORBID_UNREACHABLE_CODE 0 #define GPR_PLATFORM_STRING "ios" #define GPR_CPU_IPHONE 1 #define GPR_PTHREAD_TLS 1 From 76e0c1ddd536342bdc5059209da38d5406c7e717 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 31 Aug 2017 14:59:17 -0700 Subject: [PATCH 007/109] Guarantee that Z_FINISH is only applied at the end of stream --- src/core/ext/transport/chttp2/transport/writing.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index b6bf0450eea..410c154206c 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -322,7 +322,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( (send_bytes == 
s->compressed_data_buffer->length && s->flow_controlled_buffer.length == 0 && s->fetching_send_message == NULL); - if (is_last_data_frame && s->stream_compression_ctx != NULL) { + if (is_last_data_frame && s->send_trailing_metadata != NULL && + s->stream_compression_ctx != NULL) { if (!grpc_stream_compress( s->stream_compression_ctx, &s->flow_controlled_buffer, s->compressed_data_buffer, NULL, MAX_SIZE_T, From 2caf021772ee241da3366e7dfd32aa4ee1134092 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 1 Sep 2017 15:04:13 -0700 Subject: [PATCH 008/109] Change plugin credentials API to support both sync and async modes. --- include/grpc/grpc_security.h | 35 ++++- .../credentials/plugin/plugin_credentials.c | 143 ++++++++++++------ src/cpp/client/secure_credentials.cc | 68 +++++++-- src/cpp/client/secure_credentials.h | 17 ++- src/csharp/ext/grpc_csharp_ext.c | 8 +- src/node/ext/call_credentials.cc | 10 +- src/php/ext/grpc/call_credentials.c | 45 ++++-- .../grpc/_cython/_cygrpc/credentials.pxd.pxi | 7 +- .../grpc/_cython/_cygrpc/credentials.pyx.pxi | 9 +- .../grpcio/grpc/_cython/_cygrpc/grpc.pxi | 7 +- src/ruby/ext/grpc/rb_call_credentials.c | 8 +- test/core/security/credentials_test.c | 43 +++--- 12 files changed, 280 insertions(+), 120 deletions(-) diff --git a/include/grpc/grpc_security.h b/include/grpc/grpc_security.h index 2005e25df28..42e2ab31117 100644 --- a/include/grpc/grpc_security.h +++ b/include/grpc/grpc_security.h @@ -249,19 +249,40 @@ typedef struct { void *reserved; } grpc_auth_metadata_context; +/** Maximum number of credentials returnable by a credentials plugin via + a synchronous return. */ +#define GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX 4 + /** grpc_metadata_credentials plugin is an API user provided structure used to create grpc_credentials objects that can be set on a channel (composed) or a call. See grpc_credentials_metadata_create_from_plugin below. 
The grpc client stack will call the get_metadata method of the plugin for every call in scope for the credentials created from it. */ typedef struct { - /** The implementation of this method has to be non-blocking. - - context is the information that can be used by the plugin to create auth - metadata. - - cb is the callback that needs to be called when the metadata is ready. - - user_data needs to be passed as the first parameter of the callback. */ - void (*get_metadata)(void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data); + /** The implementation of this method has to be non-blocking, but can + be performed synchronously or asynchronously. + + If processing occurs synchronously, returns non-zero and populates + creds_md, num_creds_md, status, and error_details. In this case, + the caller takes ownership of the entries in creds_md and of + error_details. Note that if the plugin needs to return more than + GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX entries in creds_md, it must + return asynchronously. + + If processing occurs asynchronously, returns zero and invokes \a cb + when processing is completed. \a user_data will be passed as the + first parameter of the callback. NOTE: \a cb MUST be invoked in a + different thread, not from the thread in which \a get_metadata() is + invoked. + + \a context is the information that can be used by the plugin to create + auth metadata. */ + int (*get_metadata)( + void *state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details); /** Destroys the plugin state. 
*/ void (*destroy)(void *state); diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c index 73e0c23e0f7..e844bff79c0 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.c +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c @@ -53,6 +53,63 @@ static void pending_request_remove_locked( } } +// Checks if the request has been cancelled. +// If not, removes it from the pending list, so that it cannot be +// cancelled out from under us. +// When this returns, r->cancelled indicates whether the request was +// cancelled before completion. +static void pending_request_complete( + grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r) { + gpr_mu_lock(&r->creds->mu); + if (!r->cancelled) pending_request_remove_locked(r->creds, r); + gpr_mu_unlock(&r->creds->mu); + // Ref to credentials not needed anymore. + grpc_call_credentials_unref(exec_ctx, &r->creds->base); +} + +static grpc_error *process_plugin_result( + grpc_exec_ctx *exec_ctx, grpc_plugin_credentials_pending_request *r, + const grpc_metadata *md, size_t num_md, grpc_status_code status, + const char *error_details) { + grpc_error *error = GRPC_ERROR_NONE; + if (status != GRPC_STATUS_OK) { + char *msg; + gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s", + error_details); + error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg); + gpr_free(msg); + } else { + bool seen_illegal_header = false; + for (size_t i = 0; i < num_md; ++i) { + if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin", + grpc_validate_header_key_is_legal(md[i].key))) { + seen_illegal_header = true; + break; + } else if (!grpc_is_binary_header(md[i].key) && + !GRPC_LOG_IF_ERROR( + "validate_metadata_from_plugin", + grpc_validate_header_nonbin_value_is_legal( + md[i].value))) { + gpr_log(GPR_ERROR, "Plugin added invalid metadata value."); + seen_illegal_header = true; + break; + } + } + if 
(seen_illegal_header) { + error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata"); + } else { + for (size_t i = 0; i < num_md; ++i) { + grpc_mdelem mdelem = grpc_mdelem_from_slices( + exec_ctx, grpc_slice_ref_internal(md[i].key), + grpc_slice_ref_internal(md[i].value)); + grpc_credentials_mdelem_array_add(r->md_array, mdelem); + GRPC_MDELEM_UNREF(exec_ctx, mdelem); + } + } + } + return error; +} + static void plugin_md_request_metadata_ready(void *request, const grpc_metadata *md, size_t num_md, @@ -64,54 +121,13 @@ static void plugin_md_request_metadata_ready(void *request, NULL, NULL); grpc_plugin_credentials_pending_request *r = (grpc_plugin_credentials_pending_request *)request; - // Check if the request has been cancelled. - // If not, remove it from the pending list, so that it cannot be - // cancelled out from under us. - gpr_mu_lock(&r->creds->mu); - if (!r->cancelled) pending_request_remove_locked(r->creds, r); - gpr_mu_unlock(&r->creds->mu); - grpc_call_credentials_unref(&exec_ctx, &r->creds->base); + // Remove request from pending list if not previously cancelled. + pending_request_complete(&exec_ctx, r); // If it has not been cancelled, process it. 
if (!r->cancelled) { - if (status != GRPC_STATUS_OK) { - char *msg; - gpr_asprintf(&msg, "Getting metadata from plugin failed with error: %s", - error_details); - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, - GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg)); - gpr_free(msg); - } else { - bool seen_illegal_header = false; - for (size_t i = 0; i < num_md; ++i) { - if (!GRPC_LOG_IF_ERROR("validate_metadata_from_plugin", - grpc_validate_header_key_is_legal(md[i].key))) { - seen_illegal_header = true; - break; - } else if (!grpc_is_binary_header(md[i].key) && - !GRPC_LOG_IF_ERROR( - "validate_metadata_from_plugin", - grpc_validate_header_nonbin_value_is_legal( - md[i].value))) { - gpr_log(GPR_ERROR, "Plugin added invalid metadata value."); - seen_illegal_header = true; - break; - } - } - if (seen_illegal_header) { - GRPC_CLOSURE_SCHED( - &exec_ctx, r->on_request_metadata, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata")); - } else { - for (size_t i = 0; i < num_md; ++i) { - grpc_mdelem mdelem = grpc_mdelem_from_slices( - &exec_ctx, grpc_slice_ref_internal(md[i].key), - grpc_slice_ref_internal(md[i].value)); - grpc_credentials_mdelem_array_add(r->md_array, mdelem); - GRPC_MDELEM_UNREF(&exec_ctx, mdelem); - } - GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, GRPC_ERROR_NONE); - } - } + grpc_error *error = + process_plugin_result(&exec_ctx, r, md, num_md, status, error_details); + GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error); } gpr_free(r); grpc_exec_ctx_finish(&exec_ctx); @@ -125,6 +141,7 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, grpc_closure *on_request_metadata, grpc_error **error) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; + bool retval = true; // Synchronous return. if (c->plugin.get_metadata != NULL) { // Create pending_request object. 
grpc_plugin_credentials_pending_request *pending_request = @@ -143,11 +160,37 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, gpr_mu_unlock(&c->mu); // Invoke the plugin. The callback holds a ref to us. grpc_call_credentials_ref(creds); - c->plugin.get_metadata(c->plugin.state, context, - plugin_md_request_metadata_ready, pending_request); - return false; + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX]; + size_t num_creds_md = 0; + grpc_status_code status = GRPC_STATUS_OK; + const char *error_details = NULL; + if (!c->plugin.get_metadata(c->plugin.state, context, + plugin_md_request_metadata_ready, + pending_request, creds_md, &num_creds_md, + &status, &error_details)) { + return false; // Asynchronous return. + } + // Returned synchronously. + // Remove request from pending list if not previously cancelled. + pending_request_complete(exec_ctx, pending_request); + // If the request was cancelled, the error will have been returned + // asynchronously by plugin_cancel_get_request_metadata(), so return + // false. Otherwise, process the result. + if (pending_request->cancelled) { + retval = false; + } else { + *error = process_plugin_result(exec_ctx, pending_request, creds_md, + num_creds_md, status, error_details); + } + // Clean up. 
+ for (size_t i = 0; i < num_creds_md; ++i) { + grpc_slice_unref_internal(exec_ctx, creds_md[i].key); + grpc_slice_unref_internal(exec_ctx, creds_md[i].value); + } + gpr_free((void *)error_details); + gpr_free(pending_request); } - return true; + return retval; } static void plugin_cancel_get_request_metadata( diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 057a058a3fb..00104165a50 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -21,6 +21,7 @@ #include #include #include +#include #include "src/cpp/client/create_channel_internal.h" #include "src/cpp/common/secure_auth_context.h" @@ -157,28 +158,50 @@ void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) { delete w; } -void MetadataCredentialsPluginWrapper::GetMetadata( +int MetadataCredentialsPluginWrapper::GetMetadata( void* wrapper, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void* user_data) { + grpc_credentials_plugin_metadata_cb cb, void* user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { GPR_ASSERT(wrapper); MetadataCredentialsPluginWrapper* w = reinterpret_cast(wrapper); if (!w->plugin_) { - cb(user_data, NULL, 0, GRPC_STATUS_OK, NULL); - return; + *num_creds_md = 0; + *status = GRPC_STATUS_OK; + *error_details = nullptr; + return true; } if (w->plugin_->IsBlocking()) { + // Asynchronous return. w->thread_pool_->Add( std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context, - cb, user_data)); + cb, user_data, nullptr, nullptr, nullptr, nullptr)); + return false; } else { - w->InvokePlugin(context, cb, user_data); + // Synchronous return. 
+ w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status, + error_details); + return true; } } +namespace { + +void UnrefMetadata(const std::vector& md) { + for (auto it = md.begin(); it != md.end(); ++it) { + grpc_slice_unref(it->key); + grpc_slice_unref(it->value); + } +} + +} // namespace + void MetadataCredentialsPluginWrapper::InvokePlugin( grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, - void* user_data) { + void* user_data, grpc_metadata creds_md[4], size_t *num_creds_md, + grpc_status_code *status_code, const char **error_details) { std::multimap metadata; // const_cast is safe since the SecureAuthContext does not take owndership and @@ -196,12 +219,31 @@ void MetadataCredentialsPluginWrapper::InvokePlugin( md_entry.flags = 0; md.push_back(md_entry); } - cb(user_data, md.empty() ? nullptr : &md[0], md.size(), - static_cast(status.error_code()), - status.error_message().c_str()); - for (auto it = md.begin(); it != md.end(); ++it) { - grpc_slice_unref(it->key); - grpc_slice_unref(it->value); + if (creds_md != nullptr) { + // Synchronous return. + if (md.size() > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) { + *num_creds_md = 0; + *status_code = GRPC_STATUS_INTERNAL; + *error_details = gpr_strdup( + "blocking plugin credentials returned too many metadata keys"); + UnrefMetadata(md); + } else { + for (const auto& elem : md) { + creds_md[*num_creds_md].key = elem.key; + creds_md[*num_creds_md].value = elem.value; + creds_md[*num_creds_md].flags = elem.flags; + ++(*num_creds_md); + } + *status_code = static_cast(status.error_code()); + *error_details = + status.ok() ? nullptr : gpr_strdup(status.error_message().c_str()); + } + } else { + // Asynchronous return. + cb(user_data, md.empty() ? 
nullptr : &md[0], md.size(), + static_cast(status.error_code()), + status.error_message().c_str()); + UnrefMetadata(md); } } diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h index 66547c736b2..fdabaca84fa 100644 --- a/src/cpp/client/secure_credentials.h +++ b/src/cpp/client/secure_credentials.h @@ -58,16 +58,23 @@ class SecureCallCredentials final : public CallCredentials { class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen { public: static void Destroy(void* wrapper); - static void GetMetadata(void* wrapper, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void* user_data); + static int GetMetadata( + void* wrapper, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void* user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details); explicit MetadataCredentialsPluginWrapper( std::unique_ptr plugin); private: - void InvokePlugin(grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void* user_data); + void InvokePlugin( + grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void* user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details); std::unique_ptr thread_pool_; std::unique_ptr plugin_; }; diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index aebce364c58..b2c8ca63f49 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -1023,13 +1023,17 @@ typedef void(GPR_CALLTYPE *grpcsharp_metadata_interceptor_func)( grpc_credentials_plugin_metadata_cb cb, void *user_data, int32_t is_destroy); -static void grpcsharp_get_metadata_handler( +static int grpcsharp_get_metadata_handler( void *state, grpc_auth_metadata_context context, - 
grpc_credentials_plugin_metadata_cb cb, void *user_data) { + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { grpcsharp_metadata_interceptor_func interceptor = (grpcsharp_metadata_interceptor_func)(intptr_t)state; interceptor(state, context.service_url, context.method_name, cb, user_data, 0); + return 0; /* Asynchronous return. */ } static void grpcsharp_metadata_credentials_destroy_handler(void *state) { diff --git a/src/node/ext/call_credentials.cc b/src/node/ext/call_credentials.cc index 4cf3e565efd..0644a812e9a 100644 --- a/src/node/ext/call_credentials.cc +++ b/src/node/ext/call_credentials.cc @@ -238,9 +238,12 @@ NAUV_WORK_CB(SendPluginCallback) { } } -void plugin_get_metadata(void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data) { +int plugin_get_metadata( + void *state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { plugin_state *p_state = reinterpret_cast(state); plugin_callback_data *data = new plugin_callback_data; data->service_url = context.service_url; @@ -252,6 +255,7 @@ void plugin_get_metadata(void *state, grpc_auth_metadata_context context, uv_mutex_unlock(&p_state->plugin_mutex); uv_async_send(&p_state->plugin_async); + return 0; // Async processing. 
} void plugin_uv_close_cb(uv_handle_t *handle) { diff --git a/src/php/ext/grpc/call_credentials.c b/src/php/ext/grpc/call_credentials.c index 1eee8645df9..8bf5120538b 100644 --- a/src/php/ext/grpc/call_credentials.c +++ b/src/php/ext/grpc/call_credentials.c @@ -143,9 +143,12 @@ PHP_METHOD(CallCredentials, createFromPlugin) { } /* Callback function for plugin creds API */ -void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data) { +int plugin_get_metadata( + void *ptr, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { TSRMLS_FETCH(); plugin_state *state = (plugin_state *)ptr; @@ -175,15 +178,19 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, /* call the user callback function */ zend_call_function(state->fci, state->fci_cache TSRMLS_CC); - grpc_status_code code = GRPC_STATUS_OK; + *num_creds_md = 0; + *status = GRPC_STATUS_OK; + *error_details = NULL; + grpc_metadata_array metadata; - bool cleanup = true; if (retval == NULL || Z_TYPE_P(retval) != IS_ARRAY) { - cleanup = false; - code = GRPC_STATUS_INVALID_ARGUMENT; - } else if (!create_metadata_array(retval, &metadata)) { - code = GRPC_STATUS_INVALID_ARGUMENT; + *status = GRPC_STATUS_INVALID_ARGUMENT; + return true; // Synchronous return. + } + if (!create_metadata_array(retval, &metadata)) { + *status = GRPC_STATUS_INVALID_ARGUMENT; + return true; // Synchronous return. 
} if (retval != NULL) { @@ -197,14 +204,24 @@ void plugin_get_metadata(void *ptr, grpc_auth_metadata_context context, #endif } - /* Pass control back to core */ - cb(user_data, metadata.metadata, metadata.count, code, NULL); - if (cleanup) { - for (int i = 0; i < metadata.count; i++) { + if (metadata.count > GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX) { + *status = GRPC_STATUS_INTERNAL; + *error_details = gpr_strdup( + "PHP plugin credentials returned too many metadata entries"); + for (size_t i = 0; i < metadata.count; i++) { + // TODO(stanleycheung): Why don't we need to unref the key here? grpc_slice_unref(metadata.metadata[i].value); } - grpc_metadata_array_destroy(&metadata); + } else { + // Return data to core. + *num_creds_md = metadata.count; + for (size_t i = 0; i < metadata.count; ++i) { + creds_md[i] = metadata.metadata[i]; + } } + + grpc_metadata_array_destroy(&metadata); + return true; // Synchronous return. } /* Cleanup function for plugin creds API */ diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi index a0e69dd6131..98f306feb78 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi @@ -49,8 +49,11 @@ cdef class AuthMetadataContext: cdef grpc_auth_metadata_context context -cdef void plugin_get_metadata( +cdef int plugin_get_metadata( void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) with gil cdef void plugin_destroy_c_plugin_state(void *state) with gil diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 98d7a9820df..36ae151b9dc 
100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -122,9 +122,13 @@ cdef class AuthMetadataContext: grpc_shutdown() -cdef void plugin_get_metadata( +cdef int plugin_get_metadata( void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data) with gil: + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) with gil: +# FIXME: force the python code to run in a separate thread called_flag = [False] def python_callback( Metadata metadata, grpc_status_code status, @@ -141,6 +145,7 @@ cdef void plugin_get_metadata( if not called_flag[0]: cb(user_data, Metadata([]).c_metadata_array.metadata, 0, StatusCode.unknown, traceback.format_exc().encode()) + return 0 # Asynchronous return cdef void plugin_destroy_c_plugin_state(void *state) with gil: cpython.Py_DECREF(state) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index 5950bfa0e6a..7ca8fd3df06 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -461,9 +461,12 @@ cdef extern from "grpc/grpc_security.h": grpc_status_code status, const char *error_details) ctypedef struct grpc_metadata_credentials_plugin: - void (*get_metadata)( + int (*get_metadata)( void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data) + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) void (*destroy)(void *state) void *state const char *type diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c index 
049a869bdcf..4214a0811bd 100644 --- a/src/ruby/ext/grpc/rb_call_credentials.c +++ b/src/ruby/ext/grpc/rb_call_credentials.c @@ -112,9 +112,12 @@ static void grpc_rb_call_credentials_callback_with_gil(void *param) { gpr_free(params); } -static void grpc_rb_call_credentials_plugin_get_metadata( +static int grpc_rb_call_credentials_plugin_get_metadata( void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, void *user_data) { + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { callback_params *params = gpr_malloc(sizeof(callback_params)); params->get_metadata = (VALUE)state; params->context = context; @@ -123,6 +126,7 @@ static void grpc_rb_call_credentials_plugin_get_metadata( grpc_rb_event_queue_enqueue(grpc_rb_call_credentials_callback_with_gil, (void *)(params)); + return 0; // Async return. } static void grpc_rb_call_credentials_plugin_destroy(void *state) { diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c index 441c4311353..5ac58070c8c 100644 --- a/test/core/security/credentials_test.c +++ b/test/core/security/credentials_test.c @@ -1036,39 +1036,46 @@ typedef enum { static const expected_md plugin_md[] = {{"foo", "bar"}, {"hi", "there"}}; -static void plugin_get_metadata_success(void *state, - grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data) { - size_t i; - grpc_metadata md[GPR_ARRAY_SIZE(plugin_md)]; - plugin_state *s = (plugin_state *)state; +static int plugin_get_metadata_success( + void *state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { GPR_ASSERT(strcmp(context.service_url, 
test_service_url) == 0); GPR_ASSERT(strcmp(context.method_name, test_method) == 0); GPR_ASSERT(context.channel_auth_context == NULL); GPR_ASSERT(context.reserved == NULL); + GPR_ASSERT(GPR_ARRAY_SIZE(plugin_md) < + GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX); + plugin_state *s = (plugin_state *)state; *s = PLUGIN_GET_METADATA_CALLED_STATE; - for (i = 0; i < GPR_ARRAY_SIZE(plugin_md); i++) { - memset(&md[i], 0, sizeof(grpc_metadata)); - md[i].key = grpc_slice_from_copied_string(plugin_md[i].key); - md[i].value = grpc_slice_from_copied_string(plugin_md[i].value); + for (size_t i = 0; i < GPR_ARRAY_SIZE(plugin_md); ++i) { + memset(&creds_md[i], 0, sizeof(grpc_metadata)); + creds_md[i].key = grpc_slice_from_copied_string(plugin_md[i].key); + creds_md[i].value = grpc_slice_from_copied_string(plugin_md[i].value); } - cb(user_data, md, GPR_ARRAY_SIZE(md), GRPC_STATUS_OK, NULL); + *num_creds_md = GPR_ARRAY_SIZE(plugin_md); + return true; // Synchronous return. } static const char *plugin_error_details = "Could not get metadata for plugin."; -static void plugin_get_metadata_failure(void *state, - grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data) { - plugin_state *s = (plugin_state *)state; +static int plugin_get_metadata_failure( + void *state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details) { GPR_ASSERT(strcmp(context.service_url, test_service_url) == 0); GPR_ASSERT(strcmp(context.method_name, test_method) == 0); GPR_ASSERT(context.channel_auth_context == NULL); GPR_ASSERT(context.reserved == NULL); + plugin_state *s = (plugin_state *)state; *s = PLUGIN_GET_METADATA_CALLED_STATE; - cb(user_data, NULL, 0, GRPC_STATUS_UNAUTHENTICATED, plugin_error_details); + *status = GRPC_STATUS_UNAUTHENTICATED; + *error_details = 
gpr_strdup(plugin_error_details); + return true; // Synchronous return. } static void plugin_destroy(void *state) { From d27504f09a6ec147ee0ea4d38b6d4714e0449fda Mon Sep 17 00:00:00 2001 From: Ken Payson Date: Fri, 1 Sep 2017 14:30:29 -0700 Subject: [PATCH 009/109] Changes --- .../grpc/_cython/_cygrpc/credentials.pyx.pxi | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 36ae151b9dc..fa89b8d55a9 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -14,6 +14,7 @@ cimport cpython +import threading import traceback @@ -128,7 +129,6 @@ cdef int plugin_get_metadata( grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], size_t *num_creds_md, grpc_status_code *status, const char **error_details) with gil: -# FIXME: force the python code to run in a separate thread called_flag = [False] def python_callback( Metadata metadata, grpc_status_code status, @@ -139,12 +139,14 @@ cdef int plugin_get_metadata( cdef CredentialsMetadataPlugin self = state cdef AuthMetadataContext cy_context = AuthMetadataContext() cy_context.context = context - try: - self.plugin_callback(cy_context, python_callback) - except Exception as error: - if not called_flag[0]: - cb(user_data, Metadata([]).c_metadata_array.metadata, - 0, StatusCode.unknown, traceback.format_exc().encode()) + def async_callback(): + try: + self.plugin_callback(cy_context, python_callback) + except Exception as error: + if not called_flag[0]: + cb(user_data, Metadata([]).c_metadata_array.metadata, + 0, StatusCode.unknown, traceback.format_exc().encode()) + threading.Thread(group=None, target=async_callback).start() return 0 # Asynchronous return cdef void plugin_destroy_c_plugin_state(void *state) with gil: From 6d8cc7c7ee3933d8643ab479402717cf9a0726a2 Mon Sep 17 00:00:00 
2001 From: Stanley Cheung Date: Mon, 4 Sep 2017 22:22:18 -0700 Subject: [PATCH 010/109] Fixed some PHP errors --- src/php/ext/grpc/call_credentials.c | 1 + src/php/ext/grpc/call_credentials.h | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/src/php/ext/grpc/call_credentials.c b/src/php/ext/grpc/call_credentials.c index 8bf5120538b..a395d53614e 100644 --- a/src/php/ext/grpc/call_credentials.c +++ b/src/php/ext/grpc/call_credentials.c @@ -35,6 +35,7 @@ #include #include +#include zend_class_entry *grpc_ce_call_credentials; #if PHP_MAJOR_VERSION >= 7 diff --git a/src/php/ext/grpc/call_credentials.h b/src/php/ext/grpc/call_credentials.h index 9be8763278e..663cc6858de 100755 --- a/src/php/ext/grpc/call_credentials.h +++ b/src/php/ext/grpc/call_credentials.h @@ -65,9 +65,12 @@ typedef struct plugin_state { } plugin_state; /* Callback function for plugin creds API */ -void plugin_get_metadata(void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data); +int plugin_get_metadata( + void *ptr, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details); /* Cleanup function for plugin creds API */ void plugin_destroy_state(void *ptr); From 130e07061f6eed9bbb04f58eeab091fa515b0572 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 1 Sep 2017 18:02:51 -0700 Subject: [PATCH 011/109] Add interface abstraction and identity module --- build.yaml | 2 + .../chttp2/transport/chttp2_transport.c | 6 +- .../ext/transport/chttp2/transport/writing.c | 2 +- src/core/lib/compression/stream_compression.c | 173 +++----------- src/core/lib/compression/stream_compression.h | 34 ++- .../lib/compression/stream_compression_gzip.c | 218 ++++++++++++++++++ .../lib/compression/stream_compression_gzip.h | 26 +++ 
.../compression/stream_compression_identity.c | 77 +++++++ .../compression/stream_compression_identity.h | 26 +++ 9 files changed, 410 insertions(+), 154 deletions(-) create mode 100644 src/core/lib/compression/stream_compression_gzip.c create mode 100644 src/core/lib/compression/stream_compression_gzip.h create mode 100644 src/core/lib/compression/stream_compression_identity.c create mode 100644 src/core/lib/compression/stream_compression_identity.h diff --git a/build.yaml b/build.yaml index 7d8440ce446..0939796bcb3 100644 --- a/build.yaml +++ b/build.yaml @@ -197,6 +197,7 @@ filegroups: - src/core/lib/compression/stream_compression.c - src/core/lib/debug/stats.c - src/core/lib/debug/stats_data.c + - src/core/lib/compression/stream_compression_gzip.c - src/core/lib/http/format_request.c - src/core/lib/http/httpcli.c - src/core/lib/http/parser.c @@ -349,6 +350,7 @@ filegroups: - src/core/lib/compression/stream_compression.h - src/core/lib/debug/stats.h - src/core/lib/debug/stats_data.h + - src/core/lib/compression/stream_compression_gzip.h - src/core/lib/http/format_request.h - src/core/lib/http/httpcli.h - src/core/lib/http/parser.h diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 7541bd5c92c..0e00995d11d 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1731,7 +1731,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, @@ -1804,7 +1804,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, bool end_of_context; if (!s->stream_decompression_ctx) { 
s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->frame_storage, @@ -2694,7 +2694,7 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index 711938b2782..b877e9f1264 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -329,7 +329,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (s->stream_compression_ctx == NULL) { s->stream_compression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_COMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); } s->uncompressed_data_size = s->flow_controlled_buffer.length; GPR_ASSERT(grpc_stream_compress( diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression.c index df13d53e06f..274ee100ba5 100644 --- a/src/core/lib/compression/stream_compression.c +++ b/src/core/lib/compression/stream_compression.c @@ -16,176 +16,57 @@ * */ -#include #include #include "src/core/lib/compression/stream_compression.h" -#include "src/core/lib/iomgr/exec_ctx.h" -#include "src/core/lib/slice/slice_internal.h" +#include "src/core/lib/compression/stream_compression_gzip.h" -#define OUTPUT_BLOCK_SIZE (1024) - -static bool gzip_flate(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, int flush, - bool *end_of_context) { - 
GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH); - /* Full flush is not allowed when inflating. */ - GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); - - grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; - int r; - bool eoc = false; - size_t original_max_output_size = max_output_size; - while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) { - size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? max_output_size - : OUTPUT_BLOCK_SIZE; - grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size); - ctx->zs.avail_out = (uInt)slice_size; - ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out); - while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) { - grpc_slice slice = grpc_slice_buffer_take_first(in); - ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice); - ctx->zs.next_in = GRPC_SLICE_START_PTR(slice); - r = ctx->flate(&ctx->zs, Z_NO_FLUSH); - if (r < 0 && r != Z_BUF_ERROR) { - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } else if (r == Z_STREAM_END && ctx->flate == inflate) { - eoc = true; - } - if (ctx->zs.avail_in > 0) { - grpc_slice_buffer_undo_take_first( - in, - grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, - GRPC_SLICE_LENGTH(slice))); - } - grpc_slice_unref_internal(&exec_ctx, slice); - } - if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { - GPR_ASSERT(in->length == 0); - r = ctx->flate(&ctx->zs, flush); - if (flush == Z_SYNC_FLUSH) { - switch (r) { - case Z_OK: - /* Maybe flush is not complete; just made some partial progress. 
*/ - if (ctx->zs.avail_out > 0) { - flush = 0; - } - break; - case Z_BUF_ERROR: - case Z_STREAM_END: - flush = 0; - break; - default: - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } - } else if (flush == Z_FINISH) { - switch (r) { - case Z_OK: - case Z_BUF_ERROR: - /* Wait for the next loop to assign additional output space. */ - GPR_ASSERT(ctx->zs.avail_out == 0); - break; - case Z_STREAM_END: - flush = 0; - break; - default: - gpr_log(GPR_ERROR, "zlib error (%d)", r); - grpc_slice_unref_internal(&exec_ctx, slice_out); - grpc_exec_ctx_finish(&exec_ctx); - return false; - } - } - } - - if (ctx->zs.avail_out == 0) { - grpc_slice_buffer_add(out, slice_out); - } else if (ctx->zs.avail_out < slice_size) { - slice_out.data.refcounted.length -= ctx->zs.avail_out; - grpc_slice_buffer_add(out, slice_out); - } else { - grpc_slice_unref_internal(&exec_ctx, slice_out); - } - max_output_size -= (slice_size - ctx->zs.avail_out); - } - grpc_exec_ctx_finish(&exec_ctx); - if (end_of_context) { - *end_of_context = eoc; - } - if (output_size) { - *output_size = original_max_output_size - max_output_size; - } - return true; -} +extern const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable; bool grpc_stream_compress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, grpc_stream_compression_flush flush) { - GPR_ASSERT(ctx->flate == deflate); - int gzip_flush; - switch (flush) { - case GRPC_STREAM_COMPRESSION_FLUSH_NONE: - gzip_flush = 0; - break; - case GRPC_STREAM_COMPRESSION_FLUSH_SYNC: - gzip_flush = Z_SYNC_FLUSH; - break; - case GRPC_STREAM_COMPRESSION_FLUSH_FINISH: - gzip_flush = Z_FINISH; - break; - default: - gzip_flush = 0; - } - return gzip_flate(ctx, in, out, output_size, max_output_size, gzip_flush, - NULL); + return ctx->vtable->compress(ctx, in, out, output_size, 
max_output_size, flush); } bool grpc_stream_decompress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, bool *end_of_context) { - GPR_ASSERT(ctx->flate == inflate); - return gzip_flate(ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH, - end_of_context); + return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size, end_of_context); } grpc_stream_compression_context *grpc_stream_compression_context_create( grpc_stream_compression_method method) { - grpc_stream_compression_context *ctx = - gpr_zalloc(sizeof(grpc_stream_compression_context)); - int r; - if (ctx == NULL) { - return NULL; - } - if (method == GRPC_STREAM_COMPRESSION_DECOMPRESS) { - r = inflateInit2(&ctx->zs, 0x1F); - ctx->flate = inflate; - } else { - r = deflateInit2(&ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8, - Z_DEFAULT_STRATEGY); - ctx->flate = deflate; - } - if (r != Z_OK) { - gpr_free(ctx); - return NULL; + switch (method) { + case GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS: + case GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS: + return grpc_stream_compression_identity_vtable.context_create(method); + case GRPC_STREAM_COMPRESSION_GZIP_COMPRESS: + case GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS: + return grpc_stream_compression_gzip_vtable.context_create(method); + default: + gpr_log(GPR_ERROR, "Unknown stream compression method: %d", method); + return NULL; } - - return ctx; } void grpc_stream_compression_context_destroy( grpc_stream_compression_context *ctx) { - if (ctx->flate == inflate) { - inflateEnd(&ctx->zs); + ctx->vtable->context_destroy(ctx); +} + +int grpc_stream_compression_method_parse( + grpc_slice value, bool is_compress, grpc_stream_compression_method *method) { + if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { + *method = is_compress ? 
GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; + return 1; + } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) { + *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS; + return 1; } else { - deflateEnd(&ctx->zs); + return 0; } - gpr_free(ctx); } + diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h index 844dff81a3b..fa07247c310 100644 --- a/src/core/lib/compression/stream_compression.h +++ b/src/core/lib/compression/stream_compression.h @@ -24,15 +24,20 @@ #include #include +#include "src/core/lib/transport/static_metadata.h" + +typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable; + /* Stream compression/decompression context */ typedef struct grpc_stream_compression_context { - z_stream zs; - int (*flate)(z_stream *zs, int flush); + const grpc_stream_compression_vtable *vtable; } grpc_stream_compression_context; typedef enum grpc_stream_compression_method { - GRPC_STREAM_COMPRESSION_COMPRESS = 0, - GRPC_STREAM_COMPRESSION_DECOMPRESS, + GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS = 0, + GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS, + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS, + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS, GRPC_STREAM_COMPRESSION_METHOD_COUNT } grpc_stream_compression_method; @@ -43,6 +48,21 @@ typedef enum grpc_stream_compression_flush { GRPC_STREAM_COMPRESSION_FLUSH_COUNT } grpc_stream_compression_flush; +struct grpc_stream_compression_vtable { +bool (*compress)(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + grpc_stream_compression_flush flush); +bool (*decompress)(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + bool *end_of_context); +grpc_stream_compression_context *(*context_create)( + 
grpc_stream_compression_method method); +void (*context_destroy)( + grpc_stream_compression_context *ctx); +}; + /** * Compress bytes provided in \a in with a given context, with an optional flush * at the end of compression. Emits at most \a max_output_size compressed bytes @@ -87,4 +107,10 @@ grpc_stream_compression_context *grpc_stream_compression_context_create( void grpc_stream_compression_context_destroy( grpc_stream_compression_context *ctx); +/** + * Parse stream compression method based on algorithm name + */ +int grpc_stream_compression_method_parse( + grpc_slice value, bool is_compress, grpc_stream_compression_method *method); + #endif diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c new file mode 100644 index 00000000000..2d03836a0dc --- /dev/null +++ b/src/core/lib/compression/stream_compression_gzip.c @@ -0,0 +1,218 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include +#include + +#include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" + +#define OUTPUT_BLOCK_SIZE (1024) + +typedef struct grpc_stream_compression_context_gzip { + grpc_stream_compression_context base; + + z_stream zs; + int (*flate)(z_stream *zs, int flush); +} grpc_stream_compression_context_gzip; + +static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, int flush, + bool *end_of_context) { + GPR_ASSERT(flush == 0 || flush == Z_SYNC_FLUSH || flush == Z_FINISH); + /* Full flush is not allowed when inflating. */ + GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH))); + + grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; + int r; + bool eoc = false; + size_t original_max_output_size = max_output_size; + while (max_output_size > 0 && (in->length > 0 || flush) && !eoc) { + size_t slice_size = max_output_size < OUTPUT_BLOCK_SIZE ? 
max_output_size + : OUTPUT_BLOCK_SIZE; + grpc_slice slice_out = GRPC_SLICE_MALLOC(slice_size); + ctx->zs.avail_out = (uInt)slice_size; + ctx->zs.next_out = GRPC_SLICE_START_PTR(slice_out); + while (ctx->zs.avail_out > 0 && in->length > 0 && !eoc) { + grpc_slice slice = grpc_slice_buffer_take_first(in); + ctx->zs.avail_in = (uInt)GRPC_SLICE_LENGTH(slice); + ctx->zs.next_in = GRPC_SLICE_START_PTR(slice); + r = ctx->flate(&ctx->zs, Z_NO_FLUSH); + if (r < 0 && r != Z_BUF_ERROR) { + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } else if (r == Z_STREAM_END && ctx->flate == inflate) { + eoc = true; + } + if (ctx->zs.avail_in > 0) { + grpc_slice_buffer_undo_take_first( + in, + grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in, + GRPC_SLICE_LENGTH(slice))); + } + grpc_slice_unref_internal(&exec_ctx, slice); + } + if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) { + GPR_ASSERT(in->length == 0); + r = ctx->flate(&ctx->zs, flush); + if (flush == Z_SYNC_FLUSH) { + switch (r) { + case Z_OK: + /* Maybe flush is not complete; just made some partial progress. */ + if (ctx->zs.avail_out > 0) { + flush = 0; + } + break; + case Z_BUF_ERROR: + case Z_STREAM_END: + flush = 0; + break; + default: + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } + } else if (flush == Z_FINISH) { + switch (r) { + case Z_OK: + case Z_BUF_ERROR: + /* Wait for the next loop to assign additional output space. 
*/ + GPR_ASSERT(ctx->zs.avail_out == 0); + break; + case Z_STREAM_END: + flush = 0; + break; + default: + gpr_log(GPR_ERROR, "zlib error (%d)", r); + grpc_slice_unref_internal(&exec_ctx, slice_out); + grpc_exec_ctx_finish(&exec_ctx); + return false; + } + } + } + + if (ctx->zs.avail_out == 0) { + grpc_slice_buffer_add(out, slice_out); + } else if (ctx->zs.avail_out < slice_size) { + slice_out.data.refcounted.length -= ctx->zs.avail_out; + grpc_slice_buffer_add(out, slice_out); + } else { + grpc_slice_unref_internal(&exec_ctx, slice_out); + } + max_output_size -= (slice_size - ctx->zs.avail_out); + } + grpc_exec_ctx_finish(&exec_ctx); + if (end_of_context) { + *end_of_context = eoc; + } + if (output_size) { + *output_size = original_max_output_size - max_output_size; + } + return true; +} + +static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + grpc_stream_compression_flush flush) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip *)ctx; + GPR_ASSERT(gzip_ctx->flate == deflate); + int gzip_flush; + switch (flush) { + case GRPC_STREAM_COMPRESSION_FLUSH_NONE: + gzip_flush = 0; + break; + case GRPC_STREAM_COMPRESSION_FLUSH_SYNC: + gzip_flush = Z_SYNC_FLUSH; + break; + case GRPC_STREAM_COMPRESSION_FLUSH_FINISH: + gzip_flush = Z_FINISH; + break; + default: + gzip_flush = 0; + } + return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, gzip_flush, + NULL); +} + +static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + bool *end_of_context) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip *)ctx; + GPR_ASSERT(gzip_ctx->flate == inflate); + return gzip_flate(gzip_ctx, 
in, out, output_size, max_output_size, Z_SYNC_FLUSH, + end_of_context); +} + +static grpc_stream_compression_context *grpc_stream_compression_context_create_gzip( + grpc_stream_compression_method method) { + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + grpc_stream_compression_context_gzip *gzip_ctx = + gpr_zalloc(sizeof(grpc_stream_compression_context_gzip)); + int r; + if (gzip_ctx == NULL) { + return NULL; + } + if (method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS) { + r = inflateInit2(&gzip_ctx->zs, 0x1F); + gzip_ctx->flate = inflate; + } else { + r = deflateInit2(&gzip_ctx->zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8, + Z_DEFAULT_STRATEGY); + gzip_ctx->flate = deflate; + } + if (r != Z_OK) { + gpr_free(gzip_ctx); + return NULL; + } + + return (grpc_stream_compression_context *)gzip_ctx; +} + +static void grpc_stream_compression_context_destroy_gzip( + grpc_stream_compression_context *ctx) { + if (ctx == NULL) { + return; + } + grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip*)ctx; + if (gzip_ctx->flate == inflate) { + inflateEnd(&gzip_ctx->zs); + } else { + deflateEnd(&gzip_ctx->zs); + } + gpr_free(ctx); +} + +const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = { + .compress = grpc_stream_compress_gzip, + .decompress = grpc_stream_decompress_gzip, + .context_create = grpc_stream_compression_context_create_gzip, + .context_destroy = grpc_stream_compression_context_destroy_gzip +}; diff --git a/src/core/lib/compression/stream_compression_gzip.h b/src/core/lib/compression/stream_compression_gzip.h new file mode 100644 index 00000000000..7cf49a0de93 --- /dev/null +++ b/src/core/lib/compression/stream_compression_gzip.h @@ -0,0 +1,26 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H +#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_GZIP_H + +#include "src/core/lib/compression/stream_compression.h" + +extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable; + +#endif diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c new file mode 100644 index 00000000000..1395b762d48 --- /dev/null +++ b/src/core/lib/compression/stream_compression_identity.c @@ -0,0 +1,77 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#include +#include + +#include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/iomgr/exec_ctx.h" +#include "src/core/lib/slice/slice_internal.h" + +#define OUTPUT_BLOCK_SIZE (1024) + +static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size) { + if (max_output_size >= in->length) { + *output_size = in->length; + grpc_slice_buffer_move_into(in, out); + } else { + *output_size = max_output_size; + grpc_slice_buffer_move_first(in, max_output_size, out); + } +} + +static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + grpc_stream_compression_flush flush) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_pass_through(in, out, output_size, max_output_size); + return true; +} + +static bool grpc_stream_decompress_identity(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + bool *end_of_context) { + if (ctx == NULL) { + return false; + } + grpc_stream_compression_pass_through(in, out, output_size, max_output_size); + return true; +} + +static grpc_stream_compression_context *grpc_stream_compression_context_create_identity( + grpc_stream_compression_method method) { + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); + /* No context needed in this case. Use fake context instead. 
*/ + return (grpc_stream_compression_context *)1; +} + +static void grpc_stream_compression_context_destroy_identity( + grpc_stream_compression_context *ctx) { + return; +} + +const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = { + .compress = grpc_stream_compress_identity, + .decompress = grpc_stream_decompress_identity, + .context_create = grpc_stream_compression_context_create_identity, + .context_destroy = grpc_stream_compression_context_destroy_identity +}; diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h new file mode 100644 index 00000000000..0d770dd32b7 --- /dev/null +++ b/src/core/lib/compression/stream_compression_identity.h @@ -0,0 +1,26 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +#ifndef GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H +#define GRPC_CORE_LIB_COMPRESSION_STREAM_COMPRESSION_IDENTITY_H + +#include "src/core/lib/compression/stream_compression.h" + +extern const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable; + +#endif From bf5484e785f93e1183a15415239a729d3d120af1 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 1 Sep 2017 18:03:03 -0700 Subject: [PATCH 012/109] build_project --- CMakeLists.txt | 12 ++++++++++++ Makefile | 12 ++++++++++++ binding.gyp | 2 ++ build.yaml | 6 ++++-- config.m4 | 2 ++ config.w32 | 2 ++ gRPC-Core.podspec | 6 ++++++ grpc.gemspec | 4 ++++ grpc.gyp | 8 ++++++++ package.xml | 4 ++++ src/python/grpcio/grpc_core_dependencies.py | 2 ++ tools/doxygen/Doxyfile.c++.internal | 2 ++ tools/doxygen/Doxyfile.core.internal | 4 ++++ tools/run_tests/generated/sources_and_headers.json | 6 ++++++ 14 files changed, 70 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b22aa1899e8..ee4d2bcbaba 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -968,6 +968,8 @@ add_library(grpc src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c + src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c @@ -1319,6 +1321,8 @@ add_library(grpc_cronet src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c + src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c @@ -1638,6 +1642,8 @@ add_library(grpc_test_util src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c 
+ src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c @@ -1901,6 +1907,8 @@ add_library(grpc_test_util_unsecure src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c + src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c @@ -2150,6 +2158,8 @@ add_library(grpc_unsecure src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c + src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c @@ -2851,6 +2861,8 @@ add_library(grpc++_cronet src/core/lib/compression/compression.c src/core/lib/compression/message_compress.c src/core/lib/compression/stream_compression.c + src/core/lib/compression/stream_compression_gzip.c + src/core/lib/compression/stream_compression_identity.c src/core/lib/debug/stats.c src/core/lib/debug/stats_data.c src/core/lib/http/format_request.c diff --git a/Makefile b/Makefile index 743ca41dc6c..a3db8106dae 100644 --- a/Makefile +++ b/Makefile @@ -2915,6 +2915,8 @@ LIBGRPC_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ @@ -3264,6 +3266,8 @@ LIBGRPC_CRONET_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ 
src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ @@ -3580,6 +3584,8 @@ LIBGRPC_TEST_UTIL_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ @@ -3832,6 +3838,8 @@ LIBGRPC_TEST_UTIL_UNSECURE_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ @@ -4057,6 +4065,8 @@ LIBGRPC_UNSECURE_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ @@ -4741,6 +4751,8 @@ LIBGRPC++_CRONET_SRC = \ src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ diff --git a/binding.gyp b/binding.gyp index 946edd8139c..0547a82512f 100644 --- a/binding.gyp +++ b/binding.gyp @@ -667,6 +667,8 @@ 
'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', diff --git a/build.yaml b/build.yaml index 0939796bcb3..0de5b9859d8 100644 --- a/build.yaml +++ b/build.yaml @@ -195,9 +195,10 @@ filegroups: - src/core/lib/compression/compression.c - src/core/lib/compression/message_compress.c - src/core/lib/compression/stream_compression.c + - src/core/lib/compression/stream_compression_gzip.c + - src/core/lib/compression/stream_compression_identity.c - src/core/lib/debug/stats.c - src/core/lib/debug/stats_data.c - - src/core/lib/compression/stream_compression_gzip.c - src/core/lib/http/format_request.c - src/core/lib/http/httpcli.c - src/core/lib/http/parser.c @@ -348,9 +349,10 @@ filegroups: - src/core/lib/compression/algorithm_metadata.h - src/core/lib/compression/message_compress.h - src/core/lib/compression/stream_compression.h + - src/core/lib/compression/stream_compression_gzip.h + - src/core/lib/compression/stream_compression_identity.h - src/core/lib/debug/stats.h - src/core/lib/debug/stats_data.h - - src/core/lib/compression/stream_compression_gzip.h - src/core/lib/http/format_request.h - src/core/lib/http/httpcli.h - src/core/lib/http/parser.h diff --git a/config.m4 b/config.m4 index d9d25cb0065..fbae82de7f6 100644 --- a/config.m4 +++ b/config.m4 @@ -96,6 +96,8 @@ if test "$PHP_GRPC" != "no"; then src/core/lib/compression/compression.c \ src/core/lib/compression/message_compress.c \ src/core/lib/compression/stream_compression.c \ + src/core/lib/compression/stream_compression_gzip.c \ + src/core/lib/compression/stream_compression_identity.c \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats_data.c \ src/core/lib/http/format_request.c \ diff --git a/config.w32 
b/config.w32 index 640115e8614..3dacc31e6ad 100644 --- a/config.w32 +++ b/config.w32 @@ -73,6 +73,8 @@ if (PHP_GRPC != "no") { "src\\core\\lib\\compression\\compression.c " + "src\\core\\lib\\compression\\message_compress.c " + "src\\core\\lib\\compression\\stream_compression.c " + + "src\\core\\lib\\compression\\stream_compression_gzip.c " + + "src\\core\\lib\\compression\\stream_compression_identity.c " + "src\\core\\lib\\debug\\stats.c " + "src\\core\\lib\\debug\\stats_data.c " + "src\\core\\lib\\http\\format_request.c " + diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index 66368b1b4b6..8fea17c5dba 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -329,6 +329,8 @@ Pod::Spec.new do |s| 'src/core/lib/compression/algorithm_metadata.h', 'src/core/lib/compression/message_compress.h', 'src/core/lib/compression/stream_compression.h', + 'src/core/lib/compression/stream_compression_gzip.h', + 'src/core/lib/compression/stream_compression_identity.h', 'src/core/lib/debug/stats.h', 'src/core/lib/debug/stats_data.h', 'src/core/lib/http/format_request.h', @@ -479,6 +481,8 @@ Pod::Spec.new do |s| 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', @@ -826,6 +830,8 @@ Pod::Spec.new do |s| 'src/core/lib/compression/algorithm_metadata.h', 'src/core/lib/compression/message_compress.h', 'src/core/lib/compression/stream_compression.h', + 'src/core/lib/compression/stream_compression_gzip.h', + 'src/core/lib/compression/stream_compression_identity.h', 'src/core/lib/debug/stats.h', 'src/core/lib/debug/stats_data.h', 'src/core/lib/http/format_request.h', diff --git a/grpc.gemspec b/grpc.gemspec index d3779a9991e..166c914c529 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ 
-261,6 +261,8 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/compression/algorithm_metadata.h ) s.files += %w( src/core/lib/compression/message_compress.h ) s.files += %w( src/core/lib/compression/stream_compression.h ) + s.files += %w( src/core/lib/compression/stream_compression_gzip.h ) + s.files += %w( src/core/lib/compression/stream_compression_identity.h ) s.files += %w( src/core/lib/debug/stats.h ) s.files += %w( src/core/lib/debug/stats_data.h ) s.files += %w( src/core/lib/http/format_request.h ) @@ -415,6 +417,8 @@ Gem::Specification.new do |s| s.files += %w( src/core/lib/compression/compression.c ) s.files += %w( src/core/lib/compression/message_compress.c ) s.files += %w( src/core/lib/compression/stream_compression.c ) + s.files += %w( src/core/lib/compression/stream_compression_gzip.c ) + s.files += %w( src/core/lib/compression/stream_compression_identity.c ) s.files += %w( src/core/lib/debug/stats.c ) s.files += %w( src/core/lib/debug/stats_data.c ) s.files += %w( src/core/lib/http/format_request.c ) diff --git a/grpc.gyp b/grpc.gyp index 40938a4564a..2e2e0b7dfda 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -233,6 +233,8 @@ 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', @@ -533,6 +535,8 @@ 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', @@ -738,6 +742,8 @@ 'src/core/lib/compression/compression.c', 
'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', @@ -928,6 +934,8 @@ 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', diff --git a/package.xml b/package.xml index 1cca4fbdddb..33b8378a35d 100644 --- a/package.xml +++ b/package.xml @@ -271,6 +271,8 @@ + + @@ -425,6 +427,8 @@ + + diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py index 1cbf345ab6e..9025a07ba9c 100644 --- a/src/python/grpcio/grpc_core_dependencies.py +++ b/src/python/grpcio/grpc_core_dependencies.py @@ -72,6 +72,8 @@ CORE_SOURCE_FILES = [ 'src/core/lib/compression/compression.c', 'src/core/lib/compression/message_compress.c', 'src/core/lib/compression/stream_compression.c', + 'src/core/lib/compression/stream_compression_gzip.c', + 'src/core/lib/compression/stream_compression_identity.c', 'src/core/lib/debug/stats.c', 'src/core/lib/debug/stats_data.c', 'src/core/lib/http/format_request.c', diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 91c149eec92..71f500b53ff 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -942,6 +942,8 @@ src/core/lib/channel/handshaker_registry.h \ src/core/lib/compression/algorithm_metadata.h \ src/core/lib/compression/message_compress.h \ src/core/lib/compression/stream_compression.h \ +src/core/lib/compression/stream_compression_gzip.h \ 
+src/core/lib/compression/stream_compression_identity.h \ src/core/lib/debug/stats.h \ src/core/lib/debug/stats_data.h \ src/core/lib/debug/trace.h \ diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal index 26d982acd7d..63993c808bc 100644 --- a/tools/doxygen/Doxyfile.core.internal +++ b/tools/doxygen/Doxyfile.core.internal @@ -1077,6 +1077,10 @@ src/core/lib/compression/message_compress.c \ src/core/lib/compression/message_compress.h \ src/core/lib/compression/stream_compression.c \ src/core/lib/compression/stream_compression.h \ +src/core/lib/compression/stream_compression_gzip.c \ +src/core/lib/compression/stream_compression_gzip.h \ +src/core/lib/compression/stream_compression_identity.c \ +src/core/lib/compression/stream_compression_identity.h \ src/core/lib/debug/stats.c \ src/core/lib/debug/stats.h \ src/core/lib/debug/stats_data.c \ diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json index fbd47389f71..cca4cff0fb1 100644 --- a/tools/run_tests/generated/sources_and_headers.json +++ b/tools/run_tests/generated/sources_and_headers.json @@ -7856,6 +7856,8 @@ "src/core/lib/compression/compression.c", "src/core/lib/compression/message_compress.c", "src/core/lib/compression/stream_compression.c", + "src/core/lib/compression/stream_compression_gzip.c", + "src/core/lib/compression/stream_compression_identity.c", "src/core/lib/debug/stats.c", "src/core/lib/debug/stats_data.c", "src/core/lib/http/format_request.c", @@ -8009,6 +8011,8 @@ "src/core/lib/compression/algorithm_metadata.h", "src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/compression/stream_compression_gzip.h", + "src/core/lib/compression/stream_compression_identity.h", "src/core/lib/debug/stats.h", "src/core/lib/debug/stats_data.h", "src/core/lib/http/format_request.h", @@ -8141,6 +8145,8 @@ "src/core/lib/compression/algorithm_metadata.h", 
"src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/compression/stream_compression_gzip.h", + "src/core/lib/compression/stream_compression_identity.h", "src/core/lib/debug/stats.h", "src/core/lib/debug/stats_data.h", "src/core/lib/http/format_request.h", From 8e14acc76bacdf67fa852a96e44381df238e3907 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 5 Sep 2017 13:39:07 -0700 Subject: [PATCH 013/109] Remove duplicate sentences on send path --- .../chttp2/transport/chttp2_transport.c | 14 ++- .../ext/transport/chttp2/transport/internal.h | 9 +- .../ext/transport/chttp2/transport/writing.c | 89 ++++++++----------- 3 files changed, 45 insertions(+), 67 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 0e00995d11d..e1fb9dd9154 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -727,10 +727,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, grpc_slice_buffer_destroy_internal(exec_ctx, &s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage); - if (s->compressed_data_buffer) { - grpc_slice_buffer_destroy_internal(exec_ctx, s->compressed_data_buffer); - gpr_free(s->compressed_data_buffer); - } + grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer); if (s->decompressed_data_buffer) { grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer); gpr_free(s->decompressed_data_buffer); @@ -1300,12 +1297,11 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; /* Identify stream compression */ - if ((s->stream_compression_send_enabled = - (op_payload->send_initial_metadata.send_initial_metadata->idx.named - .content_encoding != NULL)) 
== true) { - s->compressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer)); - grpc_slice_buffer_init(s->compressed_data_buffer); + if (op_payload->send_initial_metadata.send_initial_metadata->idx.named.content_encoding == NULL || + grpc_stream_compression_method_parse(GRPC_MDVALUE(op_payload->send_initial_metadata.send_initial_metadata->idx.named.content_encoding->md), true, &s->stream_compression_method) == 0) { + s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS; } + grpc_slice_buffer_init(&s->compressed_data_buffer); s->send_initial_metadata_finished = add_closure_barrier(on_complete); s->send_initial_metadata = diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 3c41a8958f6..e5e4dd33443 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -557,10 +557,11 @@ struct grpc_chttp2_stream { grpc_chttp2_write_cb *finish_after_write; size_t sending_bytes; - /** Whether stream compression send is enabled */ + /* Stream compression method to be used. */ + grpc_stream_compression_method stream_compression_method; + /* Stream decompression method to be used. 
*/ + grpc_stream_compression_method stream_decompression_method; bool stream_compression_recv_enabled; - /** Whether stream compression recv is enabled */ - bool stream_compression_send_enabled; /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed */ bool unprocessed_incoming_frames_decompressed; @@ -570,7 +571,7 @@ struct grpc_chttp2_stream { grpc_stream_compression_context *stream_compression_ctx; /** Buffer storing data that is compressed but not sent */ - grpc_slice_buffer *compressed_data_buffer; + grpc_slice_buffer compressed_data_buffer; /** Amount of uncompressed bytes sent out when compressed_data_buffer is * emptied */ size_t uncompressed_data_size; diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index b877e9f1264..b6bc864c29d 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -288,8 +288,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (sent_initial_metadata) { /* send any body bytes, if allowed by flow control */ if (s->flow_controlled_buffer.length > 0 || - (s->stream_compression_send_enabled && - s->compressed_data_buffer->length > 0)) { + s->compressed_data_buffer.length > 0) { uint32_t stream_remote_window = (uint32_t)GPR_MAX( 0, s->flow_control.remote_window_delta + @@ -302,56 +301,40 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (max_outgoing > 0) { bool is_last_data_frame = false; bool is_last_frame = false; - if (s->stream_compression_send_enabled) { - while ((s->flow_controlled_buffer.length > 0 || - s->compressed_data_buffer->length > 0) && - max_outgoing > 0) { - if (s->compressed_data_buffer->length > 0) { - uint32_t send_bytes = (uint32_t)GPR_MIN( - max_outgoing, s->compressed_data_buffer->length); - is_last_data_frame = - (send_bytes == s->compressed_data_buffer->length && - s->flow_controlled_buffer.length == 0 && - s->fetching_send_message == 
NULL); - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - grpc_chttp2_encode_data(s->id, s->compressed_data_buffer, - send_bytes, is_last_frame, - &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, - &s->flow_control, send_bytes); - max_outgoing -= send_bytes; - if (s->compressed_data_buffer->length == 0) { - s->sending_bytes += s->uncompressed_data_size; - } - } else { - if (s->stream_compression_ctx == NULL) { - s->stream_compression_ctx = - grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); - } - s->uncompressed_data_size = s->flow_controlled_buffer.length; - GPR_ASSERT(grpc_stream_compress( - s->stream_compression_ctx, &s->flow_controlled_buffer, - s->compressed_data_buffer, NULL, MAX_SIZE_T, - GRPC_STREAM_COMPRESSION_FLUSH_SYNC)); + while ((s->flow_controlled_buffer.length > 0 || + s->compressed_data_buffer.length > 0) && + max_outgoing > 0) { + if (s->compressed_data_buffer.length > 0) { + uint32_t send_bytes = (uint32_t)GPR_MIN( + max_outgoing, s->compressed_data_buffer.length); + is_last_data_frame = + (send_bytes == s->compressed_data_buffer.length && + s->flow_controlled_buffer.length == 0 && + s->fetching_send_message == NULL); + is_last_frame = + is_last_data_frame && s->send_trailing_metadata != NULL && + grpc_metadata_batch_is_empty(s->send_trailing_metadata); + grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer, + send_bytes, is_last_frame, + &s->stats.outgoing, &t->outbuf); + grpc_chttp2_flowctl_sent_data(&t->flow_control, + &s->flow_control, send_bytes); + max_outgoing -= send_bytes; + if (s->compressed_data_buffer.length == 0) { + s->sending_bytes += s->uncompressed_data_size; } + } else { + if (s->stream_compression_ctx == NULL) { + s->stream_compression_ctx = + grpc_stream_compression_context_create( + s->stream_compression_method); + } + s->uncompressed_data_size = 
s->flow_controlled_buffer.length; + GPR_ASSERT(grpc_stream_compress( + s->stream_compression_ctx, &s->flow_controlled_buffer, + &s->compressed_data_buffer, NULL, MAX_SIZE_T, + GRPC_STREAM_COMPRESSION_FLUSH_SYNC)); } - } else { - uint32_t send_bytes = (uint32_t)GPR_MIN( - max_outgoing, s->flow_controlled_buffer.length); - is_last_data_frame = s->fetching_send_message == NULL && - send_bytes == s->flow_controlled_buffer.length; - is_last_frame = - is_last_data_frame && s->send_trailing_metadata != NULL && - grpc_metadata_batch_is_empty(s->send_trailing_metadata); - grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, - send_bytes, is_last_frame, - &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control, - send_bytes); - s->sending_bytes += send_bytes; } t->ping_state.pings_before_data_required = t->ping_policy.max_pings_without_data; @@ -371,8 +354,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( } now_writing = true; if (s->flow_controlled_buffer.length > 0 || - (s->stream_compression_send_enabled && - s->compressed_data_buffer->length > 0)) { + s->compressed_data_buffer.length > 0) { GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:fork"); grpc_chttp2_list_add_writable_stream(t, s); } @@ -387,8 +369,7 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( if (s->send_trailing_metadata != NULL && s->fetching_send_message == NULL && s->flow_controlled_buffer.length == 0 && - (!s->stream_compression_send_enabled || - s->compressed_data_buffer->length == 0)) { + s->compressed_data_buffer.length == 0) { GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "sending trailing_metadata")); if (grpc_metadata_batch_is_empty(s->send_trailing_metadata)) { grpc_chttp2_encode_data(s->id, &s->flow_controlled_buffer, 0, true, From 03600fc37df35399063ea9d89e9b745835a41396 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 5 Sep 2017 13:42:44 -0700 Subject: [PATCH 014/109] clang-format --- .../chttp2/transport/chttp2_transport.c | 9 +++- 
.../ext/transport/chttp2/transport/writing.c | 4 +- src/core/lib/compression/stream_compression.c | 19 +++++--- src/core/lib/compression/stream_compression.h | 22 +++++----- .../lib/compression/stream_compression_gzip.c | 44 +++++++++++-------- .../compression/stream_compression_identity.c | 36 ++++++++------- .../compression/stream_compression_identity.h | 3 +- 7 files changed, 80 insertions(+), 57 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index e1fb9dd9154..64a5e342feb 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1297,8 +1297,13 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE; /* Identify stream compression */ - if (op_payload->send_initial_metadata.send_initial_metadata->idx.named.content_encoding == NULL || - grpc_stream_compression_method_parse(GRPC_MDVALUE(op_payload->send_initial_metadata.send_initial_metadata->idx.named.content_encoding->md), true, &s->stream_compression_method) == 0) { + if (op_payload->send_initial_metadata.send_initial_metadata->idx.named + .content_encoding == NULL || + grpc_stream_compression_method_parse( + GRPC_MDVALUE( + op_payload->send_initial_metadata.send_initial_metadata->idx + .named.content_encoding->md), + true, &s->stream_compression_method) == 0) { s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS; } grpc_slice_buffer_init(&s->compressed_data_buffer); diff --git a/src/core/ext/transport/chttp2/transport/writing.c b/src/core/ext/transport/chttp2/transport/writing.c index b6bc864c29d..fff98a62016 100644 --- a/src/core/ext/transport/chttp2/transport/writing.c +++ b/src/core/ext/transport/chttp2/transport/writing.c @@ -317,8 +317,8 @@ grpc_chttp2_begin_write_result grpc_chttp2_begin_write( 
grpc_chttp2_encode_data(s->id, &s->compressed_data_buffer, send_bytes, is_last_frame, &s->stats.outgoing, &t->outbuf); - grpc_chttp2_flowctl_sent_data(&t->flow_control, - &s->flow_control, send_bytes); + grpc_chttp2_flowctl_sent_data(&t->flow_control, &s->flow_control, + send_bytes); max_outgoing -= send_bytes; if (s->compressed_data_buffer.length == 0) { s->sending_bytes += s->uncompressed_data_size; diff --git a/src/core/lib/compression/stream_compression.c b/src/core/lib/compression/stream_compression.c index 274ee100ba5..411489f0296 100644 --- a/src/core/lib/compression/stream_compression.c +++ b/src/core/lib/compression/stream_compression.c @@ -21,20 +21,23 @@ #include "src/core/lib/compression/stream_compression.h" #include "src/core/lib/compression/stream_compression_gzip.h" -extern const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable; +extern const grpc_stream_compression_vtable + grpc_stream_compression_identity_vtable; bool grpc_stream_compress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, grpc_stream_compression_flush flush) { - return ctx->vtable->compress(ctx, in, out, output_size, max_output_size, flush); + return ctx->vtable->compress(ctx, in, out, output_size, max_output_size, + flush); } bool grpc_stream_decompress(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, bool *end_of_context) { - return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size, end_of_context); + return ctx->vtable->decompress(ctx, in, out, output_size, max_output_size, + end_of_context); } grpc_stream_compression_context *grpc_stream_compression_context_create( @@ -58,15 +61,17 @@ void grpc_stream_compression_context_destroy( } int grpc_stream_compression_method_parse( - grpc_slice value, bool is_compress, grpc_stream_compression_method *method) { + grpc_slice value, bool 
is_compress, + grpc_stream_compression_method *method) { if (grpc_slice_eq(value, GRPC_MDSTR_IDENTITY)) { - *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; + *method = is_compress ? GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS + : GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; return 1; } else if (grpc_slice_eq(value, GRPC_MDSTR_GZIP)) { - *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS; + *method = is_compress ? GRPC_STREAM_COMPRESSION_GZIP_COMPRESS + : GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS; return 1; } else { return 0; } } - diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h index fa07247c310..6d073280faa 100644 --- a/src/core/lib/compression/stream_compression.h +++ b/src/core/lib/compression/stream_compression.h @@ -49,18 +49,16 @@ typedef enum grpc_stream_compression_flush { } grpc_stream_compression_flush; struct grpc_stream_compression_vtable { -bool (*compress)(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - grpc_stream_compression_flush flush); -bool (*decompress)(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context); -grpc_stream_compression_context *(*context_create)( - grpc_stream_compression_method method); -void (*context_destroy)( - grpc_stream_compression_context *ctx); + bool (*compress)(grpc_stream_compression_context *ctx, grpc_slice_buffer *in, + grpc_slice_buffer *out, size_t *output_size, + size_t max_output_size, grpc_stream_compression_flush flush); + bool (*decompress)(grpc_stream_compression_context *ctx, + grpc_slice_buffer *in, grpc_slice_buffer *out, + size_t *output_size, size_t max_output_size, + bool *end_of_context); + grpc_stream_compression_context *(*context_create)( + 
grpc_stream_compression_method method); + void (*context_destroy)(grpc_stream_compression_context *ctx); }; /** diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c index 2d03836a0dc..5d21bf27ae0 100644 --- a/src/core/lib/compression/stream_compression_gzip.c +++ b/src/core/lib/compression/stream_compression_gzip.c @@ -132,13 +132,16 @@ static bool gzip_flate(grpc_stream_compression_context_gzip *ctx, } static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - grpc_stream_compression_flush flush) { + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + grpc_stream_compression_flush flush) { if (ctx == NULL) { return false; } - grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip *)ctx; + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; GPR_ASSERT(gzip_ctx->flate == deflate); int gzip_flush; switch (flush) { @@ -159,21 +162,26 @@ static bool grpc_stream_compress_gzip(grpc_stream_compression_context *ctx, } static bool grpc_stream_decompress_gzip(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context) { + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + bool *end_of_context) { if (ctx == NULL) { return false; } - grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip *)ctx; + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; GPR_ASSERT(gzip_ctx->flate == inflate); - return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, Z_SYNC_FLUSH, - end_of_context); + return gzip_flate(gzip_ctx, in, out, output_size, max_output_size, + 
Z_SYNC_FLUSH, end_of_context); } -static grpc_stream_compression_context *grpc_stream_compression_context_create_gzip( +static grpc_stream_compression_context * +grpc_stream_compression_context_create_gzip( grpc_stream_compression_method method) { - GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || + method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_stream_compression_context_gzip *gzip_ctx = gpr_zalloc(sizeof(grpc_stream_compression_context_gzip)); int r; @@ -201,7 +209,8 @@ static void grpc_stream_compression_context_destroy_gzip( if (ctx == NULL) { return; } - grpc_stream_compression_context_gzip *gzip_ctx = (grpc_stream_compression_context_gzip*)ctx; + grpc_stream_compression_context_gzip *gzip_ctx = + (grpc_stream_compression_context_gzip *)ctx; if (gzip_ctx->flate == inflate) { inflateEnd(&gzip_ctx->zs); } else { @@ -211,8 +220,7 @@ static void grpc_stream_compression_context_destroy_gzip( } const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable = { - .compress = grpc_stream_compress_gzip, - .decompress = grpc_stream_decompress_gzip, - .context_create = grpc_stream_compression_context_create_gzip, - .context_destroy = grpc_stream_compression_context_destroy_gzip -}; + .compress = grpc_stream_compress_gzip, + .decompress = grpc_stream_decompress_gzip, + .context_create = grpc_stream_compression_context_create_gzip, + .context_destroy = grpc_stream_compression_context_destroy_gzip}; diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c index 1395b762d48..133c3250d27 100644 --- a/src/core/lib/compression/stream_compression_identity.c +++ b/src/core/lib/compression/stream_compression_identity.c @@ -25,7 +25,10 @@ #define OUTPUT_BLOCK_SIZE (1024) -static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, grpc_slice_buffer *out, 
size_t *output_size, size_t max_output_size) { +static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size) { if (max_output_size >= in->length) { *output_size = in->length; grpc_slice_buffer_move_into(in, out); @@ -36,9 +39,11 @@ static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, grpc_sli } static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - grpc_stream_compression_flush flush) { + grpc_slice_buffer *in, + grpc_slice_buffer *out, + size_t *output_size, + size_t max_output_size, + grpc_stream_compression_flush flush) { if (ctx == NULL) { return false; } @@ -46,10 +51,10 @@ static bool grpc_stream_compress_identity(grpc_stream_compression_context *ctx, return true; } -static bool grpc_stream_decompress_identity(grpc_stream_compression_context *ctx, - grpc_slice_buffer *in, grpc_slice_buffer *out, - size_t *output_size, size_t max_output_size, - bool *end_of_context) { +static bool grpc_stream_decompress_identity( + grpc_stream_compression_context *ctx, grpc_slice_buffer *in, + grpc_slice_buffer *out, size_t *output_size, size_t max_output_size, + bool *end_of_context) { if (ctx == NULL) { return false; } @@ -57,9 +62,11 @@ static bool grpc_stream_decompress_identity(grpc_stream_compression_context *ctx return true; } -static grpc_stream_compression_context *grpc_stream_compression_context_create_identity( +static grpc_stream_compression_context * +grpc_stream_compression_context_create_identity( grpc_stream_compression_method method) { - GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); + GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || + method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); /* No context needed in this case. 
Use fake context instead. */ return (grpc_stream_compression_context *)1; } @@ -70,8 +77,7 @@ static void grpc_stream_compression_context_destroy_identity( } const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable = { - .compress = grpc_stream_compress_identity, - .decompress = grpc_stream_decompress_identity, - .context_create = grpc_stream_compression_context_create_identity, - .context_destroy = grpc_stream_compression_context_destroy_identity -}; + .compress = grpc_stream_compress_identity, + .decompress = grpc_stream_decompress_identity, + .context_create = grpc_stream_compression_context_create_identity, + .context_destroy = grpc_stream_compression_context_destroy_identity}; diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h index 0d770dd32b7..41926e949e5 100644 --- a/src/core/lib/compression/stream_compression_identity.h +++ b/src/core/lib/compression/stream_compression_identity.h @@ -21,6 +21,7 @@ #include "src/core/lib/compression/stream_compression.h" -extern const grpc_stream_compression_vtable grpc_stream_compression_identity_vtable; +extern const grpc_stream_compression_vtable + grpc_stream_compression_identity_vtable; #endif From 68a1a1530be8573e109c4ec6754941a682f8b5dd Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 5 Sep 2017 14:47:33 -0700 Subject: [PATCH 015/109] Build fix --- .../compression/stream_compression_test.c | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test/core/compression/stream_compression_test.c b/test/core/compression/stream_compression_test.c index e576507aaf6..26c957ded49 100644 --- a/test/core/compression/stream_compression_test.c +++ b/test/core/compression/stream_compression_test.c @@ -59,10 +59,10 @@ static void test_stream_compression_simple_compress_decompress() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - 
grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -91,10 +91,10 @@ test_stream_compression_simple_compress_decompress_with_output_size_constraint() grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -139,10 +139,10 @@ test_stream_compression_simple_compress_decompress_with_large_data() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -172,7 +172,7 @@ static void test_stream_compression_drop_context() { 
grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -181,7 +181,7 @@ static void test_stream_compression_drop_context() { grpc_stream_compression_context_destroy(compress_ctx); compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); slice = grpc_slice_from_static_string(test_str2); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -205,7 +205,7 @@ static void test_stream_compression_drop_context() { grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); bool end_of_context; size_t output_size; GPR_ASSERT(grpc_stream_decompress(decompress_ctx, &relay, &sink, &output_size, @@ -219,7 +219,7 @@ static void test_stream_compression_drop_context() { grpc_slice_buffer_init(&sink); decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); GPR_ASSERT(grpc_stream_decompress(decompress_ctx, &relay, &sink, &output_size, ~(size_t)0, &end_of_context)); GPR_ASSERT(end_of_context == true); @@ -240,7 +240,7 @@ static void test_stream_compression_sync_flush() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_COMPRESS); + grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_slice 
slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -249,7 +249,7 @@ static void test_stream_compression_sync_flush() { grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_DECOMPRESS); + GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); bool end_of_context; size_t output_size; GPR_ASSERT(grpc_stream_decompress(decompress_ctx, &relay, &sink, &output_size, From c3229b777ce58f49b092a92a07b18155d1ae7799 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 5 Sep 2017 17:57:19 -0700 Subject: [PATCH 016/109] Remove duplicate sentences on recv path --- .../chttp2/transport/chttp2_transport.c | 37 ++++++++----------- .../transport/chttp2/transport/hpack_parser.c | 13 ++----- .../ext/transport/chttp2/transport/internal.h | 9 ++--- .../compression/stream_compression_identity.c | 1 + 4 files changed, 23 insertions(+), 37 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 64a5e342feb..ad0521cc42f 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -693,6 +693,8 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_schedule_on_exec_ctx); grpc_slice_buffer_init(&s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_init(&s->frame_storage); + grpc_slice_buffer_init(&s->compressed_data_buffer); + grpc_slice_buffer_init(&s->decompressed_data_buffer); s->pending_byte_stream = false; GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s, grpc_combiner_scheduler(t->combiner)); @@ -728,10 +730,7 @@ static void destroy_stream_locked(grpc_exec_ctx *exec_ctx, void *sp, &s->unprocessed_incoming_frames_buffer); grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage); 
grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer); - if (s->decompressed_data_buffer) { - grpc_slice_buffer_destroy_internal(exec_ctx, s->decompressed_data_buffer); - gpr_free(s->decompressed_data_buffer); - } + grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer); grpc_chttp2_list_remove_stalled_by_transport(t, s); grpc_chttp2_list_remove_stalled_by_stream(t, s); @@ -1306,7 +1305,6 @@ static void perform_stream_op_locked(grpc_exec_ctx *exec_ctx, void *stream_op, true, &s->stream_compression_method) == 0) { s->stream_compression_method = GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS; } - grpc_slice_buffer_init(&s->compressed_data_buffer); s->send_initial_metadata_finished = add_closure_barrier(on_complete); s->send_initial_metadata = @@ -1725,18 +1723,17 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, &s->frame_storage); s->unprocessed_incoming_frames_decompressed = false; } - if (s->stream_compression_recv_enabled && - !s->unprocessed_incoming_frames_decompressed) { - GPR_ASSERT(s->decompressed_data_buffer->length == 0); + if (!s->unprocessed_incoming_frames_decompressed) { + GPR_ASSERT(s->decompressed_data_buffer.length == 0); bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + s->stream_decompression_method); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer, NULL, + &s->decompressed_data_buffer, NULL, GRPC_HEADER_SIZE_IN_BYTES, &end_of_context)) { grpc_slice_buffer_reset_and_unref_internal(exec_ctx, @@ -1747,7 +1744,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, "Stream decompression error."); } else { error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, s->decompressed_data_buffer, NULL, + exec_ctx, &s->data_parser, s, 
&s->decompressed_data_buffer, NULL, s->recv_message); if (end_of_context) { grpc_stream_compression_context_destroy( @@ -1755,10 +1752,6 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, s->stream_decompression_ctx = NULL; } } - } else { - error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, - &s->unprocessed_incoming_frames_buffer, NULL, s->recv_message); } if (error != GRPC_ERROR_NONE) { s->seen_error = true; @@ -1797,7 +1790,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } bool pending_data = s->pending_byte_stream || s->unprocessed_incoming_frames_buffer.length > 0; - if (s->stream_compression_recv_enabled && s->read_closed && + if (s->read_closed && s->frame_storage.length > 0 && !pending_data && !s->seen_error && s->recv_trailing_metadata_finished != NULL) { /* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and @@ -1805,7 +1798,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, bool end_of_context; if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + s->stream_decompression_method); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->frame_storage, @@ -1818,6 +1811,7 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } else { if (s->unprocessed_incoming_frames_buffer.length > 0) { s->unprocessed_incoming_frames_decompressed = true; + pending_data = true; } if (end_of_context) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); @@ -2690,16 +2684,15 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, grpc_error *error; if (s->unprocessed_incoming_frames_buffer.length > 0) { - if (s->stream_compression_recv_enabled && - !s->unprocessed_incoming_frames_decompressed) { + if (!s->unprocessed_incoming_frames_decompressed) { bool end_of_context; 
if (!s->stream_decompression_ctx) { s->stream_decompression_ctx = grpc_stream_compression_context_create( - GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); + s->stream_decompression_method); } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer, NULL, MAX_SIZE_T, + &s->decompressed_data_buffer, NULL, MAX_SIZE_T, &end_of_context)) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error."); @@ -2707,7 +2700,7 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, } GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0); grpc_slice_buffer_swap(&s->unprocessed_incoming_frames_buffer, - s->decompressed_data_buffer); + &s->decompressed_data_buffer); s->unprocessed_incoming_frames_decompressed = true; if (end_of_context) { grpc_stream_compression_context_destroy(s->stream_decompression_ctx); diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c index c21d76ba710..3b338da288d 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c @@ -1659,16 +1659,9 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t, grpc_chttp2_stream *s, grpc_metadata_batch *initial_metadata) { - if (initial_metadata->idx.named.content_encoding != NULL) { - grpc_slice content_encoding = - GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md); - if (!grpc_slice_eq(content_encoding, GRPC_MDSTR_IDENTITY)) { - if (grpc_slice_eq(content_encoding, GRPC_MDSTR_GZIP)) { - s->stream_compression_recv_enabled = true; - s->decompressed_data_buffer = gpr_malloc(sizeof(grpc_slice_buffer)); - grpc_slice_buffer_init(s->decompressed_data_buffer); - } - } + if (initial_metadata->idx.named.content_encoding == NULL || + grpc_stream_compression_method_parse(GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), 
false, &s->stream_decompression_method) == 0) { + s->stream_decompression_method = GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; } } diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index e5e4dd33443..33cc962f64e 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -561,10 +561,6 @@ struct grpc_chttp2_stream { grpc_stream_compression_method stream_compression_method; /* Stream decompression method to be used. */ grpc_stream_compression_method stream_decompression_method; - bool stream_compression_recv_enabled; - /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed - */ - bool unprocessed_incoming_frames_decompressed; /** Stream compression decompress context */ grpc_stream_compression_context *stream_decompression_ctx; /** Stream compression compress context */ @@ -576,7 +572,10 @@ struct grpc_chttp2_stream { * emptied */ size_t uncompressed_data_size; /** Temporary buffer storing decompressed data */ - grpc_slice_buffer *decompressed_data_buffer; + grpc_slice_buffer decompressed_data_buffer; + /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed + */ + bool unprocessed_incoming_frames_decompressed; }; /** Transport writing call flow: diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c index 133c3250d27..91ee06d7831 100644 --- a/src/core/lib/compression/stream_compression_identity.c +++ b/src/core/lib/compression/stream_compression_identity.c @@ -59,6 +59,7 @@ static bool grpc_stream_decompress_identity( return false; } grpc_stream_compression_pass_through(in, out, output_size, max_output_size); + *end_of_context = false; return true; } From 93197df3d5c5ad49c7fcbaa05b005e6ad7837560 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Tue, 5 Sep 2017 19:19:44 -0700 Subject: [PATCH 017/109] Add patch to identity --- 
.../lib/compression/stream_compression_gzip.c | 3 ++- .../compression/stream_compression_identity.c | 21 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c index 5d21bf27ae0..502bf44a913 100644 --- a/src/core/lib/compression/stream_compression_gzip.c +++ b/src/core/lib/compression/stream_compression_gzip.c @@ -19,7 +19,7 @@ #include #include -#include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/compression/stream_compression_gzip.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" @@ -201,6 +201,7 @@ grpc_stream_compression_context_create_gzip( return NULL; } + gzip_ctx->base.vtable = &grpc_stream_compression_gzip_vtable; return (grpc_stream_compression_context *)gzip_ctx; } diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c index 91ee06d7831..19eb6a9a00d 100644 --- a/src/core/lib/compression/stream_compression_identity.c +++ b/src/core/lib/compression/stream_compression_identity.c @@ -19,21 +19,30 @@ #include #include -#include "src/core/lib/compression/stream_compression.h" +#include "src/core/lib/compression/stream_compression_identity.h" #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/slice/slice_internal.h" #define OUTPUT_BLOCK_SIZE (1024) +/* Singleton context used for all identity streams. 
*/ +static grpc_stream_compression_context identity_ctx = { + .vtable = &grpc_stream_compression_identity_vtable +}; + static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, grpc_slice_buffer *out, size_t *output_size, size_t max_output_size) { if (max_output_size >= in->length) { - *output_size = in->length; + if (output_size) { + *output_size = in->length; + } grpc_slice_buffer_move_into(in, out); } else { - *output_size = max_output_size; + if (output_size) { + *output_size = max_output_size; + } grpc_slice_buffer_move_first(in, max_output_size, out); } } @@ -59,7 +68,9 @@ static bool grpc_stream_decompress_identity( return false; } grpc_stream_compression_pass_through(in, out, output_size, max_output_size); - *end_of_context = false; + if (end_of_context) { + *end_of_context = false; + } return true; } @@ -69,7 +80,7 @@ grpc_stream_compression_context_create_identity( GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_IDENTITY_COMPRESS || method == GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS); /* No context needed in this case. Use fake context instead. 
*/ - return (grpc_stream_compression_context *)1; + return (grpc_stream_compression_context *)&identity_ctx; } static void grpc_stream_compression_context_destroy_identity( From f4c95fa4ebfa5cd8dfea81f949fce2412125b085 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 6 Sep 2017 11:58:29 -0700 Subject: [PATCH 018/109] Patch for fuzzer test failure --- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 7 ++++++- src/core/ext/transport/chttp2/transport/frame_data.c | 4 ++-- src/core/ext/transport/chttp2/transport/internal.h | 2 ++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index ad0521cc42f..2a26bbb0c8b 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -696,6 +696,7 @@ static int init_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt, grpc_slice_buffer_init(&s->compressed_data_buffer); grpc_slice_buffer_init(&s->decompressed_data_buffer); s->pending_byte_stream = false; + s->decompressed_header_bytes = 0; GRPC_CLOSURE_INIT(&s->reset_byte_stream, reset_byte_stream, s, grpc_combiner_scheduler(t->combiner)); @@ -1734,7 +1735,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, &s->decompressed_data_buffer, NULL, - GRPC_HEADER_SIZE_IN_BYTES, + GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes, &end_of_context)) { grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); @@ -1743,6 +1744,10 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( "Stream decompression error."); } else { + s->decompressed_header_bytes += s->decompressed_data_buffer.length; + if (s->decompressed_header_bytes == GRPC_HEADER_SIZE_IN_BYTES) { + 
s->decompressed_header_bytes = 0; + } error = grpc_deframe_unprocessed_incoming_frames( exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer, NULL, s->recv_message); diff --git a/src/core/ext/transport/chttp2/transport/frame_data.c b/src/core/ext/transport/chttp2/transport/frame_data.c index 222d2177b29..73aaab18025 100644 --- a/src/core/ext/transport/chttp2/transport/frame_data.c +++ b/src/core/ext/transport/chttp2/transport/frame_data.c @@ -210,7 +210,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( if (cur != end) { grpc_slice_buffer_undo_take_first( - &s->unprocessed_incoming_frames_buffer, + slices, grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); } grpc_slice_unref_internal(exec_ctx, slice); @@ -277,7 +277,7 @@ grpc_error *grpc_deframe_unprocessed_incoming_frames( p->state = GRPC_CHTTP2_DATA_FH_0; cur += p->frame_size; grpc_slice_buffer_undo_take_first( - &s->unprocessed_incoming_frames_buffer, + slices, grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg))); grpc_slice_unref_internal(exec_ctx, slice); return GRPC_ERROR_NONE; diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h index 33cc962f64e..b64e9a0cd65 100644 --- a/src/core/ext/transport/chttp2/transport/internal.h +++ b/src/core/ext/transport/chttp2/transport/internal.h @@ -576,6 +576,8 @@ struct grpc_chttp2_stream { /** Whether bytes stored in unprocessed_incoming_byte_stream is decompressed */ bool unprocessed_incoming_frames_decompressed; + /** gRPC header bytes that are already decompressed */ + size_t decompressed_header_bytes; }; /** Transport writing call flow: From 27670c81a9beec3365ab5b373c707a847c0df503 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 5 Sep 2017 20:39:28 -0700 Subject: [PATCH 019/109] make a guarantee that c# auth callbacks are async to c-core --- .../NativeMetadataCredentialsPlugin.cs | 9 +-- .../MetadataCredentialsTest.cs | 59 +++++++++++++++++++ 2 files 
changed, 62 insertions(+), 6 deletions(-) diff --git a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs index b56bdbb23f8..a8cb3571811 100644 --- a/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs +++ b/src/csharp/Grpc.Core/Internal/NativeMetadataCredentialsPlugin.cs @@ -61,12 +61,9 @@ namespace Grpc.Core.Internal try { - var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr), - Marshal.PtrToStringAnsi(methodNamePtr)); - // Don't await, we are in a native callback and need to return. - #pragma warning disable 4014 - GetMetadataAsync(context, callbackPtr, userDataPtr); - #pragma warning restore 4014 + var context = new AuthInterceptorContext(Marshal.PtrToStringAnsi(serviceUrlPtr), Marshal.PtrToStringAnsi(methodNamePtr)); + // Make a guarantee that credentials_notify_from_plugin is invoked async to be compliant with c-core API. + ThreadPool.QueueUserWorkItem(async (stateInfo) => await GetMetadataAsync(context, callbackPtr, userDataPtr)); } catch (Exception e) { diff --git a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs index e81157cf97e..eba6276a1f5 100644 --- a/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs +++ b/src/csharp/Grpc.IntegrationTesting/MetadataCredentialsTest.cs @@ -89,6 +89,54 @@ namespace Grpc.IntegrationTesting client.UnaryCall(new SimpleRequest { }, new CallOptions(credentials: callCredentials)); } + [Test] + public async Task MetadataCredentials_Composed() + { + var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + // Attempt to exercise the case where async callback is inlineable/synchronously-runnable. 
+ metadata.Add("first_authorization", "FIRST_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + metadata.Add("second_authorization", "SECOND_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + metadata.Add("third_authorization", "THIRD_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var channelCredentials = ChannelCredentials.Create(TestCredentials.CreateSslCredentials(), + CallCredentials.Compose(first, second, third)); + channel = new Channel(Host, server.Ports.Single().BoundPort, channelCredentials, options); + var client = new TestService.TestServiceClient(channel); + var call = client.StreamingOutputCall(new StreamingOutputCallRequest { }); + Assert.IsTrue(await call.ResponseStream.MoveNext()); + Assert.IsFalse(await call.ResponseStream.MoveNext()); + } + + [Test] + public async Task MetadataCredentials_ComposedPerCall() + { + channel = new Channel(Host, server.Ports.Single().BoundPort, TestCredentials.CreateSslCredentials(), options); + var client = new TestService.TestServiceClient(channel); + var first = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + metadata.Add("first_authorization", "FIRST_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var second = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + metadata.Add("second_authorization", "SECOND_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var third = CallCredentials.FromInterceptor(new AsyncAuthInterceptor((context, metadata) => { + metadata.Add("third_authorization", "THIRD_SECRET_TOKEN"); + return TaskUtils.CompletedTask; + })); + var call = client.StreamingOutputCall(new StreamingOutputCallRequest{ }, + new CallOptions(credentials: CallCredentials.Compose(first, second, third))); + 
Assert.IsTrue(await call.ResponseStream.MoveNext()); + Assert.IsFalse(await call.ResponseStream.MoveNext()); + } + [Test] public void MetadataCredentials_InterceptorLeavesMetadataEmpty() { @@ -125,6 +173,17 @@ namespace Grpc.IntegrationTesting Assert.AreEqual("SECRET_TOKEN", authToken); return Task.FromResult(new SimpleResponse()); } + + public override async Task StreamingOutputCall(StreamingOutputCallRequest request, IServerStreamWriter responseStream, ServerCallContext context) + { + var first = context.RequestHeaders.First((entry) => entry.Key == "first_authorization").Value; + Assert.AreEqual("FIRST_SECRET_TOKEN", first); + var second = context.RequestHeaders.First((entry) => entry.Key == "second_authorization").Value; + Assert.AreEqual("SECOND_SECRET_TOKEN", second); + var third = context.RequestHeaders.First((entry) => entry.Key == "third_authorization").Value; + Assert.AreEqual("THIRD_SECRET_TOKEN", third); + await responseStream.WriteAsync(new StreamingOutputCallResponse()); + } } } } From 1ff6ee12cc01e3a9f03c24b74fdea4526ecddc8b Mon Sep 17 00:00:00 2001 From: "Mark D. 
Roth" Date: Wed, 6 Sep 2017 14:56:18 -0700 Subject: [PATCH 020/109] clang-format --- .../lib/security/credentials/plugin/plugin_credentials.c | 3 +-- src/cpp/client/secure_credentials.cc | 8 ++++---- src/cpp/client/secure_credentials.h | 8 ++++---- src/csharp/ext/grpc_csharp_ext.c | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c index e844bff79c0..8f49c5aef2d 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.c +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c @@ -88,8 +88,7 @@ static grpc_error *process_plugin_result( } else if (!grpc_is_binary_header(md[i].key) && !GRPC_LOG_IF_ERROR( "validate_metadata_from_plugin", - grpc_validate_header_nonbin_value_is_legal( - md[i].value))) { + grpc_validate_header_nonbin_value_is_legal(md[i].value))) { gpr_log(GPR_ERROR, "Plugin added invalid metadata value."); seen_illegal_header = true; break; diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 00104165a50..6c06e453263 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -162,8 +162,8 @@ int MetadataCredentialsPluginWrapper::GetMetadata( void* wrapper, grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, void* user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t *num_creds_md, grpc_status_code *status, - const char **error_details) { + size_t* num_creds_md, grpc_status_code* status, + const char** error_details) { GPR_ASSERT(wrapper); MetadataCredentialsPluginWrapper* w = reinterpret_cast(wrapper); @@ -200,8 +200,8 @@ void UnrefMetadata(const std::vector& md) { void MetadataCredentialsPluginWrapper::InvokePlugin( grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, - void* user_data, grpc_metadata creds_md[4], size_t 
*num_creds_md, - grpc_status_code *status_code, const char **error_details) { + void* user_data, grpc_metadata creds_md[4], size_t* num_creds_md, + grpc_status_code* status_code, const char** error_details) { std::multimap metadata; // const_cast is safe since the SecureAuthContext does not take owndership and diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h index fdabaca84fa..fa1e31996ad 100644 --- a/src/cpp/client/secure_credentials.h +++ b/src/cpp/client/secure_credentials.h @@ -62,8 +62,8 @@ class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen { void* wrapper, grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, void* user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t *num_creds_md, grpc_status_code *status, - const char **error_details); + size_t* num_creds_md, grpc_status_code* status, + const char** error_details); explicit MetadataCredentialsPluginWrapper( std::unique_ptr plugin); @@ -73,8 +73,8 @@ class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen { grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, void* user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t *num_creds_md, grpc_status_code *status, - const char **error_details); + size_t* num_creds_md, grpc_status_code* status, + const char** error_details); std::unique_ptr thread_pool_; std::unique_ptr plugin_; }; diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c index b2c8ca63f49..92291f9011b 100644 --- a/src/csharp/ext/grpc_csharp_ext.c +++ b/src/csharp/ext/grpc_csharp_ext.c @@ -1033,7 +1033,7 @@ static int grpcsharp_get_metadata_handler( (grpcsharp_metadata_interceptor_func)(intptr_t)state; interceptor(state, context.service_url, context.method_name, cb, user_data, 0); - return 0; /* Asynchronous return. */ + return 0; /* Asynchronous return. 
*/ } static void grpcsharp_metadata_credentials_destroy_handler(void *state) { From c2a61a2780e56c474a12f5570133fe166ec9840f Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 6 Sep 2017 22:07:42 -0700 Subject: [PATCH 021/109] write_buffering patch --- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 2a26bbb0c8b..9b80e1d3369 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1757,6 +1757,9 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, s->stream_decompression_ctx = NULL; } } + } else { + error = grpc_deframe_unprocessed_incoming_frames( + exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer, NULL, s->recv_message); } if (error != GRPC_ERROR_NONE) { s->seen_error = true; From 1e99b2388bf5e13c79143463f6d503275fd63cb7 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 7 Sep 2017 09:47:58 -0700 Subject: [PATCH 022/109] clang-format --- .../chttp2/transport/chttp2_transport.c | 27 ++++++++++--------- .../transport/chttp2/transport/hpack_parser.c | 7 +++-- .../compression/stream_compression_identity.c | 3 +-- .../compression/stream_compression_test.c | 19 ++++++++----- 4 files changed, 32 insertions(+), 24 deletions(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 9b80e1d3369..721f694506b 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -1732,11 +1732,12 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, grpc_stream_compression_context_create( s->stream_decompression_method); } - if (!grpc_stream_decompress(s->stream_decompression_ctx, - 
&s->unprocessed_incoming_frames_buffer, - &s->decompressed_data_buffer, NULL, - GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes, - &end_of_context)) { + if (!grpc_stream_decompress( + s->stream_decompression_ctx, + &s->unprocessed_incoming_frames_buffer, + &s->decompressed_data_buffer, NULL, + GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes, + &end_of_context)) { grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage); grpc_slice_buffer_reset_and_unref_internal( @@ -1749,8 +1750,8 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, s->decompressed_header_bytes = 0; } error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer, NULL, - s->recv_message); + exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer, + NULL, s->recv_message); if (end_of_context) { grpc_stream_compression_context_destroy( s->stream_decompression_ctx); @@ -1759,7 +1760,8 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx *exec_ctx, } } else { error = grpc_deframe_unprocessed_incoming_frames( - exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer, NULL, s->recv_message); + exec_ctx, &s->data_parser, s, + &s->unprocessed_incoming_frames_buffer, NULL, s->recv_message); } if (error != GRPC_ERROR_NONE) { s->seen_error = true; @@ -1798,9 +1800,8 @@ void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx *exec_ctx, } bool pending_data = s->pending_byte_stream || s->unprocessed_incoming_frames_buffer.length > 0; - if (s->read_closed && - s->frame_storage.length > 0 && !pending_data && !s->seen_error && - s->recv_trailing_metadata_finished != NULL) { + if (s->read_closed && s->frame_storage.length > 0 && !pending_data && + !s->seen_error && s->recv_trailing_metadata_finished != NULL) { /* Maybe some SYNC_FLUSH data is left in frame_storage. Consume them and * maybe decompress the next 5 bytes in the stream. 
*/ bool end_of_context; @@ -2700,8 +2701,8 @@ static grpc_error *incoming_byte_stream_pull(grpc_exec_ctx *exec_ctx, } if (!grpc_stream_decompress(s->stream_decompression_ctx, &s->unprocessed_incoming_frames_buffer, - &s->decompressed_data_buffer, NULL, MAX_SIZE_T, - &end_of_context)) { + &s->decompressed_data_buffer, NULL, + MAX_SIZE_T, &end_of_context)) { error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream decompression error."); return error; diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c index 3b338da288d..2028877cac0 100644 --- a/src/core/ext/transport/chttp2/transport/hpack_parser.c +++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c @@ -1660,8 +1660,11 @@ static void parse_stream_compression_md(grpc_exec_ctx *exec_ctx, grpc_chttp2_stream *s, grpc_metadata_batch *initial_metadata) { if (initial_metadata->idx.named.content_encoding == NULL || - grpc_stream_compression_method_parse(GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false, &s->stream_decompression_method) == 0) { - s->stream_decompression_method = GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; + grpc_stream_compression_method_parse( + GRPC_MDVALUE(initial_metadata->idx.named.content_encoding->md), false, + &s->stream_decompression_method) == 0) { + s->stream_decompression_method = + GRPC_STREAM_COMPRESSION_IDENTITY_DECOMPRESS; } } diff --git a/src/core/lib/compression/stream_compression_identity.c b/src/core/lib/compression/stream_compression_identity.c index 19eb6a9a00d..3dfcf53b85d 100644 --- a/src/core/lib/compression/stream_compression_identity.c +++ b/src/core/lib/compression/stream_compression_identity.c @@ -27,8 +27,7 @@ /* Singleton context used for all identity streams. 
*/ static grpc_stream_compression_context identity_ctx = { - .vtable = &grpc_stream_compression_identity_vtable -}; + .vtable = &grpc_stream_compression_identity_vtable}; static void grpc_stream_compression_pass_through(grpc_slice_buffer *in, grpc_slice_buffer *out, diff --git a/test/core/compression/stream_compression_test.c b/test/core/compression/stream_compression_test.c index 26c957ded49..afed6cd6b5c 100644 --- a/test/core/compression/stream_compression_test.c +++ b/test/core/compression/stream_compression_test.c @@ -59,7 +59,8 @@ static void test_stream_compression_simple_compress_decompress() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); @@ -91,7 +92,8 @@ test_stream_compression_simple_compress_decompress_with_output_size_constraint() grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); @@ -139,7 +141,8 @@ test_stream_compression_simple_compress_decompress_with_large_data() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_stream_compression_context *decompress_ctx = grpc_stream_compression_context_create( 
GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); @@ -172,7 +175,8 @@ static void test_stream_compression_drop_context() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -180,8 +184,8 @@ static void test_stream_compression_drop_context() { GRPC_STREAM_COMPRESSION_FLUSH_FINISH)); grpc_stream_compression_context_destroy(compress_ctx); - compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + compress_ctx = grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); slice = grpc_slice_from_static_string(test_str2); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, @@ -240,7 +244,8 @@ static void test_stream_compression_sync_flush() { grpc_slice_buffer_init(&relay); grpc_slice_buffer_init(&sink); grpc_stream_compression_context *compress_ctx = - grpc_stream_compression_context_create(GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); + grpc_stream_compression_context_create( + GRPC_STREAM_COMPRESSION_GZIP_COMPRESS); grpc_slice slice = grpc_slice_from_static_string(test_str); grpc_slice_buffer_add(&source, slice); GPR_ASSERT(grpc_stream_compress(compress_ctx, &source, &relay, NULL, From 9cd90f43dcfeabff95cb7b61fb029ff7e4a899b3 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 7 Sep 2017 09:51:13 -0700 Subject: [PATCH 023/109] fix bazel build --- BUILD | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/BUILD b/BUILD index 67e727bd521..e147dec4d49 100644 --- a/BUILD +++ b/BUILD @@ -574,6 +574,8 @@ grpc_cc_library( "src/core/lib/compression/compression.c", 
"src/core/lib/compression/message_compress.c", "src/core/lib/compression/stream_compression.c", + "src/core/lib/compression/stream_compression_gzip.c", + "src/core/lib/compression/stream_compression_identity.c", "src/core/lib/http/format_request.c", "src/core/lib/http/httpcli.c", "src/core/lib/http/parser.c", @@ -706,6 +708,8 @@ grpc_cc_library( "src/core/lib/compression/algorithm_metadata.h", "src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/compression/stream_compression_gzip.h", + "src/core/lib/compression/stream_compression_identity.h", "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", From 4fe48e6579966b47bae473f0e561c59623bc24ff Mon Sep 17 00:00:00 2001 From: Ken Payson Date: Thu, 7 Sep 2017 11:45:36 -0700 Subject: [PATCH 024/109] Build fixes --- src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index 7ca8fd3df06..279eb8a71cf 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -374,6 +374,10 @@ cdef extern from "grpc/grpc.h": cdef extern from "grpc/grpc_security.h": + # Declare this as an enum, this is the only way to make it a const in + # cython + enum: GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX + ctypedef enum grpc_ssl_roots_override_result: GRPC_SSL_ROOTS_OVERRIDE_OK GRPC_SSL_ROOTS_OVERRIDE_FAILED_PERMANENTLY From c7388e5d82124bc4c9c83532a710d8b3e1c2c640 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Fri, 8 Sep 2017 08:01:21 -0700 Subject: [PATCH 025/109] Fix node build. 
--- src/node/ext/call_credentials.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/node/ext/call_credentials.h b/src/node/ext/call_credentials.h index adcff84573d..c5508e2ba25 100644 --- a/src/node/ext/call_credentials.h +++ b/src/node/ext/call_credentials.h @@ -75,9 +75,12 @@ typedef struct plugin_state { uv_async_t plugin_async; } plugin_state; -void plugin_get_metadata(void *state, grpc_auth_metadata_context context, - grpc_credentials_plugin_metadata_cb cb, - void *user_data); +int plugin_get_metadata( + void *state, grpc_auth_metadata_context context, + grpc_credentials_plugin_metadata_cb cb, void *user_data, + grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], + size_t *num_creds_md, grpc_status_code *status, + const char **error_details); void plugin_destroy_state(void *state); From 9dba7eb6b8edfb8035f1166be6e501f90a5e33d0 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Fri, 8 Sep 2017 15:32:51 -0700 Subject: [PATCH 026/109] Fix error unref problem --- src/core/ext/transport/chttp2/transport/chttp2_transport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c index 721f694506b..39c2e7baaab 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c @@ -2596,7 +2596,7 @@ static void reset_byte_stream(grpc_exec_ctx *exec_ctx, void *arg, GRPC_ERROR_UNREF(s->byte_stream_error); s->byte_stream_error = GRPC_ERROR_NONE; grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error)); - s->byte_stream_error = error; + s->byte_stream_error = GRPC_ERROR_REF(error); } } From ad9208c07e74d90dd5110324fe9cb830e6e6f68b Mon Sep 17 00:00:00 2001 From: "Mark D. 
Roth" Date: Mon, 11 Sep 2017 11:28:20 -0700 Subject: [PATCH 027/109] clang-format --- src/node/ext/call_credentials.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/node/ext/call_credentials.h b/src/node/ext/call_credentials.h index c5508e2ba25..3a54bbf0cf7 100644 --- a/src/node/ext/call_credentials.h +++ b/src/node/ext/call_credentials.h @@ -79,8 +79,7 @@ int plugin_get_metadata( void *state, grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, void *user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t *num_creds_md, grpc_status_code *status, - const char **error_details); + size_t *num_creds_md, grpc_status_code *status, const char **error_details); void plugin_destroy_state(void *state); From 1c5a5f21a339523ad5984287d9194cb511659136 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Tue, 12 Sep 2017 17:44:37 -0700 Subject: [PATCH 028/109] Add .NET CLI to coverage Dockerfile --- .../multilang_jessie_x64/Dockerfile.template | 1 + .../test/multilang_jessie_x64/Dockerfile | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template b/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template index 6cad474a206..0d47aa91f36 100644 --- a/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template +++ b/templates/tools/dockerfile/test/multilang_jessie_x64/Dockerfile.template @@ -19,6 +19,7 @@ <%include file="../../apt_get_basic.include"/> <%include file="../../gcp_api_libraries.include"/> <%include file="../../csharp_deps.include"/> + <%include file="../../csharp_dotnetcli_deps.include"/> <%include file="../../cxx_deps.include"/> <%include file="../../node_deps.include"/> <%include file="../../php_deps.include"/> diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile index b7373b5d9c3..6f1759bceb3 100644 --- 
a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile @@ -70,6 +70,24 @@ RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \ RUN nuget update -self +# Install dotnet SDK based on https://www.microsoft.com/net/core#debian +RUN apt-get update && apt-get install -y curl libunwind8 gettext +# dotnet-dev-1.0.0-preview2-003131 +RUN curl -sSL -o dotnet100.tar.gz https://go.microsoft.com/fwlink/?LinkID=827530 +RUN mkdir -p /opt/dotnet && tar zxf dotnet100.tar.gz -C /opt/dotnet +# dotnet-dev-1.0.1 +RUN curl -sSL -o dotnet101.tar.gz https://go.microsoft.com/fwlink/?LinkID=843453 +RUN mkdir -p /opt/dotnet && tar zxf dotnet101.tar.gz -C /opt/dotnet +RUN ln -s /opt/dotnet/dotnet /usr/local/bin + +# Trigger the population of the local package cache +ENV NUGET_XMLDOC_MODE skip +RUN mkdir warmup \ + && cd warmup \ + && dotnet new \ + && cd .. \ + && rm -rf warmup + #================= # C++ dependencies RUN apt-get update && apt-get -y install libgflags-dev libgtest-dev libc++-dev clang && apt-get clean From 3218e2b975a71cc32aedad71d00267e07244c109 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 14 Sep 2017 10:29:42 -0700 Subject: [PATCH 029/109] minor build fix --- src/core/lib/compression/stream_compression_gzip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/compression/stream_compression_gzip.c b/src/core/lib/compression/stream_compression_gzip.c index 587bf8379eb..abcbdb3a91f 100644 --- a/src/core/lib/compression/stream_compression_gzip.c +++ b/src/core/lib/compression/stream_compression_gzip.c @@ -183,7 +183,7 @@ grpc_stream_compression_context_create_gzip( GPR_ASSERT(method == GRPC_STREAM_COMPRESSION_GZIP_COMPRESS || method == GRPC_STREAM_COMPRESSION_GZIP_DECOMPRESS); grpc_stream_compression_context_gzip *gzip_ctx = - (grpc_stream_compression_context *)gpr_zalloc( + (grpc_stream_compression_context_gzip *)gpr_zalloc( 
sizeof(grpc_stream_compression_context_gzip)); int r; if (gzip_ctx == NULL) { From e2d60524bdbd6d7f761d3dbce185d6f6b0dbbbcb Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 14 Sep 2017 14:52:37 -0700 Subject: [PATCH 030/109] Script for CI testing latency --- tools/github/pr_latency.py | 146 +++++++++++++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) create mode 100644 tools/github/pr_latency.py diff --git a/tools/github/pr_latency.py b/tools/github/pr_latency.py new file mode 100644 index 00000000000..e773bda37ab --- /dev/null +++ b/tools/github/pr_latency.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Measure the time between PR creation and completion of all tests""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import logging +import pprint +import urllib2 + +from datetime import datetime, timedelta + +logging.basicConfig(format='%(asctime)s %(message)s') + +PRS = 'https://api.github.com/repos/grpc/grpc/pulls?state=open&per_page=100' +COMMITS = 'https://api.github.com/repos/grpc/grpc/pulls/{pr_number}/commits' + + +def gh(url): + request = urllib2.Request(url) + if TOKEN: + request.add_header('Authorization', 'token {}'.format(TOKEN)) + response = urllib2.urlopen(request) + return response.read() + + +def print_csv_header(): + print('pr,base_time,test_time,latency_seconds,successes,failures,errors') + + +def output(pr, base_time, test_time, diff_time, successes, failures, errors, mode='human'): + if mode == 'human': + print("PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}." + "\n\tSuccesses: {}, Failures: {}, Errors: {}".format( + pr, base_time, test_time, diff_time, successes, failures, errors)) + elif mode == 'csv': + print(','.join([str(pr), str(base_time), + str(test_time), str(int((test_time-base_time).total_seconds())), + str(successes), str(failures), str(errors)])) + + +def parse_timestamp(datetime_str): + return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ') + + +def to_posix_timestamp(dt): + return str((dt - datetime(1970, 1, 1)).total_seconds()) + + +def get_pr_data(): + latest_prs = json.loads(gh(PRS)) + res = [{'number': pr['number'], + 'created_at': parse_timestamp(pr['created_at']), + 'updated_at': parse_timestamp(pr['updated_at']), + 'statuses_url': pr['statuses_url']} + for pr in latest_prs] + return res + + +def get_commits_data(pr_number): + commits = json.loads(gh(COMMITS.format(pr_number=pr_number))) + return {'num_commits': len(commits), + 'most_recent_date': parse_timestamp(commits[-1]['commit']['author']['date'])} + + +def 
get_status_data(statuses_url, system): + status_url = statuses_url.replace('statuses', 'status') + statuses = json.loads(gh(status_url + '?per_page=100')) + successes = 0 + failures = 0 + errors = 0 + latest_datetime = None + if not statuses: return None + if system == 'kokoro': string_in_target_url = 'kokoro' + elif system == 'jenkins': string_in_target_url = 'grpc-testing' + for status in statuses['statuses']: + if not status['target_url'] or string_in_target_url not in status['target_url']: continue # Ignore jenkins + if status['state'] == 'pending': return None + elif status['state'] == 'success': successes += 1 + elif status['state'] == 'failure': failures += 1 + elif status['state'] == 'error': errors += 1 + if not latest_datetime: + latest_datetime = parse_timestamp(status['updated_at']) + else: + latest_datetime = max(latest_datetime, parse_timestamp(status['updated_at'])) + # First status is the most recent one. + if any([successes, failures, errors]) and sum([successes, failures, errors]) > 15: + return {'latest_datetime': latest_datetime, + 'successes': successes, + 'failures': failures, + 'errors': errors} + else: return None + + +def build_args_parser(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--format', type=str, choices=['human', 'csv'], default='human') + parser.add_argument('--system', type=str, choices=['jenkins', 'kokoro'], required=True) + parser.add_argument('--token', type=str, default='') + return parser + + +def main(): + import sys + global TOKEN + args_parser = build_args_parser() + args = args_parser.parse_args() + TOKEN = args.token + if args.format == 'csv': print_csv_header() + for pr_data in get_pr_data(): + commit_data = get_commits_data(pr_data['number']) + # PR with a single commit -> use the PRs creation time. + # else -> use the latest commit's date. 
+ base_timestamp = pr_data['updated_at'] + if commit_data['num_commits'] > 1: + base_timestamp = commit_data['most_recent_date'] + else: + base_timestamp = pr_data['created_at'] + last_status = get_status_data(pr_data['statuses_url'], args.system) + if last_status: + diff = last_status['latest_datetime'] - base_timestamp + if diff < timedelta(hours=5): + output(pr_data['number'], base_timestamp, last_status['latest_datetime'], + diff, last_status['successes'], last_status['failures'], + last_status['errors'], mode=args.format) + + +if __name__ == '__main__': + main() From e16329dd5f2be074ce72564666e83a33e34e468c Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 14 Sep 2017 09:35:03 -0700 Subject: [PATCH 031/109] Debug: timer hash table --- src/core/lib/iomgr/timer_generic.c | 98 +++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 7 deletions(-) diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index c08bb525b77..27656417ec3 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -79,6 +79,67 @@ static timer_shard g_shards[NUM_SHARDS]; * Access to this is protected by g_shared_mutables.mu */ static timer_shard *g_shard_queue[NUM_SHARDS]; +#define NUM_HASH_BUCKETS 1000 +#define NUM_SLOTS_PER_BUCKET 5 +static gpr_mu g_hash_mu; +static grpc_timer *g_timer_hash[1000][5] = {{NULL, NULL}}; + +static void init_timer_hash() { gpr_mu_init(&g_hash_mu); } + +static bool is_timer_present(grpc_timer *t) { + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + bool is_found = false; + gpr_mu_lock(&g_hash_mu); + for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { + if (g_timer_hash[i][j] == t) { + is_found = true; + break; + } + } + gpr_mu_unlock(&g_hash_mu); + return is_found; +} + +static void check_and_add_timer(grpc_timer *t) { + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + bool added = false; + gpr_mu_lock(&g_hash_mu); + for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { + if 
(g_timer_hash[i][j] == NULL) { + g_timer_hash[i][j] = t; + added = true; + break; + } else if (g_timer_hash[i][j] == t) { + gpr_log(GPR_ERROR, "*** DUPLICATE TIMER BEING ADDED (%p) **", (void *)t); + abort(); + } + } + gpr_mu_unlock(&g_hash_mu); + if (!added) { + gpr_log(GPR_ERROR, "** NOT ENOUGH BUCKETS **"); + abort(); + } +} + +static void remove_timer(grpc_timer *t) { + size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); + bool removed = false; + gpr_mu_lock(&g_hash_mu); + for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { + if (g_timer_hash[i][j] == t) { + g_timer_hash[i][j] = 0; + removed = true; + break; + } + } + + gpr_mu_unlock(&g_hash_mu); + if (!removed) { + gpr_log(GPR_ERROR, "*** Unable to remove %p. BUG! **", (void *)t); + abort(); + } +} + /* Thread local variable that stores the deadline of the next timer the thread * has last-seen. This is an optimization to prevent the thread from checking * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock, @@ -96,7 +157,8 @@ struct shared_mutables { } GPR_ALIGN_STRUCT(GPR_CACHELINE_SIZE); static struct shared_mutables g_shared_mutables = { - .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, .initialized = false, + .checker_mu = GPR_SPINLOCK_STATIC_INITIALIZER, + .initialized = false, }; static gpr_clock_type g_clock_type; @@ -176,6 +238,8 @@ void grpc_timer_list_init(gpr_timespec now) { shard->min_deadline = compute_min_deadline(shard); g_shard_queue[i] = shard; } + + init_timer_hash(); } void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { @@ -247,8 +311,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline); if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR - "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]", + gpr_log(GPR_DEBUG, + "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR "] now %" PRId64 + ".%09d [%" PRIdPTR "] call %p[%p]", timer, 
deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec, now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb); } @@ -273,6 +338,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, grpc_time_averaged_stats_add_sample(&shard->stats, ts_to_dbl(gpr_time_sub(deadline, now))); + + /** TODO: sreek - CHECK HERE AND ADD **/ + check_and_add_timer(timer); + if (deadline_atm < shard->queue_deadline_cap) { is_first_timer = grpc_timer_heap_add(&shard->heap, timer); } else { @@ -280,8 +349,9 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, list_join(&shard->list, timer); } if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR - " => is_first_timer=%s", + gpr_log(GPR_DEBUG, + " .. add to shard %d with queue_deadline_cap=%" PRIdPTR + " => is_first_timer=%s", (int)(shard - g_shards), shard->queue_deadline_cap, is_first_timer ? "true" : "false"); } @@ -334,7 +404,11 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer, timer->pending ? "true" : "false"); } + if (timer->pending) { + /* TODO: sreek - Remove the timer here */ + remove_timer(timer); + GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); timer->pending = false; if (timer->heap_index == INVALID_HEAP_INDEX) { @@ -342,6 +416,13 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { } else { grpc_timer_heap_remove(&shard->heap, timer); } + } else { + if (is_timer_present(timer)) { + gpr_log(GPR_ERROR, + "** gpr_timer_cancel called on a non-pending timer! 
%p", + (void *)timer); + abort(); + } } gpr_mu_unlock(&shard->mu); } @@ -425,6 +506,8 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, grpc_timer *timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { + /* TODO: sreek: Remove timer here */ + remove_timer(timer); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); n++; } @@ -537,8 +620,9 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, next->tv_nsec, timespec_to_atm_round_down(*next)); } - gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR - "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, + gpr_log(GPR_DEBUG, + "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR + "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, now.tv_sec, now.tv_nsec, now_atm, next_str, gpr_tls_get(&g_last_seen_min_timer), gpr_atm_no_barrier_load(&g_shared_mutables.min_timer)); From 993fa31e669dcf8432e84c3c2a83b11bf9e68b5e Mon Sep 17 00:00:00 2001 From: Yihua Zhang Date: Fri, 15 Sep 2017 08:46:54 -0700 Subject: [PATCH 032/109] update tsi test library to support async tsi implementation --- test/core/tsi/transport_security_test_lib.c | 29 +++++++++++++++++++++ test/core/tsi/transport_security_test_lib.h | 19 ++++++++++++-- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/test/core/tsi/transport_security_test_lib.c b/test/core/tsi/transport_security_test_lib.c index 7d66e110f2e..329b2371bf1 100644 --- a/test/core/tsi/transport_security_test_lib.c +++ b/test/core/tsi/transport_security_test_lib.c @@ -23,9 +23,26 @@ #include #include #include +#include #include "src/core/lib/security/transport/tsi_error.h" #include "test/core/tsi/transport_security_test_lib.h" +static void notification_signal(tsi_test_fixture *fixture) { + gpr_mu_lock(&fixture->mu); + fixture->notified = true; + gpr_cv_signal(&fixture->cv); + gpr_mu_unlock(&fixture->mu); +} + +static void 
notification_wait(tsi_test_fixture *fixture) { + gpr_mu_lock(&fixture->mu); + while (!fixture->notified) { + gpr_cv_wait(&fixture->cv, &fixture->mu, gpr_inf_future(GPR_CLOCK_REALTIME)); + } + fixture->notified = false; + gpr_mu_unlock(&fixture->mu); +} + typedef struct handshaker_args { tsi_test_fixture *fixture; unsigned char *handshake_buffer; @@ -273,9 +290,11 @@ grpc_error *on_handshake_next_done(tsi_result result, void *user_data, /* Read more data if we need to. */ if (result == TSI_INCOMPLETE_DATA) { GPR_ASSERT(bytes_to_send_size == 0); + notification_signal(fixture); return error; } if (result != TSI_OK) { + notification_signal(fixture); return grpc_set_tsi_error_result( GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake failed"), result); } @@ -295,6 +314,7 @@ grpc_error *on_handshake_next_done(tsi_result result, void *user_data, if (handshaker_result != NULL) { maybe_append_unused_bytes(args); } + notification_signal(fixture); return error; } @@ -345,7 +365,11 @@ static void do_handshaker_next(handshaker_args *args) { if (result != TSI_ASYNC) { args->error = on_handshake_next_done(result, args, bytes_to_send, bytes_to_send_size, handshaker_result); + if (args->error != GRPC_ERROR_NONE) { + return; + } } + notification_wait(fixture); } void tsi_test_do_handshake(tsi_test_fixture *fixture) { @@ -532,6 +556,9 @@ void tsi_test_fixture_init(tsi_test_fixture *fixture) { fixture->bytes_read_from_server_channel = 0; fixture->test_unused_bytes = true; fixture->has_client_finished_first = false; + gpr_mu_init(&fixture->mu); + gpr_cv_init(&fixture->cv); + fixture->notified = false; } void tsi_test_fixture_destroy(tsi_test_fixture *fixture) { @@ -546,5 +573,7 @@ void tsi_test_fixture_destroy(tsi_test_fixture *fixture) { GPR_ASSERT(fixture->vtable != NULL); GPR_ASSERT(fixture->vtable->destruct != NULL); fixture->vtable->destruct(fixture); + gpr_mu_destroy(&fixture->mu); + gpr_cv_destroy(&fixture->cv); gpr_free(fixture); } diff --git 
a/test/core/tsi/transport_security_test_lib.h b/test/core/tsi/transport_security_test_lib.h index 8ae2024ee49..ed8ff856dfa 100644 --- a/test/core/tsi/transport_security_test_lib.h +++ b/test/core/tsi/transport_security_test_lib.h @@ -21,6 +21,10 @@ #include "src/core/tsi/transport_security_interface.h" +#ifdef __cplusplus +extern "C" { +#endif + #define TSI_TEST_TINY_HANDSHAKE_BUFFER_SIZE 32 #define TSI_TEST_SMALL_HANDSHAKE_BUFFER_SIZE 128 #define TSI_TEST_SMALL_READ_BUFFER_ALLOCATED_SIZE 41 @@ -56,10 +60,10 @@ typedef struct tsi_test_fixture_vtable { void (*setup_handshakers)(tsi_test_fixture *fixture); void (*check_handshaker_peers)(tsi_test_fixture *fixture); void (*destruct)(tsi_test_fixture *fixture); -} tranport_security_test_vtable; +} tsi_test_fixture_vtable; struct tsi_test_fixture { - const struct tsi_test_fixture_vtable *vtable; + const tsi_test_fixture_vtable *vtable; /* client/server TSI handshaker used to perform TSI handshakes, and will get instantiated during the call to setup_handshakers. */ tsi_handshaker *client_handshaker; @@ -95,6 +99,13 @@ struct tsi_test_fixture { (https://github.com/grpc/grpc/issues/12164). */ bool test_unused_bytes; + /* These objects will be used coordinate client/server handshakers with TSI + thread to perform TSI handshakes in an asynchronous manner (for GTS TSI + implementations). + */ + gpr_cv cv; + gpr_mu mu; + bool notified; }; struct tsi_test_frame_protector_config { @@ -162,4 +173,8 @@ void tsi_test_do_handshake(tsi_test_fixture *fixture); the client and server switching its role. 
*/ void tsi_test_do_round_trip(tsi_test_fixture *fixture); +#ifdef __cplusplus +} +#endif + #endif // GRPC_TEST_CORE_TSI_TRANSPORT_SECURITY_TEST_LIB_H_ From a0109e54b9c6a737277e2f703734c4c6c57e14ed Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Sat, 16 Sep 2017 23:49:52 +0000 Subject: [PATCH 033/109] Add compiler value "all_the_cpythons" --- tools/run_tests/run_tests.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 591a0bebbde..96812747a34 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -714,6 +714,9 @@ class PythonLanguage(object): return (pypy32_config,) elif args.compiler == 'python_alpine': return (python27_config,) + elif args.compiler == 'all_the_cpythons': + return (python27_config, python34_config, python35_config, + python36_config,) else: raise Exception('Compiler %s not supported.' % args.compiler) @@ -1211,7 +1214,7 @@ argp.add_argument('--compiler', choices=['default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', - 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', + 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', 'all_the_cpythons', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8', 'electron1.3', 'electron1.6', 'coreclr', From 75c27789026375122a33043833a25e5b935c906f Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Mon, 18 Sep 2017 17:11:48 -0700 Subject: [PATCH 034/109] Update Dockerfiles from ubuntu:15.10 to debian:jessie --- .../tools/dockerfile/clang_format.include | 6 ++-- .../grpc_clang_format/Dockerfile.template | 3 +- .../test/sanity/Dockerfile.template | 27 ++++++++++----- tools/dockerfile/grpc_clang_format/Dockerfile | 8 ++--- tools/dockerfile/test/sanity/Dockerfile | 33 ++++++++++++------- 5 files changed, 48 insertions(+), 29 deletions(-) diff --git 
a/templates/tools/dockerfile/clang_format.include b/templates/tools/dockerfile/clang_format.include index 9a2b60ba8c4..81bd2be797b 100644 --- a/templates/tools/dockerfile/clang_format.include +++ b/templates/tools/dockerfile/clang_format.include @@ -1,5 +1,5 @@ RUN apt-get update && apt-get -y install wget -RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN echo "deb-src http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add - RUN apt-get update && apt-get -y install clang-format-3.8 diff --git a/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template index 69cd4034b04..1ab667c95d3 100644 --- a/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template +++ b/templates/tools/dockerfile/grpc_clang_format/Dockerfile.template @@ -14,9 +14,10 @@ # See the License for the specific language governing permissions and # limitations under the License. - FROM ubuntu:15.10 + FROM debian:jessie <%include file="../clang_format.include"/> ADD clang_format_all_the_things.sh / CMD ["echo 'Run with tools/distrib/clang_format_code.sh'"] + \ No newline at end of file diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template index 3f9ea322338..bf34c0ab0f1 100644 --- a/templates/tools/dockerfile/test/sanity/Dockerfile.template +++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- FROM ubuntu:15.10 + FROM debian:jessie <%include file="../../apt_get_basic.include"/> <%include file="../../gcp_api_libraries.include"/> @@ -33,14 +33,23 @@ #====================================== # More sanity test dependencies (bazel) - RUN apt-get install -y openjdk-8-jdk - # Check out Bazel version 0.4.1 since this version allows running - # ./compile.sh without a local protoc dependency - # TODO(mattkwong): install dependencies to support latest Bazel version if newer - # version is needed - RUN git clone https://github.com/bazelbuild/bazel.git /bazel && ${"\\"} - cd /bazel && git checkout tags/0.4.1 && ./compile.sh - RUN ln -s /bazel/output/bazel /bin/ + RUN echo "deb http://http.debian.net/debian jessie-backports main" >> /etc/apt/sources.list + RUN apt-get update + RUN apt-get install -y -t jessie-backports openjdk-8-jdk + + #======================== + # Bazel installation + RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" > /etc/apt/sources.list.d/bazel.list + RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add - + RUN apt-get -y update + RUN apt-get -y install bazel + + # Pin Bazel to 0.4.4 + # Installing Bazel via apt-get first is required before installing 0.4.4 to + # allow gRPC to build without errors. 
See https://github.com/grpc/grpc/issues/10553 + RUN curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/0.4.4/bazel-0.4.4-installer-linux-x86_64.sh + RUN chmod +x ./bazel-0.4.4-installer-linux-x86_64.sh + RUN ./bazel-0.4.4-installer-linux-x86_64.sh <%include file="../../clang_format.include"/> <%include file="../../run_tests_addons.include"/> diff --git a/tools/dockerfile/grpc_clang_format/Dockerfile b/tools/dockerfile/grpc_clang_format/Dockerfile index 647cb52a7b1..dff07feeb6d 100644 --- a/tools/dockerfile/grpc_clang_format/Dockerfile +++ b/tools/dockerfile/grpc_clang_format/Dockerfile @@ -12,12 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM ubuntu:15.10 +FROM debian:jessie RUN apt-get update && apt-get -y install wget -RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN echo "deb-src http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add - RUN apt-get update && apt-get -y install clang-format-3.8 ADD clang_format_all_the_things.sh / diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile index 44732a5ae42..dff979dc371 100644 --- a/tools/dockerfile/test/sanity/Dockerfile +++ b/tools/dockerfile/test/sanity/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM ubuntu:15.10 +FROM debian:jessie # Install Git and basic packages. 
RUN apt-get update && apt-get install -y \ @@ -82,19 +82,28 @@ RUN pip install simplejson mako #====================================== # More sanity test dependencies (bazel) -RUN apt-get install -y openjdk-8-jdk -# Check out Bazel version 0.4.1 since this version allows running -# ./compile.sh without a local protoc dependency -# TODO(mattkwong): install dependencies to support latest Bazel version if newer -# version is needed -RUN git clone https://github.com/bazelbuild/bazel.git /bazel && \ - cd /bazel && git checkout tags/0.4.1 && ./compile.sh -RUN ln -s /bazel/output/bazel /bin/ +RUN echo "deb http://http.debian.net/debian jessie-backports main" >> /etc/apt/sources.list +RUN apt-get update +RUN apt-get install -y -t jessie-backports openjdk-8-jdk + +#======================== +# Bazel installation +RUN echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" > /etc/apt/sources.list.d/bazel.list +RUN curl https://bazel.build/bazel-release.pub.gpg | apt-key add - +RUN apt-get -y update +RUN apt-get -y install bazel + +# Pin Bazel to 0.4.4 +# Installing Bazel via apt-get first is required before installing 0.4.4 to +# allow gRPC to build without errors. 
See https://github.com/grpc/grpc/issues/10553 +RUN curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/0.4.4/bazel-0.4.4-installer-linux-x86_64.sh +RUN chmod +x ./bazel-0.4.4-installer-linux-x86_64.sh +RUN ./bazel-0.4.4-installer-linux-x86_64.sh RUN apt-get update && apt-get -y install wget -RUN echo deb http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN echo deb-src http://llvm.org/apt/wily/ llvm-toolchain-wily-3.8 main >> /etc/apt/sources.list -RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key| apt-key add - +RUN echo "deb http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN echo "deb-src http://llvm.org/apt/jessie/ llvm-toolchain-jessie-3.8 main" >> /etc/apt/sources.list +RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key | apt-key add - RUN apt-get update && apt-get -y install clang-format-3.8 # Prepare ccache From 79c12b9dc02f5558af6868acd2cafdfaa600d89e Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Mon, 18 Sep 2017 22:02:52 -0700 Subject: [PATCH 035/109] Drain readable fd --- .../dns/c_ares/grpc_ares_ev_driver_posix.c | 47 +++++++++++++++++-- test/cpp/naming/resolver_component_test.cc | 4 +- .../naming/resolver_component_tests_runner.sh | 8 ++++ .../naming/resolver_test_record_groups.yaml | 5 -- 4 files changed, 54 insertions(+), 10 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c index 7f1f57259a9..d38fe66d061 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c @@ -20,6 +20,7 @@ #if GRPC_ARES == 1 && defined(GRPC_POSIX_SOCKET) #include +#include #include "src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h" @@ -54,6 +55,8 @@ typedef struct fd_node { bool 
readable_registered; /** if the writable closure has been registered */ bool writable_registered; + /** if the fd is being shut down */ + bool shutting_down; } fd_node; struct grpc_ares_ev_driver { @@ -100,7 +103,6 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { GPR_ASSERT(!fdn->readable_registered); GPR_ASSERT(!fdn->writable_registered); gpr_mu_destroy(&fdn->mu); - grpc_pollset_set_del_fd(exec_ctx, fdn->ev_driver->pollset_set, fdn->fd); /* c-ares library has closed the fd inside grpc_fd. This fd may be picked up immediately by another thread, and should not be closed by the following grpc_fd_orphan. */ @@ -109,6 +111,20 @@ static void fd_node_destroy(grpc_exec_ctx *exec_ctx, fd_node *fdn) { gpr_free(fdn); } +static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) { + gpr_mu_lock(&fdn->mu); + fdn->shutting_down = true; + if (!fdn->readable_registered && !fdn->writable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + } else { + grpc_fd_shutdown( + exec_ctx, fdn->fd, + GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown")); + gpr_mu_unlock(&fdn->mu); + } +} + grpc_error *grpc_ares_ev_driver_create(grpc_ares_ev_driver **ev_driver, grpc_pollset_set *pollset_set) { *ev_driver = (grpc_ares_ev_driver *)gpr_malloc(sizeof(grpc_ares_ev_driver)); @@ -175,18 +191,34 @@ static fd_node *pop_fd_node(fd_node **head, int fd) { return NULL; } +/* Check if \a fd is still readable */ +static bool grpc_ares_is_fd_still_readable(grpc_ares_ev_driver *ev_driver, + int fd) { + size_t bytes_available = 0; + return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0; +} + static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { fd_node *fdn = (fd_node *)arg; grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); fdn->readable_registered = false; + if (fdn->shutting_down && !fdn->writable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + 
grpc_ares_ev_driver_unref(ev_driver); + return; + } gpr_mu_unlock(&fdn->mu); gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd)); if (error == GRPC_ERROR_NONE) { - ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd), - ARES_SOCKET_BAD); + do { + ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd), + ARES_SOCKET_BAD); + } while ( + grpc_ares_is_fd_still_readable(ev_driver, grpc_fd_wrapped_fd(fdn->fd))); } else { // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or // timed out. The pending lookups made on this ev_driver will be cancelled @@ -208,6 +240,12 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); fdn->writable_registered = false; + if (fdn->shutting_down && !fdn->readable_registered) { + gpr_mu_unlock(&fdn->mu); + fd_node_destroy(exec_ctx, fdn); + grpc_ares_ev_driver_unref(ev_driver); + return; + } gpr_mu_unlock(&fdn->mu); gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd)); @@ -256,6 +294,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, fdn->ev_driver = ev_driver; fdn->readable_registered = false; fdn->writable_registered = false; + fdn->shutting_down = false; gpr_mu_init(&fdn->mu); GRPC_CLOSURE_INIT(&fdn->read_closure, on_readable_cb, fdn, grpc_schedule_on_exec_ctx); @@ -296,7 +335,7 @@ static void grpc_ares_notify_on_event_locked(grpc_exec_ctx *exec_ctx, while (ev_driver->fds != NULL) { fd_node *cur = ev_driver->fds; ev_driver->fds = ev_driver->fds->next; - fd_node_destroy(exec_ctx, cur); + fd_node_shutdown(exec_ctx, cur); } ev_driver->fds = new_list; // If the ev driver has no working fd, all the tasks are done. 
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc index 0857fb6a32c..6a2ce37d00a 100644 --- a/test/cpp/naming/resolver_component_test.cc +++ b/test/cpp/naming/resolver_component_test.cc @@ -267,7 +267,9 @@ void CheckResolverResultLocked(grpc_exec_ctx *exec_ctx, void *argsp, } EXPECT_THAT(args->expected_addrs, UnorderedElementsAreArray(found_lb_addrs)); CheckServiceConfigResultLocked(channel_args, args); - CheckLBPolicyResultLocked(channel_args, args); + if (args->expected_service_config_string == "") { + CheckLBPolicyResultLocked(channel_args, args); + } gpr_atm_rel_store(&args->done_atm, 1); gpr_mu_lock(args->mu); GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, NULL)); diff --git a/test/cpp/naming/resolver_component_tests_runner.sh b/test/cpp/naming/resolver_component_tests_runner.sh index 83b03b67a30..cf71c9dcf97 100755 --- a/test/cpp/naming/resolver_component_tests_runner.sh +++ b/test/cpp/naming/resolver_component_tests_runner.sh @@ -168,6 +168,14 @@ $FLAGS_test_bin_path \ --local_dns_server_address=127.0.0.1:$FLAGS_test_dns_server_port & wait $! || EXIT_CODE=1 +$FLAGS_test_bin_path \ + --target_name='ipv4-config-causing-fallback-to-tcp.resolver-tests.grpctestingexp.' 
\ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}' \ + --expected_lb_policy='' \ + --local_dns_server_address=127.0.0.1:$FLAGS_test_dns_server_port & +wait $! 
|| EXIT_CODE=1 + kill -SIGTERM $DNS_SERVER_PID || true wait exit $EXIT_CODE diff --git a/test/cpp/naming/resolver_test_record_groups.yaml b/test/cpp/naming/resolver_test_record_groups.yaml index 67c611d831f..c2e8ddd0ed2 100644 --- a/test/cpp/naming/resolver_test_record_groups.yaml +++ b/test/cpp/naming/resolver_test_record_groups.yaml @@ -137,11 +137,6 @@ resolver_component_tests: - {TTL: '2100', data: '2607:f8b0:400a:801::1002', type: AAAA} srv-ipv6-target-has-backend-and-balancer: - {TTL: '2100', data: '2607:f8b0:400a:801::1002', type: AAAA} - -resolver_component_tests_TODO: -- 'TODO: enable this large-txt-record test once working. (it is much longer than 512 - bytes, likely to cause use of TCP even if max payload for UDP is changed somehow, - e.g. via notes in RFC 2671)' - expected_addrs: - {address: '1.2.3.4:443', is_balancer: false} expected_chosen_service_config: '{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"Simple
Service","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}' From 532aad7752c27447124df8b0b6f05292a2009f9c Mon Sep 17 00:00:00 2001 From: "Nicolas \"Pixel\" Noble" Date: Wed, 20 Sep 2017 01:50:12 +0200 Subject: [PATCH 036/109] Adding script to generate pull request list from an interval. --- tools/distrib/pull_requests_interval.sh | 67 +++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100755 tools/distrib/pull_requests_interval.sh diff --git a/tools/distrib/pull_requests_interval.sh b/tools/distrib/pull_requests_interval.sh new file mode 100755 index 00000000000..43114990aa2 --- /dev/null +++ b/tools/distrib/pull_requests_interval.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +if [ "x$1" = "x" ] ; then + echo "Usage: $0 [second ref]" + exit 1 +else + first=$1 +fi + +if [ -n $2 ] ; then + second=HEAD +fi + +if [ -e ~/github-credentials.vars ] ; then + . ~/github-credentials.vars +fi + +if [ "x$github_client_id" = "x" ] || [ "x$github_client_secret" = "x" ] ; then + echo "Warning: you don't have github credentials set." + echo + echo "You may end up exceeding guest quota quickly." + echo "You can create an application for yourself," + echo "and get its credentials. Go to" + echo + echo " https://github.com/settings/developers" + echo + echo "and click 'Register a new application'." 
+ echo + echo "From the application's information, copy/paste" + echo "its Client ID and Client Secret, into the file" + echo + echo " ~/github-credentials.vars" + echo + echo "with the following format:" + echo + echo "github_client_id=0123456789abcdef0123" + echo "github_client_secret=0123456789abcdef0123456789abcdef" + echo + echo + addendum="" +else + addendum="?client_id=$github_client_id&client_secret=$github_client_secret" +fi + +unset notfirst +echo "[" +git log --pretty=oneline $1..$2 | + grep '[^ ]\+ Merge pull request #[0-9]\{4,6\} ' | + cut -f 2 -d# | + cut -f 1 -d\ | + sort -u | + while read id ; do + if [ "x$notfirst" = "x" ] ; then + notfirst=true + else + echo "," + fi + echo -n " {\"url\": \"https://github.com/grpc/grpc/pull/$id\"," + out=`mktemp` + curl -s "https://api.github.com/repos/grpc/grpc/pulls/$id$addendum" > $out + echo -n " "`grep '"title"' $out` + echo -n " "`grep '"login"' $out | head -1` + echo -n " \"pr\": $id }" + rm $out + done +echo +echo "]" From b56cf2d3bf6b44be4291b7a9962a10e181a3a753 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Tue, 19 Sep 2017 15:01:39 -0700 Subject: [PATCH 037/109] Protect fdn->fd --- .../dns/c_ares/grpc_ares_ev_driver_posix.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c index d38fe66d061..b5be69c4287 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c @@ -38,8 +38,6 @@ typedef struct fd_node { /** the owner of this fd node */ grpc_ares_ev_driver *ev_driver; - /** the grpc_fd owned by this fd node */ - grpc_fd *fd; /** a closure wrapping on_readable_cb, which should be invoked when the grpc_fd in this node becomes readable. 
*/ grpc_closure read_closure; @@ -51,6 +49,8 @@ typedef struct fd_node { /** mutex guarding the rest of the state */ gpr_mu mu; + /** the grpc_fd owned by this fd node */ + grpc_fd *fd; /** if the readable closure has been registered */ bool readable_registered; /** if the writable closure has been registered */ @@ -203,6 +203,7 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, fd_node *fdn = (fd_node *)arg; grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); + const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->readable_registered = false; if (fdn->shutting_down && !fdn->writable_registered) { gpr_mu_unlock(&fdn->mu); @@ -212,13 +213,11 @@ static void on_readable_cb(grpc_exec_ctx *exec_ctx, void *arg, } gpr_mu_unlock(&fdn->mu); - gpr_log(GPR_DEBUG, "readable on %d", grpc_fd_wrapped_fd(fdn->fd)); + gpr_log(GPR_DEBUG, "readable on %d", fd); if (error == GRPC_ERROR_NONE) { do { - ares_process_fd(ev_driver->channel, grpc_fd_wrapped_fd(fdn->fd), - ARES_SOCKET_BAD); - } while ( - grpc_ares_is_fd_still_readable(ev_driver, grpc_fd_wrapped_fd(fdn->fd))); + ares_process_fd(ev_driver->channel, fd, ARES_SOCKET_BAD); + } while (grpc_ares_is_fd_still_readable(ev_driver, fd)); } else { // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or // timed out. 
The pending lookups made on this ev_driver will be cancelled @@ -239,6 +238,7 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, fd_node *fdn = (fd_node *)arg; grpc_ares_ev_driver *ev_driver = fdn->ev_driver; gpr_mu_lock(&fdn->mu); + const int fd = grpc_fd_wrapped_fd(fdn->fd); fdn->writable_registered = false; if (fdn->shutting_down && !fdn->readable_registered) { gpr_mu_unlock(&fdn->mu); @@ -248,10 +248,9 @@ static void on_writable_cb(grpc_exec_ctx *exec_ctx, void *arg, } gpr_mu_unlock(&fdn->mu); - gpr_log(GPR_DEBUG, "writable on %d", grpc_fd_wrapped_fd(fdn->fd)); + gpr_log(GPR_DEBUG, "writable on %d", fd); if (error == GRPC_ERROR_NONE) { - ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, - grpc_fd_wrapped_fd(fdn->fd)); + ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, fd); } else { // If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or // timed out. The pending lookups made on this ev_driver will be cancelled From c2261f21e10cb117fb0718963b80468d1ab5500b Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 20 Sep 2017 13:25:49 -0700 Subject: [PATCH 038/109] more debug --- src/core/lib/iomgr/timer_generic.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 7449a628857..5091c29ae8c 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -452,6 +452,18 @@ static int refill_heap(timer_shard *shard, gpr_atm now) { for (timer = shard->list.next; timer != &shard->list; timer = next) { next = timer->next; +#ifndef NDEBUG + if (next == timer && next != &shard->list) { + grpc_closure *c = timer->closure; + gpr_log(GPR_ERROR, + "We have a problem!!!! 
- timer %p closure: %p, created-at: " + "[%s,%d], scheduled-at: [%s, %d]", + timer, c, c->file_initiated, c->line_created, c->file_initiated, + c->line_initiated); + abort(); + } +#endif + if (timer->deadline < shard->queue_deadline_cap) { if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap", From 4b527a7d8c895abe537384b49465cfa6a53b6162 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 20 Sep 2017 13:35:05 -0700 Subject: [PATCH 039/109] Fix test flake of stream_compression_compressed_payload #12307 --- test/core/end2end/tests/compressed_payload.c | 9 +++++---- .../tests/stream_compression_compressed_payload.c | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/test/core/end2end/tests/compressed_payload.c b/test/core/end2end/tests/compressed_payload.c index 639af15454b..ce86e97b83e 100644 --- a/test/core/end2end/tests/compressed_payload.c +++ b/test/core/end2end/tests/compressed_payload.c @@ -151,6 +151,11 @@ static void request_for_disabled_algorithm( grpc_metadata_array_init(&request_metadata_recv); grpc_call_details_init(&call_details); + error = + grpc_server_request_call(f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101)); + GPR_ASSERT(GRPC_CALL_OK == error); + memset(ops, 0, sizeof(ops)); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; @@ -187,10 +192,6 @@ static void request_for_disabled_algorithm( error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); GPR_ASSERT(GRPC_CALL_OK == error); - error = - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.cq, f.cq, tag(101)); - GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(101), true); cq_verify(cqv); diff --git a/test/core/end2end/tests/stream_compression_compressed_payload.c b/test/core/end2end/tests/stream_compression_compressed_payload.c index 5a69091a57f..094c4de1c05 100644 --- 
a/test/core/end2end/tests/stream_compression_compressed_payload.c +++ b/test/core/end2end/tests/stream_compression_compressed_payload.c @@ -151,6 +151,11 @@ static void request_for_disabled_algorithm( grpc_metadata_array_init(&request_metadata_recv); grpc_call_details_init(&call_details); + error = + grpc_server_request_call(f.server, &s, &call_details, + &request_metadata_recv, f.cq, f.cq, tag(101)); + GPR_ASSERT(GRPC_CALL_OK == error); + memset(ops, 0, sizeof(ops)); op = ops; op->op = GRPC_OP_SEND_INITIAL_METADATA; @@ -187,10 +192,6 @@ static void request_for_disabled_algorithm( error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL); GPR_ASSERT(GRPC_CALL_OK == error); - error = - grpc_server_request_call(f.server, &s, &call_details, - &request_metadata_recv, f.cq, f.cq, tag(101)); - GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(101), true); cq_verify(cqv); From e2e7cf42a45ff12eeb37f1ab4b954275d0a2c9ba Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Fri, 15 Sep 2017 11:17:04 -0700 Subject: [PATCH 040/109] Add testing of grpc-node for Linux and MacOS --- .../run_tests/helper_scripts/run_grpc-node.sh | 28 +++++++++ .../python_utils/filter_pull_request_tests.py | 2 +- tools/run_tests/run_tests.py | 62 +++++++++++++++++++ tools/run_tests/run_tests_matrix.py | 2 +- .../run_tests/sanity/check_test_filtering.py | 2 +- 5 files changed, 93 insertions(+), 3 deletions(-) create mode 100755 tools/run_tests/helper_scripts/run_grpc-node.sh diff --git a/tools/run_tests/helper_scripts/run_grpc-node.sh b/tools/run_tests/helper_scripts/run_grpc-node.sh new file mode 100755 index 00000000000..25f149f5793 --- /dev/null +++ b/tools/run_tests/helper_scripts/run_grpc-node.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This script runs grpc/grpc-node tests with their grpc submodule updated +# to this reference + +# cd to gRPC root directory +cd $(dirname $0)/../../.. + +CURRENT_COMMIT=$(git rev-parse --verify HEAD) + +rm -rf ./../grpc-node +git clone --recursive https://github.com/grpc/grpc-node ./../grpc-node +cd ./../grpc-node + +./test-grpc-submodule.sh $CURRENT_COMMIT diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py index 4ad981237b7..8ba3f2cb4d7 100644 --- a/tools/run_tests/python_utils/filter_pull_request_tests.py +++ b/tools/run_tests/python_utils/filter_pull_request_tests.py @@ -47,7 +47,7 @@ class TestSuite: _CORE_TEST_SUITE = TestSuite(['c']) _CPP_TEST_SUITE = TestSuite(['c++']) _CSHARP_TEST_SUITE = TestSuite(['csharp']) -_NODE_TEST_SUITE = TestSuite(['node']) +_NODE_TEST_SUITE = TestSuite(['node', 'grpc-node']) _OBJC_TEST_SUITE = TestSuite(['objc']) _PHP_TEST_SUITE = TestSuite(['php', 'php7']) _PYTHON_TEST_SUITE = TestSuite(['python']) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index b66c5f7f71f..bcf09deddb2 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -445,6 +445,67 @@ class CLanguage(object): return self.make_target +# This tests Node on grpc/grpc-node and will become the standard for Node testing +class RemoteNodeLanguage(object): + + def __init__(self): + self.platform = platform_string() + + def configure(self, config, args): + self.config = config + self.args = args + # Note: electron ABI only depends on 
major and minor version, so that's all + # we should specify in the compiler argument + _check_compiler(self.args.compiler, ['default', 'node0.12', + 'node4', 'node5', 'node6', + 'node7', 'node8', + 'electron1.3', 'electron1.6']) + if self.args.compiler == 'default': + self.runtime = 'node' + self.node_version = '8' + else: + if self.args.compiler.startswith('electron'): + self.runtime = 'electron' + self.node_version = self.args.compiler[8:] + else: + self.runtime = 'node' + # Take off the word "node" + self.node_version = self.args.compiler[4:] + + # TODO: update with Windows/electron scripts when available for grpc/grpc-node + def test_specs(self): + if self.platform == 'windows': + return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])] + else: + return [self.config.job_spec(['tools/run_tests/helper_scripts/run_grpc-node.sh'], + None, + environ=_FORCE_ENVIRON_FOR_WRAPPERS)] + + def pre_build_steps(self): + return [] + + def make_targets(self): + return [] + + def make_options(self): + return [] + + def build_steps(self): + return [] + + def post_tests_steps(self): + return [] + + def makefile_name(self): + return 'Makefile' + + def dockerfile_dir(self): + return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch) + + def __str__(self): + return 'grpc-node' + + class NodeLanguage(object): def __init__(self): @@ -1063,6 +1124,7 @@ with open('tools/run_tests/generated/configs.json') as f: _LANGUAGES = { 'c++': CLanguage('cxx', 'c++'), 'c': CLanguage('c', 'c'), + 'grpc-node': RemoteNodeLanguage(), 'node': NodeLanguage(), 'node_express': NodeExpressLanguage(), 'php': PhpLanguage(), diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index 957e7b569e2..711e069088d 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -166,7 +166,7 @@ def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS): inner_jobs=inner_jobs, 
timeout_seconds=_CPP_RUNTESTS_TIMEOUT) - test_jobs += _generate_jobs(languages=['ruby', 'php'], + test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'], configs=['dbg', 'opt'], platforms=['linux', 'macos'], labels=['basictests', 'multilang'], diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py index bea8125fb9c..3ebb9389f79 100755 --- a/tools/run_tests/sanity/check_test_filtering.py +++ b/tools/run_tests/sanity/check_test_filtering.py @@ -25,7 +25,7 @@ sys.path.insert(0, os.path.abspath('tools/run_tests/')) from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs import python_utils.filter_pull_request_tests as filter_pull_request_tests -_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] +_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'grpc-node', 'node', 'objc', 'php', 'php7', 'python', 'ruby'] _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows'] class TestFilteringTest(unittest.TestCase): From c65c8773c0f0d27697c4b260be76167cd212ec92 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Wed, 20 Sep 2017 17:19:01 -0700 Subject: [PATCH 041/109] Remove the entire line --- include/grpc/impl/codegen/port_platform.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 8d8dcee3b0a..24581fa6ef3 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -183,7 +183,6 @@ #define _BSD_SOURCE #endif #if TARGET_OS_IPHONE -#define GPR_FORBID_UNREACHABLE_CODE 0 #define GPR_PLATFORM_STRING "ios" #define GPR_CPU_IPHONE 1 #define GPR_PTHREAD_TLS 1 From 96d23c3f081915a2e2f60b132941a09d6c3a632d Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Wed, 20 Sep 2017 17:10:37 -0700 Subject: [PATCH 042/109] Timer hash table for debug builds --- src/core/lib/iomgr/timer_generic.c | 143 ++++++++++++++++++----------- 1 
file changed, 87 insertions(+), 56 deletions(-) diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 5091c29ae8c..699947409bb 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -79,67 +79,121 @@ static timer_shard g_shards[NUM_SHARDS]; * Access to this is protected by g_shared_mutables.mu */ static timer_shard *g_shard_queue[NUM_SHARDS]; +#ifndef NDEBUG + +/* == Hash table for duplicate timer detection == */ + #define NUM_HASH_BUCKETS 1000 -#define NUM_SLOTS_PER_BUCKET 5 -static gpr_mu g_hash_mu; -static grpc_timer *g_timer_hash[1000][5] = {{NULL, NULL}}; +#define NUM_SLOTS_PER_BUCKET 30 -static void init_timer_hash() { gpr_mu_init(&g_hash_mu); } +static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */ +static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] + [NUM_SLOTS_PER_BUCKET] = {{NULL, NULL}}; -static bool is_timer_present(grpc_timer *t) { +static void init_timer_ht() { + for (int i = 0; i < NUM_HASH_BUCKETS; i++) { + gpr_mu_init(&g_hash_mu[i]); + } +} + +static bool is_in_ht(grpc_timer *t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); bool is_found = false; - gpr_mu_lock(&g_hash_mu); + gpr_mu_lock(&g_hash_mu[i]); for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_hash[i][j] == t) { + if (g_timer_ht[i][j] == t) { is_found = true; break; } } - gpr_mu_unlock(&g_hash_mu); + gpr_mu_unlock(&g_hash_mu[i]); return is_found; } -static void check_and_add_timer(grpc_timer *t) { +static void add_to_ht(grpc_timer *t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); bool added = false; - gpr_mu_lock(&g_hash_mu); + gpr_mu_lock(&g_hash_mu[i]); for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_hash[i][j] == NULL) { - g_timer_hash[i][j] = t; + if (g_timer_ht[i][j] == NULL) { + g_timer_ht[i][j] = t; added = true; break; - } else if (g_timer_hash[i][j] == t) { - gpr_log(GPR_ERROR, "*** DUPLICATE TIMER BEING ADDED (%p) **", (void *)t); + } else if 
(g_timer_ht[i][j] == t) { + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** Duplicate timer (%p) being added. Closure: (%p), created at: " + "(%s:%d), scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); abort(); } } - gpr_mu_unlock(&g_hash_mu); + + gpr_mu_unlock(&g_hash_mu[i]); if (!added) { - gpr_log(GPR_ERROR, "** NOT ENOUGH BUCKETS **"); + gpr_log(GPR_ERROR, + "** Not enough slots in the timer hash table. Please increase " + "NUM_SLOTS_PER_BUCKET **"); abort(); } } -static void remove_timer(grpc_timer *t) { +static void remove_from_ht(grpc_timer *t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); bool removed = false; - gpr_mu_lock(&g_hash_mu); + gpr_mu_lock(&g_hash_mu[i]); for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_hash[i][j] == t) { - g_timer_hash[i][j] = 0; + if (g_timer_ht[i][j] == t) { + g_timer_ht[i][j] = 0; removed = true; break; } } - gpr_mu_unlock(&g_hash_mu); + gpr_mu_unlock(&g_hash_mu[i]); if (!removed) { - gpr_log(GPR_ERROR, "*** Unable to remove %p. BUG! **", (void *)t); + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** Removing timer (%p) that is not added to hash table. Closure " + "(%p), created at: (%s:%d), scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); abort(); } } +/* If a timer is added to a timer shard (either heap or a list), it cannot + * be pending. A timer is added to hash table only-if it is added to the + * timer shard. + * Therefore, if timer->pending is false, it cannot be in hash table */ +static void validate_non_pending_timer(grpc_timer *t) { + if (!t->pending && is_in_ht(t)) { + grpc_closure *c = t->closure; + gpr_log(GPR_ERROR, + "** gpr_timer_cancel() called on a non-pending timer (%p) which " + "is in the hash table. 
Closure: (%p), created at: (%s:%d), " + "scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); + abort(); + } +} + +#define INIT_TIMER_HASH_TABLE() init_timer_ht() +#define ADD_TO_HASH_TABLE(t) add_to_ht((t)) +#define REMOVE_FROM_HASH_TABLE(t) remove_from_ht((t)) +#define VALIDATE_NON_PENDING_TIMER(t) validate_non_pending_timer((t)) + +#else + +#define INIT_TIMER_HASH_TABLE() +#define ADD_TO_HASH_TABLE(t) +#define REMOVE_FROM_HASH_TABLE(t) +#define VALIDATE_NON_PENDING_TIMER(t) + +#endif + /* Thread local variable that stores the deadline of the next timer the thread * has last-seen. This is an optimization to prevent the thread from checking * shared_mutables.min_timer (which requires acquiring shared_mutables.mu lock, @@ -237,7 +291,7 @@ void grpc_timer_list_init(gpr_timespec now) { g_shard_queue[i] = shard; } - init_timer_hash(); + INIT_TIMER_HASH_TABLE(); } void grpc_timer_list_shutdown(grpc_exec_ctx *exec_ctx) { @@ -309,9 +363,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline); if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, - "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR "] now %" PRId64 - ".%09d [%" PRIdPTR "] call %p[%p]", + gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR + "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]", timer, deadline.tv_sec, deadline.tv_nsec, deadline_atm, now.tv_sec, now.tv_nsec, timespec_to_atm_round_down(now), closure, closure->cb); } @@ -337,8 +390,7 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, grpc_time_averaged_stats_add_sample(&shard->stats, ts_to_dbl(gpr_time_sub(deadline, now))); - /** TODO: sreek - CHECK HERE AND ADD **/ - check_and_add_timer(timer); + ADD_TO_HASH_TABLE(timer); if (deadline_atm < shard->queue_deadline_cap) { is_first_timer = grpc_timer_heap_add(&shard->heap, timer); @@ -347,9 +399,8 @@ void 
grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, list_join(&shard->list, timer); } if (GRPC_TRACER_ON(grpc_timer_trace)) { - gpr_log(GPR_DEBUG, - " .. add to shard %d with queue_deadline_cap=%" PRIdPTR - " => is_first_timer=%s", + gpr_log(GPR_DEBUG, " .. add to shard %d with queue_deadline_cap=%" PRIdPTR + " => is_first_timer=%s", (int)(shard - g_shards), shard->queue_deadline_cap, is_first_timer ? "true" : "false"); } @@ -404,8 +455,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { } if (timer->pending) { - /* TODO: sreek - Remove the timer here */ - remove_timer(timer); + REMOVE_FROM_HASH_TABLE(timer); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED); timer->pending = false; @@ -415,12 +465,7 @@ void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) { grpc_timer_heap_remove(&shard->heap, timer); } } else { - if (is_timer_present(timer)) { - gpr_log(GPR_ERROR, - "** gpr_timer_cancel called on a non-pending timer! %p", - (void *)timer); - abort(); - } + VALIDATE_NON_PENDING_TIMER(timer); } gpr_mu_unlock(&shard->mu); } @@ -452,18 +497,6 @@ static int refill_heap(timer_shard *shard, gpr_atm now) { for (timer = shard->list.next; timer != &shard->list; timer = next) { next = timer->next; -#ifndef NDEBUG - if (next == timer && next != &shard->list) { - grpc_closure *c = timer->closure; - gpr_log(GPR_ERROR, - "We have a problem!!!! - timer %p closure: %p, created-at: " - "[%s,%d], scheduled-at: [%s, %d]", - timer, c, c->file_initiated, c->line_created, c->file_initiated, - c->line_initiated); - abort(); - } -#endif - if (timer->deadline < shard->queue_deadline_cap) { if (GRPC_TRACER_ON(grpc_timer_check_trace)) { gpr_log(GPR_DEBUG, " .. 
add timer with deadline %" PRIdPTR " to heap", @@ -516,8 +549,7 @@ static size_t pop_timers(grpc_exec_ctx *exec_ctx, timer_shard *shard, grpc_timer *timer; gpr_mu_lock(&shard->mu); while ((timer = pop_one(shard, now))) { - /* TODO: sreek: Remove timer here */ - remove_timer(timer); + REMOVE_FROM_HASH_TABLE(timer); GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error)); n++; } @@ -630,9 +662,8 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx, gpr_asprintf(&next_str, "%" PRId64 ".%09d [%" PRIdPTR "]", next->tv_sec, next->tv_nsec, timespec_to_atm_round_down(*next)); } - gpr_log(GPR_DEBUG, - "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR - "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, + gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRId64 ".%09d [%" PRIdPTR + "] next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR, now.tv_sec, now.tv_nsec, now_atm, next_str, gpr_tls_get(&g_last_seen_min_timer), gpr_atm_no_barrier_load(&g_shared_mutables.min_timer)); From 46c821fe811d0cd2ab18feaec368443d4fd39a75 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Wed, 20 Sep 2017 19:23:22 -0700 Subject: [PATCH 043/109] Remove extra record_to_resolve --- test/cpp/naming/resolver_test_record_groups.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/cpp/naming/resolver_test_record_groups.yaml b/test/cpp/naming/resolver_test_record_groups.yaml index c2e8ddd0ed2..33d774ca701 100644 --- a/test/cpp/naming/resolver_test_record_groups.yaml +++ b/test/cpp/naming/resolver_test_record_groups.yaml @@ -141,7 +141,6 @@ resolver_component_tests: - {address: '1.2.3.4:443', is_balancer: false} expected_chosen_service_config: 
'{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}' expected_lb_policy: null - record_to_resolve: srv-ipv6-target-has-backend-and-balancer record_to_resolve: ipv4-config-causing-fallback-to-tcp records: ipv4-config-causing-fallback-to-tcp: From 3b6593679c28afd4455c7d38c417583ddc4341ef Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Thu, 21 Sep 2017 12:04:37 -0700 Subject: [PATCH 044/109] Fix format --- .../resolver/dns/c_ares/grpc_ares_ev_driver_posix.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c index b5be69c4287..c30cc93b6ff 100644 --- 
a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.c @@ -118,9 +118,8 @@ static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) { gpr_mu_unlock(&fdn->mu); fd_node_destroy(exec_ctx, fdn); } else { - grpc_fd_shutdown( - exec_ctx, fdn->fd, - GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown")); + grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING( + "c-ares fd shutdown")); gpr_mu_unlock(&fdn->mu); } } From 2f3e58809911fed67ba4ea79633d820ab6b719ef Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Thu, 21 Sep 2017 13:47:20 -0700 Subject: [PATCH 045/109] Dont assume that sigint wasnt masked when invoking ruby tests --- src/ruby/end2end/killed_client_thread_client.rb | 2 +- src/ruby/end2end/killed_client_thread_driver.rb | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ruby/end2end/killed_client_thread_client.rb b/src/ruby/end2end/killed_client_thread_client.rb index 7d6ed8c8d7a..493c0eb56a4 100755 --- a/src/ruby/end2end/killed_client_thread_client.rb +++ b/src/ruby/end2end/killed_client_thread_client.rb @@ -35,7 +35,7 @@ def main :this_channel_is_insecure) stub.echo(Echo::EchoRequest.new(request: 'hello')) fail 'the clients rpc in this test shouldnt complete. 
' \ - 'expecting SIGINT to happen in the middle of the call' + 'expecting SIGTERM to happen in the middle of the call' end thd.join end diff --git a/src/ruby/end2end/killed_client_thread_driver.rb b/src/ruby/end2end/killed_client_thread_driver.rb index 09f05a44873..fce5d13e825 100755 --- a/src/ruby/end2end/killed_client_thread_driver.rb +++ b/src/ruby/end2end/killed_client_thread_driver.rb @@ -69,9 +69,9 @@ def main call_started_cv.wait(call_started_mu) until call_started.val end - # SIGINT the child process now that it's + # SIGTERM the child process now that it's # in the middle of an RPC (happening on a non-main thread) - Process.kill('SIGINT', client_pid) + Process.kill('SIGTERM', client_pid) STDERR.puts 'sent shutdown' begin @@ -88,8 +88,8 @@ def main end client_exit_code = $CHILD_STATUS - if client_exit_code.termsig != 2 # SIGINT - fail 'expected client exit from SIGINT ' \ + if client_exit_code.termsig != 15 # SIGTERM + fail 'expected client exit from SIGTERM ' \ "but got child status: #{client_exit_code}" end From 054bba9edc2127360b3416fad28e20b9fb4c378e Mon Sep 17 00:00:00 2001 From: David Garcia Quintas Date: Thu, 21 Sep 2017 14:04:55 -0700 Subject: [PATCH 046/109] More docs --- tools/github/pr_latency.py | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/tools/github/pr_latency.py b/tools/github/pr_latency.py index e773bda37ab..5d635835e54 100644 --- a/tools/github/pr_latency.py +++ b/tools/github/pr_latency.py @@ -13,7 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Measure the time between PR creation and completion of all tests""" +"""Measure the time between PR creation and completion of all tests. + +You'll need a github API token to avoid being rate-limited. See +https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/ + +This script goes over the most recent 100 pull requests. 
For PRs with a single +commit, it uses the PR's creation as the initial time; othewise, it uses the +date of the last commit. This is somewhat fragile, and imposed by the fact that +GitHub reports a PR's updated timestamp for any event that modifies the PR (e.g. +comments), not just the addition of new commits. + +In addition, it ignores latencies greater than five hours, as that's likely due +to a manual re-run of tests. +""" from __future__ import absolute_import from __future__ import division @@ -111,9 +124,13 @@ def get_status_data(statuses_url, system): def build_args_parser(): import argparse parser = argparse.ArgumentParser() - parser.add_argument('--format', type=str, choices=['human', 'csv'], default='human') - parser.add_argument('--system', type=str, choices=['jenkins', 'kokoro'], required=True) - parser.add_argument('--token', type=str, default='') + parser.add_argument('--format', type=str, choices=['human', 'csv'], + default='human', + help='Output format: are you a human or a machine?') + parser.add_argument('--system', type=str, choices=['jenkins', 'kokoro'], + required=True, help='Consider only the given CI system') + parser.add_argument('--token', type=str, default='', + help='GitHub token to use its API with a higher rate limit') return parser From 2bc9c8b38e7ddec797e5f8d4c6946d1b20781709 Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 21 Sep 2017 15:20:21 -0700 Subject: [PATCH 047/109] Review feedback --- src/core/lib/iomgr/timer_generic.c | 77 +++++++++++++++++------------- src/core/lib/iomgr/timer_generic.h | 3 ++ 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index 699947409bb..a7f3462fd2c 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -83,12 +83,11 @@ static timer_shard *g_shard_queue[NUM_SHARDS]; /* == Hash table for duplicate timer detection == */ -#define NUM_HASH_BUCKETS 1000 +#define 
NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */ #define NUM_SLOTS_PER_BUCKET 30 static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */ -static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] - [NUM_SLOTS_PER_BUCKET] = {{NULL, NULL}}; +static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL}; static void init_timer_ht() { for (int i = 0; i < NUM_HASH_BUCKETS; i++) { @@ -98,60 +97,64 @@ static void init_timer_ht() { static bool is_in_ht(grpc_timer *t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); - bool is_found = false; + gpr_mu_lock(&g_hash_mu[i]); - for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_ht[i][j] == t) { - is_found = true; - break; - } + grpc_timer *p = g_timer_ht[i]; + while (p != NULL && p != t) { + p = p->hash_table_next; } gpr_mu_unlock(&g_hash_mu[i]); - return is_found; + + return (p == t); } static void add_to_ht(grpc_timer *t) { + GPR_ASSERT(!t->hash_table_next); size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); - bool added = false; + gpr_mu_lock(&g_hash_mu[i]); - for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_ht[i][j] == NULL) { - g_timer_ht[i][j] = t; - added = true; - break; - } else if (g_timer_ht[i][j] == t) { - grpc_closure *c = t->closure; - gpr_log(GPR_ERROR, - "** Duplicate timer (%p) being added. Closure: (%p), created at: " - "(%s:%d), scheduled at: (%s:%d) **", - t, c, c->file_created, c->line_created, c->file_initiated, - c->line_initiated); - abort(); - } + grpc_timer *p = g_timer_ht[i]; + while (p != NULL && p != t) { + p = p->hash_table_next; } - gpr_mu_unlock(&g_hash_mu[i]); - if (!added) { + if (p == t) { + grpc_closure *c = t->closure; gpr_log(GPR_ERROR, - "** Not enough slots in the timer hash table. Please increase " - "NUM_SLOTS_PER_BUCKET **"); + "** Duplicate timer (%p) being added. 
Closure: (%p), created at: " + "(%s:%d), scheduled at: (%s:%d) **", + t, c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated); abort(); } + + /* Timer not present in the bucket. Insert at head of the list */ + t->hash_table_next = g_timer_ht[i]; + g_timer_ht[i] = t; + gpr_mu_unlock(&g_hash_mu[i]); } static void remove_from_ht(grpc_timer *t) { size_t i = GPR_HASH_POINTER(t, NUM_HASH_BUCKETS); bool removed = false; + gpr_mu_lock(&g_hash_mu[i]); - for (int j = 0; j < NUM_SLOTS_PER_BUCKET; j++) { - if (g_timer_ht[i][j] == t) { - g_timer_ht[i][j] = 0; + if (g_timer_ht[i] == t) { + g_timer_ht[i] = g_timer_ht[i]->hash_table_next; + removed = true; + } else if (g_timer_ht[i] != NULL) { + grpc_timer *p = g_timer_ht[i]; + while (p->hash_table_next != NULL && p->hash_table_next != t) { + p = p->hash_table_next; + } + + if (p->hash_table_next == t) { + p->hash_table_next = t->hash_table_next; removed = true; - break; } } - gpr_mu_unlock(&g_hash_mu[i]); + if (!removed) { grpc_closure *c = t->closure; gpr_log(GPR_ERROR, @@ -161,6 +164,8 @@ static void remove_from_ht(grpc_timer *t) { c->line_initiated); abort(); } + + t->hash_table_next = NULL; } /* If a timer is added to a timer shard (either heap or a list), it cannot @@ -362,6 +367,10 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer, timer->closure = closure; gpr_atm deadline_atm = timer->deadline = timespec_to_atm_round_up(deadline); +#ifndef NDEBUG + timer->hash_table_next = NULL; +#endif + if (GRPC_TRACER_ON(grpc_timer_trace)) { gpr_log(GPR_DEBUG, "TIMER %p: SET %" PRId64 ".%09d [%" PRIdPTR "] now %" PRId64 ".%09d [%" PRIdPTR "] call %p[%p]", diff --git a/src/core/lib/iomgr/timer_generic.h b/src/core/lib/iomgr/timer_generic.h index 72a4ac1f107..f0597f6ea0e 100644 --- a/src/core/lib/iomgr/timer_generic.h +++ b/src/core/lib/iomgr/timer_generic.h @@ -29,6 +29,9 @@ struct grpc_timer { struct grpc_timer *next; struct grpc_timer *prev; grpc_closure *closure; +#ifndef NDEBUG + struct 
grpc_timer *hash_table_next; +#endif }; #endif /* GRPC_CORE_LIB_IOMGR_TIMER_GENERIC_H */ From 9fa16599510ca4772ef0b2e3631f7eaa3b429695 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Thu, 21 Sep 2017 15:43:32 -0700 Subject: [PATCH 048/109] Fix a memory leak point --- test/core/end2end/fixtures/proxy.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/core/end2end/fixtures/proxy.c b/test/core/end2end/fixtures/proxy.c index 9ad862728fe..6a2d75da09f 100644 --- a/test/core/end2end/fixtures/proxy.c +++ b/test/core/end2end/fixtures/proxy.c @@ -227,6 +227,10 @@ static void on_c2p_recv_msg(void *arg, int success) { new_closure(on_p2s_sent_close, pc), NULL); GPR_ASSERT(err == GRPC_CALL_OK); } + } else { + if (pc->c2p_msg != NULL) { + grpc_byte_buffer_destroy(pc->c2p_msg); + } } unrefpc(pc, "on_c2p_recv_msg"); From 0c867bed5bb0425c57dd539e73c790ae3995d864 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 21 Sep 2017 16:25:48 -0700 Subject: [PATCH 049/109] Add -lstdc++ flag for compiling under gcov --- Makefile | 2 +- binding.gyp | 1 + build.yaml | 2 +- grpc.gyp | 1 + 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 0c3648566a9..7e65718485f 100644 --- a/Makefile +++ b/Makefile @@ -187,7 +187,7 @@ CXX_gcov = g++ LD_gcov = gcc LDXX_gcov = g++ CPPFLAGS_gcov = -O0 -fprofile-arcs -ftest-coverage -Wno-return-type -LDFLAGS_gcov = -fprofile-arcs -ftest-coverage -rdynamic +LDFLAGS_gcov = -fprofile-arcs -ftest-coverage -rdynamic -lstdc++ DEFINES_gcov = _DEBUG DEBUG GPR_GCOV VALID_CONFIG_memcheck = 1 diff --git a/binding.gyp b/binding.gyp index 06dc731935c..3d07dda6d63 100644 --- a/binding.gyp +++ b/binding.gyp @@ -101,6 +101,7 @@ '-fprofile-arcs', '-ftest-coverage', '-rdynamic', + '-lstdc++', ], }], ['grpc_alpine=="true"', { diff --git a/build.yaml b/build.yaml index d15cf0339ca..635c908e099 100644 --- a/build.yaml +++ b/build.yaml @@ -4778,7 +4778,7 @@ configs: CXX: g++ DEFINES: _DEBUG DEBUG GPR_GCOV LD: gcc - LDFLAGS: -fprofile-arcs 
-ftest-coverage -rdynamic + LDFLAGS: -fprofile-arcs -ftest-coverage -rdynamic -lstdc++ LDXX: g++ helgrind: CPPFLAGS: -O0 diff --git a/grpc.gyp b/grpc.gyp index 48fb2ba7cf2..6be7050fcab 100644 --- a/grpc.gyp +++ b/grpc.gyp @@ -98,6 +98,7 @@ '-fprofile-arcs', '-ftest-coverage', '-rdynamic', + '-lstdc++', ], }], ['grpc_alpine=="true"', { From d5ded55d8930a2a2fe5f9cd6bc425ec50850792a Mon Sep 17 00:00:00 2001 From: Sree Kuchibhotla Date: Thu, 21 Sep 2017 17:10:20 -0700 Subject: [PATCH 050/109] delete obsolete code --- src/core/lib/iomgr/timer_generic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c index a7f3462fd2c..2472cf26be3 100644 --- a/src/core/lib/iomgr/timer_generic.c +++ b/src/core/lib/iomgr/timer_generic.c @@ -84,7 +84,6 @@ static timer_shard *g_shard_queue[NUM_SHARDS]; /* == Hash table for duplicate timer detection == */ #define NUM_HASH_BUCKETS 1009 /* Prime number close to 1000 */ -#define NUM_SLOTS_PER_BUCKET 30 static gpr_mu g_hash_mu[NUM_HASH_BUCKETS]; /* One mutex per bucket */ static grpc_timer *g_timer_ht[NUM_HASH_BUCKETS] = {NULL}; From 2046d0b3c50e555f339062b4fba298e8af8b3a1f Mon Sep 17 00:00:00 2001 From: Vijay Pai Date: Tue, 19 Sep 2017 15:30:11 -0700 Subject: [PATCH 051/109] Add unary call mode to GenericStub to allow generic RPC with 1 CQ trip --- include/grpc++/generic/generic_stub.h | 10 +++++ src/cpp/client/generic_stub.cc | 10 +++++ test/cpp/end2end/generic_end2end_test.cc | 56 ++++++++++++++++++++++++ 3 files changed, 76 insertions(+) diff --git a/include/grpc++/generic/generic_stub.h b/include/grpc++/generic/generic_stub.h index 2b3ff59ea24..d5064318cfc 100644 --- a/include/grpc++/generic/generic_stub.h +++ b/include/grpc++/generic/generic_stub.h @@ -20,6 +20,7 @@ #define GRPCXX_GENERIC_GENERIC_STUB_H #include +#include #include namespace grpc { @@ -27,6 +28,7 @@ namespace grpc { class CompletionQueue; typedef ClientAsyncReaderWriter GenericClientAsyncReaderWriter; 
+typedef ClientAsyncResponseReader GenericClientAsyncResponseReader; /// Generic stubs provide a type-unsafe interface to call gRPC methods /// by name. @@ -51,6 +53,14 @@ class GenericStub final { std::unique_ptr PrepareCall( ClientContext* context, const grpc::string& method, CompletionQueue* cq); + /// Setup a unary call to a named method \a method using \a context, and don't + /// start it. Let it be started explicitly with StartCall. + /// The return value only indicates whether or not registration of the call + /// succeeded (i.e. the call won't proceed if the return value is nullptr). + std::unique_ptr PrepareUnaryCall( + ClientContext* context, const grpc::string& method, + const ByteBuffer& request, CompletionQueue* cq); + private: std::shared_ptr channel_; }; diff --git a/src/cpp/client/generic_stub.cc b/src/cpp/client/generic_stub.cc index de2e449fe8f..693b8bea568 100644 --- a/src/cpp/client/generic_stub.cc +++ b/src/cpp/client/generic_stub.cc @@ -47,4 +47,14 @@ std::unique_ptr GenericStub::PrepareCall( return CallInternal(channel_.get(), context, method, cq, false, nullptr); } +// setup a unary call to a named method +std::unique_ptr GenericStub::PrepareUnaryCall( + ClientContext* context, const grpc::string& method, + const ByteBuffer& request, CompletionQueue* cq) { + return std::unique_ptr( + GenericClientAsyncResponseReader::Create( + channel_.get(), cq, RpcMethod(method.c_str(), RpcMethod::NORMAL_RPC), + context, request, false)); +} + } // namespace grpc diff --git a/test/cpp/end2end/generic_end2end_test.cc b/test/cpp/end2end/generic_end2end_test.cc index b9e6e18ca77..33b35108d22 100644 --- a/test/cpp/end2end/generic_end2end_test.cc +++ b/test/cpp/end2end/generic_end2end_test.cc @@ -196,6 +196,62 @@ TEST_F(GenericEnd2endTest, SequentialRpcs) { SendRpc(10); } +TEST_F(GenericEnd2endTest, SequentialUnaryRpcs) { + ResetStub(); + const int num_rpcs = 10; + const grpc::string kMethodName("/grpc.cpp.test.util.EchoTestService/Echo"); + for (int i = 0; i < 
num_rpcs; i++) { + EchoRequest send_request; + EchoRequest recv_request; + EchoResponse send_response; + EchoResponse recv_response; + Status recv_status; + + ClientContext cli_ctx; + GenericServerContext srv_ctx; + GenericServerAsyncReaderWriter stream(&srv_ctx); + + // The string needs to be long enough to test heap-based slice. + send_request.set_message("Hello world. Hello world. Hello world."); + + std::unique_ptr cli_send_buffer = + SerializeToByteBuffer(&send_request); + std::unique_ptr call = + generic_stub_->PrepareUnaryCall(&cli_ctx, kMethodName, + *cli_send_buffer.get(), &cli_cq_); + call->StartCall(); + ByteBuffer cli_recv_buffer; + call->Finish(&cli_recv_buffer, &recv_status, tag(1)); + + generic_service_.RequestCall(&srv_ctx, &stream, srv_cq_.get(), + srv_cq_.get(), tag(4)); + + verify_ok(srv_cq_.get(), 4, true); + EXPECT_EQ(server_host_, srv_ctx.host().substr(0, server_host_.length())); + EXPECT_EQ(kMethodName, srv_ctx.method()); + + ByteBuffer srv_recv_buffer; + stream.Read(&srv_recv_buffer, tag(5)); + server_ok(5); + EXPECT_TRUE(ParseFromByteBuffer(&srv_recv_buffer, &recv_request)); + EXPECT_EQ(send_request.message(), recv_request.message()); + + send_response.set_message(recv_request.message()); + std::unique_ptr srv_send_buffer = + SerializeToByteBuffer(&send_response); + stream.Write(*srv_send_buffer, tag(6)); + server_ok(6); + + stream.Finish(Status::OK, tag(7)); + server_ok(7); + + client_ok(1); + EXPECT_TRUE(ParseFromByteBuffer(&cli_recv_buffer, &recv_response)); + EXPECT_EQ(send_response.message(), recv_response.message()); + EXPECT_TRUE(recv_status.ok()); + } +} + // One ping, one pong. 
TEST_F(GenericEnd2endTest, SimpleBidiStreaming) { ResetStub(); From af3b4eecbdb6a70b6a3d4b5050c8aaffc259b823 Mon Sep 17 00:00:00 2001 From: ZhouyihaiDing Date: Thu, 21 Sep 2017 14:25:07 -0700 Subject: [PATCH 052/109] add php into jenkins performance tests --- tools/jenkins/run_full_performance.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/jenkins/run_full_performance.sh b/tools/jenkins/run_full_performance.sh index 0f101f6da9c..a9661c7e266 100755 --- a/tools/jenkins/run_full_performance.sh +++ b/tools/jenkins/run_full_performance.sh @@ -21,7 +21,7 @@ cd $(dirname $0)/../.. # run 8core client vs 8core server tools/run_tests/run_performance_tests.py \ - -l c++ csharp node ruby java python go node_express \ + -l c++ csharp node ruby java python go node_express php \ --netperf \ --category scalable \ --bq_result_table performance_test.performance_experiment \ From 604a7ca07b7aeb5ecff210ab4e854526c78ae863 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 22 Sep 2017 09:28:32 -0700 Subject: [PATCH 053/109] Reduce stack size of simple_request e2e test --- test/core/end2end/tests/simple_request.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/test/core/end2end/tests/simple_request.c b/test/core/end2end/tests/simple_request.c index 82ab0a1cfeb..7ba5bd8f469 100644 --- a/test/core/end2end/tests/simple_request.c +++ b/test/core/end2end/tests/simple_request.c @@ -103,10 +103,10 @@ static void simple_request_body(grpc_end2end_test_config config, grpc_slice details; int was_cancelled = 2; char *peer; - grpc_stats_data before; - grpc_stats_data after; + grpc_stats_data *before = gpr_malloc(sizeof(grpc_stats_data)); + grpc_stats_data *after = gpr_malloc(sizeof(grpc_stats_data)); - grpc_stats_collect(&before); + grpc_stats_collect(before); gpr_timespec deadline = five_seconds_from_now(); c = grpc_channel_create_call( @@ -214,9 +214,9 @@ static void simple_request_body(grpc_end2end_test_config config, 
cq_verifier_destroy(cqv); - grpc_stats_collect(&after); + grpc_stats_collect(after); - char *stats = grpc_stats_data_as_json(&after); + char *stats = grpc_stats_data_as_json(after); gpr_log(GPR_DEBUG, "%s", stats); gpr_free(stats); @@ -224,11 +224,11 @@ static void simple_request_body(grpc_end2end_test_config config, if (config.feature_mask & FEATURE_MASK_SUPPORTS_REQUEST_PROXYING) { expected_calls *= 2; } - GPR_ASSERT(after.counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] - - before.counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] == + GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] - + before->counters[GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED] == expected_calls); - GPR_ASSERT(after.counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] - - before.counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] == + GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] - + before->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] == expected_calls); } From 3c5203886838d08f74f20a66d9d00af23f415644 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Fri, 22 Sep 2017 09:31:03 -0700 Subject: [PATCH 054/109] Fix memory leak --- test/core/end2end/tests/simple_request.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/core/end2end/tests/simple_request.c b/test/core/end2end/tests/simple_request.c index 7ba5bd8f469..7ce7e1f2855 100644 --- a/test/core/end2end/tests/simple_request.c +++ b/test/core/end2end/tests/simple_request.c @@ -230,6 +230,8 @@ static void simple_request_body(grpc_end2end_test_config config, GPR_ASSERT(after->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] - before->counters[GRPC_STATS_COUNTER_SERVER_CALLS_CREATED] == expected_calls); + gpr_free(before); + gpr_free(after); } static void test_invoke_simple_request(grpc_end2end_test_config config) { From c111341a178e526e3b92573514db0910acc69ccb Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Fri, 22 Sep 2017 11:25:34 -0700 Subject: [PATCH 055/109] Make Python generate coverage report 
in gcov mode --- tools/run_tests/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index b66c5f7f71f..86fd7b34b21 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -639,7 +639,7 @@ class PythonLanguage(object): return [config.build for config in self.pythons] def post_tests_steps(self): - if self.config != 'gcov': + if self.config.build_config != 'gcov': return [] else: return [['tools/run_tests/helper_scripts/post_tests_python.sh']] From 60092b03d21f66d721d68af11b8d0915bc81b1ab Mon Sep 17 00:00:00 2001 From: Eric Dobson Date: Sun, 24 Sep 2017 11:09:05 -0700 Subject: [PATCH 056/109] Fix docs on grpc_call_ref --- include/grpc/grpc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/grpc/grpc.h b/include/grpc/grpc.h index fab7d438aa5..1de289fba45 100644 --- a/include/grpc/grpc.h +++ b/include/grpc/grpc.h @@ -313,7 +313,7 @@ GRPCAPI grpc_call_error grpc_call_cancel_with_status(grpc_call *call, void *reserved); /** Ref a call. - THREAD SAFETY: grpc_call_unref is thread-compatible */ + THREAD SAFETY: grpc_call_ref is thread-compatible */ GRPCAPI void grpc_call_ref(grpc_call *call); /** Unref a call. 
From eec7a917baf37ed26449b106f99027c97aedaba6 Mon Sep 17 00:00:00 2001 From: Shaun McCormick Date: Fri, 11 Aug 2017 14:57:14 -0500 Subject: [PATCH 057/109] Add Ruby server interceptors --- src/ruby/lib/grpc.rb | 1 + src/ruby/lib/grpc/generic/active_call.rb | 43 +++- src/ruby/lib/grpc/generic/bidi_call.rb | 29 ++- src/ruby/lib/grpc/generic/client_stub.rb | 133 ++++++++--- .../lib/grpc/generic/interceptor_registry.rb | 53 +++++ src/ruby/lib/grpc/generic/interceptors.rb | 186 +++++++++++++++ src/ruby/lib/grpc/generic/rpc_desc.rb | 80 +++++-- src/ruby/lib/grpc/generic/rpc_server.rb | 18 +- .../duplicate/echo_duplicate_services_pb.rb | 1 + src/ruby/spec/channel_connection_spec.rb | 35 +-- src/ruby/spec/generic/active_call_spec.rb | 19 +- .../spec/generic/client_interceptors_spec.rb | 153 ++++++++++++ .../spec/generic/interceptor_registry_spec.rb | 65 ++++++ src/ruby/spec/generic/rpc_server_spec.rb | 35 +-- .../spec/generic/server_interceptors_spec.rb | 218 ++++++++++++++++++ src/ruby/spec/spec_helper.rb | 4 + src/ruby/spec/support/helpers.rb | 73 ++++++ src/ruby/spec/support/services.rb | 147 ++++++++++++ 18 files changed, 1143 insertions(+), 150 deletions(-) create mode 100644 src/ruby/lib/grpc/generic/interceptor_registry.rb create mode 100644 src/ruby/lib/grpc/generic/interceptors.rb create mode 100644 src/ruby/spec/generic/client_interceptors_spec.rb create mode 100644 src/ruby/spec/generic/interceptor_registry_spec.rb create mode 100644 src/ruby/spec/generic/server_interceptors_spec.rb create mode 100644 src/ruby/spec/support/helpers.rb create mode 100644 src/ruby/spec/support/services.rb diff --git a/src/ruby/lib/grpc.rb b/src/ruby/lib/grpc.rb index 98bfc0a0fa0..37b03920727 100644 --- a/src/ruby/lib/grpc.rb +++ b/src/ruby/lib/grpc.rb @@ -24,6 +24,7 @@ require_relative 'grpc/generic/active_call' require_relative 'grpc/generic/client_stub' require_relative 'grpc/generic/service' require_relative 'grpc/generic/rpc_server' +require_relative 'grpc/generic/interceptors' 
begin file = File.open(ssl_roots_path) diff --git a/src/ruby/lib/grpc/generic/active_call.rb b/src/ruby/lib/grpc/generic/active_call.rb index 10eb70b4a7f..8c3aa284aae 100644 --- a/src/ruby/lib/grpc/generic/active_call.rb +++ b/src/ruby/lib/grpc/generic/active_call.rb @@ -154,6 +154,15 @@ module GRPC Operation.new(self) end + ## + # Returns a restricted view of this ActiveCall for use in interceptors + # + # @return [InterceptableView] + # + def interceptable + InterceptableView.new(self) + end + def receive_and_check_status batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil) set_input_stream_done @@ -515,15 +524,27 @@ module GRPC # This does not mean that must necessarily be one. E.g, the replies # produced by gen_each_reply could ignore the received_msgs # - # @param gen_each_reply [Proc] generates the BiDi stream replies - def run_server_bidi(gen_each_reply) - bd = BidiCall.new(@call, - @marshal, - @unmarshal, - metadata_received: @metadata_received, - req_view: MultiReqView.new(self)) - - bd.run_on_server(gen_each_reply, proc { set_input_stream_done }) + # @param mth [Proc] generates the BiDi stream replies + # @param interception_ctx [InterceptionContext] + # + def run_server_bidi(mth, interception_ctx) + view = multi_req_view + bidi_call = BidiCall.new( + @call, + @marshal, + @unmarshal, + metadata_received: @metadata_received, + req_view: view + ) + requests = bidi_call.read_next_loop(proc { set_input_stream_done }, false) + interception_ctx.intercept!( + :bidi_streamer, + call: view, + method: mth, + requests: requests + ) do + bidi_call.run_on_server(mth, requests) + end end # Waits till an operation completes @@ -645,5 +666,9 @@ module GRPC Operation = view_class(:cancel, :cancelled?, :deadline, :execute, :metadata, :status, :start_call, :wait, :write_flag, :write_flag=, :trailing_metadata) + + # InterceptableView further limits access to an ActiveCall's methods + # for use in interceptors on the client, exposing only the deadline + 
InterceptableView = view_class(:deadline) end end diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb index c2239d0178e..3bdcc0062ee 100644 --- a/src/ruby/lib/grpc/generic/bidi_call.rb +++ b/src/ruby/lib/grpc/generic/bidi_call.rb @@ -87,23 +87,32 @@ module GRPC # This does not mean that must necessarily be one. E.g, the replies # produced by gen_each_reply could ignore the received_msgs # - # @param gen_each_reply [Proc] generates the BiDi stream replies. - # @param set_input_steam_done [Proc] call back to call when - # the reads have been completely read through. - def run_on_server(gen_each_reply, set_input_stream_done) + # @param [Proc] gen_each_reply generates the BiDi stream replies. + # @param [Enumerable] requests The enumerable of requests to run + def run_on_server(gen_each_reply, requests) + replies = nil + # Pass in the optional call object parameter if possible if gen_each_reply.arity == 1 - replys = gen_each_reply.call( - read_loop(set_input_stream_done, is_client: false)) + replies = gen_each_reply.call(requests) elsif gen_each_reply.arity == 2 - replys = gen_each_reply.call( - read_loop(set_input_stream_done, is_client: false), - @req_view) + replies = gen_each_reply.call(requests, @req_view) else fail 'Illegal arity of reply generator' end - write_loop(replys, is_client: false) + write_loop(replies, is_client: false) + end + + ## + # Read the next stream iteration + # + # @param [Proc] finalize_stream callback to call when the reads have been + # completely read through. 
+ # @param [Boolean] is_client If this is a client or server request + # + def read_next_loop(finalize_stream, is_client = false) + read_loop(finalize_stream, is_client: is_client) end private diff --git a/src/ruby/lib/grpc/generic/client_stub.rb b/src/ruby/lib/grpc/generic/client_stub.rb index 75a95a4e940..9a50f8a99dc 100644 --- a/src/ruby/lib/grpc/generic/client_stub.rb +++ b/src/ruby/lib/grpc/generic/client_stub.rb @@ -89,17 +89,23 @@ module GRPC # used within a gRPC server. # @param channel_args [Hash] the channel arguments. Note: this argument is # ignored if the channel_override argument is provided. + # @param interceptors [Array] An array of + # GRPC::ClientInterceptor objects that will be used for + # intercepting calls before they are executed + # Interceptors are an EXPERIMENTAL API. def initialize(host, creds, channel_override: nil, timeout: nil, propagate_mask: nil, - channel_args: {}) + channel_args: {}, + interceptors: []) @ch = ClientStub.setup_channel(channel_override, host, creds, channel_args) alt_host = channel_args[Core::Channel::SSL_TARGET] @host = alt_host.nil? ? host : alt_host @propagate_mask = propagate_mask @timeout = timeout.nil? ? DEFAULT_TIMEOUT : timeout + @interceptors = InterceptorRegistry.new(interceptors) end # request_response sends a request to a GRPC server, and returns the @@ -149,16 +155,29 @@ module GRPC deadline: deadline, parent: parent, credentials: credentials) - return c.request_response(req, metadata: metadata) unless return_op - - # return the operation view of the active_call; define #execute as a - # new method for this instance that invokes #request_response. 
- c.merge_metadata_to_send(metadata) - op = c.operation - op.define_singleton_method(:execute) do - c.request_response(req, metadata: metadata) + interception_context = @interceptors.build_context + intercept_args = { + method: method, + request: req, + call: c.interceptable, + metadata: metadata + } + if return_op + # return the operation view of the active_call; define #execute as a + # new method for this instance that invokes #request_response. + c.merge_metadata_to_send(metadata) + op = c.operation + op.define_singleton_method(:execute) do + interception_context.intercept!(:request_response, intercept_args) do + c.request_response(req, metadata: metadata) + end + end + op + else + interception_context.intercept!(:request_response, intercept_args) do + c.request_response(req, metadata: metadata) + end end - op end # client_streamer sends a stream of requests to a GRPC server, and @@ -213,16 +232,29 @@ module GRPC deadline: deadline, parent: parent, credentials: credentials) - return c.client_streamer(requests, metadata: metadata) unless return_op - - # return the operation view of the active_call; define #execute as a - # new method for this instance that invokes #client_streamer. - c.merge_metadata_to_send(metadata) - op = c.operation - op.define_singleton_method(:execute) do - c.client_streamer(requests) + interception_context = @interceptors.build_context + intercept_args = { + method: method, + requests: requests, + call: c.interceptable, + metadata: metadata + } + if return_op + # return the operation view of the active_call; define #execute as a + # new method for this instance that invokes #client_streamer. 
+ c.merge_metadata_to_send(metadata) + op = c.operation + op.define_singleton_method(:execute) do + interception_context.intercept!(:client_streamer, intercept_args) do + c.client_streamer(requests) + end + end + op + else + interception_context.intercept!(:client_streamer, intercept_args) do + c.client_streamer(requests, metadata: metadata) + end end - op end # server_streamer sends one request to the GRPC server, which yields a @@ -292,16 +324,29 @@ module GRPC deadline: deadline, parent: parent, credentials: credentials) - return c.server_streamer(req, metadata: metadata, &blk) unless return_op - - # return the operation view of the active_call; define #execute - # as a new method for this instance that invokes #server_streamer - c.merge_metadata_to_send(metadata) - op = c.operation - op.define_singleton_method(:execute) do - c.server_streamer(req, &blk) + interception_context = @interceptors.build_context + intercept_args = { + method: method, + request: req, + call: c.interceptable, + metadata: metadata + } + if return_op + # return the operation view of the active_call; define #execute + # as a new method for this instance that invokes #server_streamer + c.merge_metadata_to_send(metadata) + op = c.operation + op.define_singleton_method(:execute) do + interception_context.intercept!(:server_streamer, intercept_args) do + c.server_streamer(req, &blk) + end + end + op + else + interception_context.intercept!(:server_streamer, intercept_args) do + c.server_streamer(req, metadata: metadata, &blk) + end end - op end # bidi_streamer sends a stream of requests to the GRPC server, and yields @@ -405,17 +450,29 @@ module GRPC deadline: deadline, parent: parent, credentials: credentials) - return c.bidi_streamer(requests, metadata: metadata, - &blk) unless return_op - - # return the operation view of the active_call; define #execute - # as a new method for this instance that invokes #bidi_streamer - c.merge_metadata_to_send(metadata) - op = c.operation - 
op.define_singleton_method(:execute) do - c.bidi_streamer(requests, &blk) + interception_context = @interceptors.build_context + intercept_args = { + method: method, + requests: requests, + call: c.interceptable, + metadata: metadata + } + if return_op + # return the operation view of the active_call; define #execute + # as a new method for this instance that invokes #bidi_streamer + c.merge_metadata_to_send(metadata) + op = c.operation + op.define_singleton_method(:execute) do + interception_context.intercept!(:bidi_streamer, intercept_args) do + c.bidi_streamer(requests, &blk) + end + end + op + else + interception_context.intercept!(:bidi_streamer, intercept_args) do + c.bidi_streamer(requests, metadata: metadata, &blk) + end end - op end private diff --git a/src/ruby/lib/grpc/generic/interceptor_registry.rb b/src/ruby/lib/grpc/generic/interceptor_registry.rb new file mode 100644 index 00000000000..b241eb9a86d --- /dev/null +++ b/src/ruby/lib/grpc/generic/interceptor_registry.rb @@ -0,0 +1,53 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# GRPC contains the General RPC module. +module GRPC + ## + # Represents a registry of added interceptors available for enumeration. + # The registry can be used for both server and client interceptors. + # This class is internal to gRPC and not meant for public usage. 
+ # + class InterceptorRegistry + ## + # An error raised when an interceptor is attempted to be added + # that does not extend GRPC::Interceptor + # + class DescendantError < StandardError; end + + ## + # Initialize the registry with an empty interceptor list + # This is an EXPERIMENTAL API. + # + def initialize(interceptors = []) + @interceptors = [] + interceptors.each do |i| + base = GRPC::Interceptor + unless i.class.ancestors.include?(base) + fail DescendantError, "Interceptors must descend from #{base}" + end + @interceptors << i + end + end + + ## + # Builds an interception context from this registry + # + # @return [InterceptionContext] + # + def build_context + InterceptionContext.new(@interceptors) + end + end +end diff --git a/src/ruby/lib/grpc/generic/interceptors.rb b/src/ruby/lib/grpc/generic/interceptors.rb new file mode 100644 index 00000000000..73faec4b9c7 --- /dev/null +++ b/src/ruby/lib/grpc/generic/interceptors.rb @@ -0,0 +1,186 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +require_relative 'interceptor_registry' + +# GRPC contains the General RPC module. +module GRPC + ## + # Base class for interception in GRPC + # + class Interceptor + ## + # @param [Hash] options A hash of options that will be used + # by the interceptor. This is an EXPERIMENTAL API. + # + def initialize(options = {}) + @options = options || {} + end + end + + ## + # ClientInterceptor allows for wrapping outbound gRPC client stub requests. 
+ # This is an EXPERIMENTAL API. + # + class ClientInterceptor < Interceptor + ## + # Intercept a unary request response call + # + # @param [Object] request + # @param [GRPC::ActiveCall] call + # @param [Method] method + # @param [Hash] metadata + # + def request_response(request:, call:, method:, metadata:) + GRPC.logger.debug "Intercepting request response method #{method}" \ + " for request #{request} with call #{call} and metadata: #{metadata}" + yield + end + + ## + # Intercept a client streaming call + # + # @param [Enumerable] requests + # @param [GRPC::ActiveCall] call + # @param [Method] method + # @param [Hash] metadata + # + def client_streamer(requests:, call:, method:, metadata:) + GRPC.logger.debug "Intercepting client streamer method #{method}" \ + " for requests #{requests} with call #{call} and metadata: #{metadata}" + yield + end + + ## + # Intercept a server streaming call + # + # @param [Object] request + # @param [GRPC::ActiveCall] call + # @param [Method] method + # @param [Hash] metadata + # + def server_streamer(request:, call:, method:, metadata:) + GRPC.logger.debug "Intercepting server streamer method #{method}" \ + " for request #{request} with call #{call} and metadata: #{metadata}" + yield + end + + ## + # Intercept a BiDi streaming call + # + # @param [Enumerable] requests + # @param [GRPC::ActiveCall] call + # @param [Method] method + # @param [Hash] metadata + # + def bidi_streamer(requests:, call:, method:, metadata:) + GRPC.logger.debug "Intercepting bidi streamer method #{method}" \ + " for requests #{requests} with call #{call} and metadata: #{metadata}" + yield + end + end + + ## + # ServerInterceptor allows for wrapping gRPC server execution handling. + # This is an EXPERIMENTAL API. + # + class ServerInterceptor < Interceptor + ## + # Intercept a unary request response call. 
+ # + # @param [Object] request + # @param [GRPC::ActiveCall::SingleReqView] call + # @param [Method] method + # + def request_response(request:, call:, method:) + GRPC.logger.debug "Intercepting request response method #{method}" \ + " for request #{request} with call #{call}" + yield + end + + ## + # Intercept a client streaming call + # + # @param [GRPC::ActiveCall::MultiReqView] call + # @param [Method] method + # + def client_streamer(call:, method:) + GRPC.logger.debug "Intercepting client streamer method #{method}" \ + " with call #{call}" + yield + end + + ## + # Intercept a server streaming call + # + # @param [Object] request + # @param [GRPC::ActiveCall::SingleReqView] call + # @param [Method] method + # + def server_streamer(request:, call:, method:) + GRPC.logger.debug "Intercepting server streamer method #{method}" \ + " for request #{request} with call #{call}" + yield + end + + ## + # Intercept a BiDi streaming call + # + # @param [Enumerable] requests + # @param [GRPC::ActiveCall::MultiReqView] call + # @param [Method] method + # + def bidi_streamer(requests:, call:, method:) + GRPC.logger.debug "Intercepting bidi streamer method #{method}" \ + " for requests #{requests} with call #{call}" + yield + end + end + + ## + # Represents the context in which an interceptor runs. Used to provide an + # injectable mechanism for handling interception. This is an EXPERIMENTAL API. + # + class InterceptionContext + ## + # @param [Array] + # + def initialize(interceptors = []) + @interceptors = interceptors.dup + end + + ## + # Intercept the call and fire out to interceptors in a FIFO execution. + # This is an EXPERIMENTAL API. + # + # @param [Symbol] type The request type + # @param [Hash] args The arguments for the call + # + def intercept!(type, args = {}) + return yield if @interceptors.none? + + i = @interceptors.pop + return yield unless i + + i.send(type, args) do + if @interceptors.any? 
+ intercept!(type, args) do + yield + end + else + yield + end + end + end + end +end diff --git a/src/ruby/lib/grpc/generic/rpc_desc.rb b/src/ruby/lib/grpc/generic/rpc_desc.rb index 6fb6c412fb1..5fd1805aabf 100644 --- a/src/ruby/lib/grpc/generic/rpc_desc.rb +++ b/src/ruby/lib/grpc/generic/rpc_desc.rb @@ -47,43 +47,85 @@ module GRPC proc { |o| unmarshal_class.method(unmarshal_method).call(o) } end - def handle_request_response(active_call, mth) + def handle_request_response(active_call, mth, inter_ctx) req = active_call.read_unary_request - resp = mth.call(req, active_call.single_req_view) - active_call.server_unary_response( - resp, trailing_metadata: active_call.output_metadata) + call = active_call.single_req_view + + inter_ctx.intercept!( + :request_response, + method: mth, + call: call, + request: req + ) do + resp = mth.call(req, call) + active_call.server_unary_response( + resp, + trailing_metadata: active_call.output_metadata + ) + end end - def handle_client_streamer(active_call, mth) - resp = mth.call(active_call.multi_req_view) - active_call.server_unary_response( - resp, trailing_metadata: active_call.output_metadata) + def handle_client_streamer(active_call, mth, inter_ctx) + call = active_call.multi_req_view + + inter_ctx.intercept!( + :client_streamer, + method: mth, + call: call + ) do + resp = mth.call(call) + active_call.server_unary_response( + resp, + trailing_metadata: active_call.output_metadata + ) + end end - def handle_server_streamer(active_call, mth) + def handle_server_streamer(active_call, mth, inter_ctx) req = active_call.read_unary_request - replys = mth.call(req, active_call.single_req_view) - replys.each { |r| active_call.remote_send(r) } - send_status(active_call, OK, 'OK', active_call.output_metadata) + call = active_call.single_req_view + + inter_ctx.intercept!( + :server_streamer, + method: mth, + call: call, + request: req + ) do + replies = mth.call(req, call) + replies.each { |r| active_call.remote_send(r) } + 
send_status(active_call, OK, 'OK', active_call.output_metadata) + end end - def handle_bidi_streamer(active_call, mth) - active_call.run_server_bidi(mth) + ## + # @param [GRPC::ActiveCall] active_call + # @param [Method] mth + # @param [Array] inter_ctx + # + def handle_bidi_streamer(active_call, mth, inter_ctx) + active_call.run_server_bidi(mth, inter_ctx) send_status(active_call, OK, 'OK', active_call.output_metadata) end - def run_server_method(active_call, mth) + ## + # @param [GRPC::ActiveCall] active_call The current active call object + # for the request + # @param [Method] mth The current RPC method being called + # @param [GRPC::InterceptionContext] inter_ctx The interception context + # being executed + # + def run_server_method(active_call, mth, inter_ctx = InterceptionContext.new) # While a server method is running, it might be cancelled, its deadline # might be reached, the handler could throw an unknown error, or a # well-behaved handler could throw a StatusError. if request_response? - handle_request_response(active_call, mth) + handle_request_response(active_call, mth, inter_ctx) elsif client_streamer? - handle_client_streamer(active_call, mth) + handle_client_streamer(active_call, mth, inter_ctx) elsif server_streamer? 
- handle_server_streamer(active_call, mth) + handle_server_streamer(active_call, mth, inter_ctx) else # is a bidi_stream - handle_bidi_streamer(active_call, mth) + handle_bidi_streamer(active_call, mth, inter_ctx) end rescue BadStatus => e # this is raised by handlers that want GRPC to send an application error diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index 33b3cea1fc3..d5fc11dc1ca 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -196,11 +196,18 @@ module GRPC # # * server_args: # A server arguments hash to be passed down to the underlying core server + # + # * interceptors: + # Am array of GRPC::ServerInterceptor objects that will be used for + # intercepting server handlers to provide extra functionality. + # Interceptors are an EXPERIMENTAL API. + # def initialize(pool_size:DEFAULT_POOL_SIZE, max_waiting_requests:DEFAULT_MAX_WAITING_REQUESTS, poll_period:DEFAULT_POLL_PERIOD, connect_md_proc:nil, - server_args:{}) + server_args:{}, + interceptors:[]) @connect_md_proc = RpcServer.setup_connect_md_proc(connect_md_proc) @max_waiting_requests = max_waiting_requests @poll_period = poll_period @@ -212,6 +219,7 @@ module GRPC # :stopped. State transitions can only proceed in that order. @running_state = :not_started @server = Core::Server.new(server_args) + @interceptors = InterceptorRegistry.new(interceptors) end # stops a running server @@ -374,7 +382,11 @@ module GRPC @pool.schedule(active_call) do |ac| c, mth = ac begin - rpc_descs[mth].run_server_method(c, rpc_handlers[mth]) + rpc_descs[mth].run_server_method( + c, + rpc_handlers[mth], + @interceptors.build_context + ) rescue StandardError c.send_status(GRPC::Core::StatusCodes::INTERNAL, 'Server handler failed') @@ -382,7 +394,7 @@ module GRPC end end rescue Core::CallError, RuntimeError => e - # these might happen for various reasonse. The correct behaviour of + # these might happen for various reasons. 
The correct behavior of # the server is to log them and continue, if it's not shutting down. if running_state == :running GRPC.logger.warn("server call failed: #{e}") diff --git a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb index 683370121ea..ab50d9b3a58 100644 --- a/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb +++ b/src/ruby/pb/grpc/testing/duplicate/echo_duplicate_services_pb.rb @@ -34,6 +34,7 @@ module Grpc self.service_name = 'grpc.testing.duplicate.EchoTestService' rpc :Echo, Grpc::Testing::EchoRequest, Grpc::Testing::EchoResponse + rpc :ResponseStream, Grpc::Testing::EchoRequest, stream(Grpc::Testing::EchoResponse) end Stub = Service.rpc_stub_class diff --git a/src/ruby/spec/channel_connection_spec.rb b/src/ruby/spec/channel_connection_spec.rb index c76056606bd..ce3e3b1c935 100644 --- a/src/ruby/spec/channel_connection_spec.rb +++ b/src/ruby/spec/channel_connection_spec.rb @@ -11,45 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -require 'grpc' +require 'spec_helper' require 'timeout' include Timeout include GRPC::Core -# A test message -class EchoMsg - def self.marshal(_o) - '' - end - - def self.unmarshal(_o) - EchoMsg.new - end -end - -# A test service with an echo implementation. -class EchoService - include GRPC::GenericService - rpc :an_rpc, EchoMsg, EchoMsg - attr_reader :received_md - - def initialize(**kw) - @trailing_metadata = kw - @received_md = [] - end - - def an_rpc(req, call) - GRPC.logger.info('echo service received a request') - call.output_metadata.update(@trailing_metadata) - @received_md << call.metadata unless call.metadata.nil? 
- req - end -end - -EchoStub = EchoService.rpc_stub_class - def start_server(port = 0) @srv = GRPC::RpcServer.new(pool_size: 1) server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure) diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb index a00df9236d2..120acc35afb 100644 --- a/src/ruby/spec/generic/active_call_spec.rb +++ b/src/ruby/spec/generic/active_call_spec.rb @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -require 'grpc' +require 'spec_helper' include GRPC::Core::StatusCodes @@ -82,6 +82,16 @@ describe GRPC::ActiveCall do end end end + + describe '#interceptable' do + it 'exposes a fixed subset of the ActiveCall.methods' do + want = %w(deadline) + v = @client_call.interceptable + want.each do |w| + expect(v.methods.include?(w)) + end + end + end end describe '#remote_send' do @@ -609,9 +619,11 @@ describe GRPC::ActiveCall do msgs end + int_ctx = GRPC::InterceptionContext.new + @server_thread = Thread.new do @server_call.run_server_bidi( - fake_gen_each_reply_with_no_call_param) + fake_gen_each_reply_with_no_call_param, int_ctx) @server_call.send_status(@server_status) end end @@ -624,10 +636,11 @@ describe GRPC::ActiveCall do call_param.send_initial_metadata msgs end + int_ctx = GRPC::InterceptionContext.new @server_thread = Thread.new do @server_call.run_server_bidi( - fake_gen_each_reply_with_call_param) + fake_gen_each_reply_with_call_param, int_ctx) @server_call.send_status(@server_status) end end diff --git a/src/ruby/spec/generic/client_interceptors_spec.rb b/src/ruby/spec/generic/client_interceptors_spec.rb new file mode 100644 index 00000000000..f292715e4df --- /dev/null +++ b/src/ruby/spec/generic/client_interceptors_spec.rb @@ -0,0 +1,153 @@ +# Copyright 2017 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +require 'spec_helper' + +describe 'Client Interceptors' do + let(:interceptor) { TestClientInterceptor.new } + let(:interceptors_opts) { { interceptors: [interceptor] } } + let(:request) { EchoMsg.new } + let(:service) { EchoService } + + before(:each) do + build_rpc_server + end + + context 'when a client interceptor is added' do + context 'with a request/response call' do + it 'should be called', server: true do + expect(interceptor).to receive(:request_response) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response) + .once.and_call_original + expect(stub.an_rpc(request)).to be_a(EchoMsg) + end + end + + it 'can modify outgoing metadata', server: true do + expect(interceptor).to receive(:request_response) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + expect_any_instance_of(GRPC::ActiveCall).to receive(:request_response) + .with(request, metadata: { 'foo' => 'bar_from_request_response' }) + .once.and_call_original + expect(stub.an_rpc(request)).to be_a(EchoMsg) + end + end + end + + context 'with a client streaming call' do + it 'should be called', server: true do + expect(interceptor).to receive(:client_streamer) + .once.and_call_original + + 
run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer) + .once.and_call_original + requests = [EchoMsg.new, EchoMsg.new] + expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg) + end + end + + it 'can modify outgoing metadata', server: true do + expect(interceptor).to receive(:client_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + requests = [EchoMsg.new, EchoMsg.new] + expect_any_instance_of(GRPC::ActiveCall).to receive(:client_streamer) + .with(requests, metadata: { 'foo' => 'bar_from_client_streamer' }) + .once.and_call_original + expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg) + end + end + end + + context 'with a server streaming call' do + it 'should be called', server: true do + expect(interceptor).to receive(:server_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + request = EchoMsg.new + expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer) + .once.and_call_original + responses = stub.a_server_streaming_rpc(request) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + + it 'can modify outgoing metadata', server: true do + expect(interceptor).to receive(:server_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + request = EchoMsg.new + expect_any_instance_of(GRPC::ActiveCall).to receive(:server_streamer) + .with(request, metadata: { 'foo' => 'bar_from_server_streamer' }) + .once.and_call_original + responses = stub.a_server_streaming_rpc(request) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + end + + context 
'with a bidi call' do + it 'should be called', server: true do + expect(interceptor).to receive(:bidi_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer) + .once.and_call_original + requests = [EchoMsg.new, EchoMsg.new] + responses = stub.a_bidi_rpc(requests) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + + it 'can modify outgoing metadata', server: true do + expect(interceptor).to receive(:bidi_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub, opts: interceptors_opts) + requests = [EchoMsg.new, EchoMsg.new] + expect_any_instance_of(GRPC::ActiveCall).to receive(:bidi_streamer) + .with(requests, metadata: { 'foo' => 'bar_from_bidi_streamer' }) + .once.and_call_original + responses = stub.a_bidi_rpc(requests) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + end + end +end diff --git a/src/ruby/spec/generic/interceptor_registry_spec.rb b/src/ruby/spec/generic/interceptor_registry_spec.rb new file mode 100644 index 00000000000..f93f5cec096 --- /dev/null +++ b/src/ruby/spec/generic/interceptor_registry_spec.rb @@ -0,0 +1,65 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+require 'spec_helper' + +describe GRPC::InterceptorRegistry do + let(:server) { RpcServer.new } + let(:interceptor) { TestServerInterceptor.new } + let(:interceptors) { [interceptor] } + let(:registry) { described_class.new(interceptors) } + + describe 'initialization' do + subject { registry } + + context 'with an interceptor extending GRPC::ServerInterceptor' do + it 'should add the interceptor to the registry' do + subject + is = registry.instance_variable_get('@interceptors') + expect(is.count).to eq 1 + expect(is.first).to eq interceptor + end + end + + context 'with multiple interceptors' do + let(:interceptor2) { TestServerInterceptor.new } + let(:interceptor3) { TestServerInterceptor.new } + let(:interceptors) { [interceptor, interceptor2, interceptor3] } + + it 'should maintain order of insertion when iterated against' do + subject + is = registry.instance_variable_get('@interceptors') + expect(is.count).to eq 3 + is.each_with_index do |i, idx| + case idx + when 0 + expect(i).to eq interceptor + when 1 + expect(i).to eq interceptor2 + when 2 + expect(i).to eq interceptor3 + end + end + end + end + + context 'with an interceptor not extending GRPC::ServerInterceptor' do + let(:interceptor) { Class } + let(:err) { GRPC::InterceptorRegistry::DescendantError } + + it 'should raise an InvalidArgument exception' do + expect { subject }.to raise_error(err) + end + end + end +end diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb index b887eaaf4e9..05059fbecf0 100644 --- a/src/ruby/spec/generic/rpc_server_spec.rb +++ b/src/ruby/spec/generic/rpc_server_spec.rb @@ -11,8 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -require 'grpc' +require 'spec_helper' def load_test_certs test_root = File.join(File.dirname(File.dirname(__FILE__)), 'testdata') @@ -28,17 +27,6 @@ def check_md(wanted_md, received_md) end end -# A test message -class EchoMsg - def self.marshal(_o) - '' - end - - def self.unmarshal(_o) - EchoMsg.new - end -end - # A test service with no methods. class EmptyService include GRPC::GenericService @@ -50,27 +38,6 @@ class NoRpcImplementation rpc :an_rpc, EchoMsg, EchoMsg end -# A test service with an echo implementation. -class EchoService - include GRPC::GenericService - rpc :an_rpc, EchoMsg, EchoMsg - attr_reader :received_md - - def initialize(**kw) - @trailing_metadata = kw - @received_md = [] - end - - def an_rpc(req, call) - GRPC.logger.info('echo service received a request') - call.output_metadata.update(@trailing_metadata) - @received_md << call.metadata unless call.metadata.nil? - req - end -end - -EchoStub = EchoService.rpc_stub_class - # A test service with an implementation that fails with BadStatus class FailingService include GRPC::GenericService diff --git a/src/ruby/spec/generic/server_interceptors_spec.rb b/src/ruby/spec/generic/server_interceptors_spec.rb new file mode 100644 index 00000000000..eb866860843 --- /dev/null +++ b/src/ruby/spec/generic/server_interceptors_spec.rb @@ -0,0 +1,218 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+require 'spec_helper' + +describe 'Server Interceptors' do + let(:interceptor) { TestServerInterceptor.new } + let(:request) { EchoMsg.new } + let(:trailing_metadata) { {} } + let(:service) { EchoService.new(trailing_metadata) } + let(:interceptors) { [] } + + before(:each) do + build_rpc_server(server_opts: { interceptors: interceptors }) + end + + context 'when a server interceptor is added' do + let(:interceptors) { [interceptor] } + let(:client_metadata) { { client_md: 'test' } } + let(:client_call_opts) { { metadata: client_metadata, return_op: true } } + + context 'with a request/response call' do + let(:trailing_metadata) { { server_om: 'from_request_response' } } + + it 'should be called', server: true do + expect(interceptor).to receive(:request_response) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect(stub.an_rpc(request)).to be_a(EchoMsg) + end + end + + it 'can modify trailing metadata', server: true do + expect(interceptor).to receive(:request_response) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect_any_instance_of(GRPC::ActiveCall).to( + receive(:request_response).with(request, metadata: client_metadata) + .once.and_call_original + ) + op = stub.an_rpc(request, client_call_opts) + msg = op.execute + expect(op.trailing_metadata).to eq( + 'interc' => 'from_request_response', + 'server_om' => 'from_request_response' + ) + expect(msg).to be_a(EchoMsg) + end + end + end + + context 'with a client streaming call' do + let(:trailing_metadata) { { server_om: 'from_client_streamer' } } + let(:requests) { [EchoMsg.new, EchoMsg.new] } + + it 'should be called', server: true do + expect(interceptor).to receive(:client_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + 
expect(stub.a_client_streaming_rpc(requests)).to be_a(EchoMsg) + end + end + + it 'can modify trailing metadata', server: true do + expect(interceptor).to receive(:client_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect_any_instance_of(GRPC::ActiveCall).to( + receive(:client_streamer).with(requests) + .once.and_call_original + ) + op = stub.a_client_streaming_rpc(requests, client_call_opts) + msg = op.execute + expect(op.trailing_metadata).to eq( + 'interc' => 'from_client_streamer', + 'server_om' => 'from_client_streamer' + ) + expect(msg).to be_a(EchoMsg) + end + end + end + + context 'with a server streaming call' do + let(:trailing_metadata) { { server_om: 'from_server_streamer' } } + let(:request) { EchoMsg.new } + + it 'should be called', server: true do + expect(interceptor).to receive(:server_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + responses = stub.a_server_streaming_rpc(request) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + + it 'can modify trailing metadata', server: true do + expect(interceptor).to receive(:server_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect_any_instance_of(GRPC::ActiveCall).to( + receive(:server_streamer).with(request) + .once.and_call_original + ) + op = stub.a_server_streaming_rpc(request, client_call_opts) + responses = op.execute + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + expect(op.trailing_metadata).to eq( + 'interc' => 'from_server_streamer', + 'server_om' => 'from_server_streamer' + ) + end + end + end + + context 'with a bidi call' do + let(:trailing_metadata) { { server_om: 'from_bidi_streamer' } } + let(:requests) { [EchoMsg.new, EchoMsg.new] } + + it 'should be called', server: true do + 
expect(interceptor).to receive(:bidi_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + responses = stub.a_bidi_rpc(requests) + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + end + end + + it 'can modify trailing metadata', server: true do + expect(interceptor).to receive(:bidi_streamer) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect_any_instance_of(GRPC::ActiveCall).to( + receive(:bidi_streamer).with(requests) + .once.and_call_original + ) + op = stub.a_bidi_rpc(requests, client_call_opts) + responses = op.execute + responses.each do |r| + expect(r).to be_a(EchoMsg) + end + expect(op.trailing_metadata).to eq( + 'interc' => 'from_bidi_streamer', + 'server_om' => 'from_bidi_streamer' + ) + end + end + end + end + + context 'when multiple interceptors are added' do + let(:interceptor2) { TestServerInterceptor.new } + let(:interceptor3) { TestServerInterceptor.new } + let(:interceptors) do + [ + interceptor, + interceptor2, + interceptor3 + ] + end + + it 'each should be called', server: true do + expect(interceptor).to receive(:request_response) + .once.and_call_original + expect(interceptor2).to receive(:request_response) + .once.and_call_original + expect(interceptor3).to receive(:request_response) + .once.and_call_original + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect(stub.an_rpc(request)).to be_a(EchoMsg) + end + end + end + + context 'when an interceptor is not added' do + it 'should not be called', server: true do + expect(interceptor).to_not receive(:call) + + run_services_on_server(@server, services: [service]) do + stub = build_insecure_stub(EchoStub) + expect(stub.an_rpc(request)).to be_a(EchoMsg) + end + end + end +end diff --git a/src/ruby/spec/spec_helper.rb b/src/ruby/spec/spec_helper.rb index 
6e1eba19455..8fe9e6e808c 100644 --- a/src/ruby/spec/spec_helper.rb +++ b/src/ruby/spec/spec_helper.rb @@ -32,6 +32,9 @@ require 'rspec' require 'logging' require 'rspec/logging_helper' +require_relative 'support/services' +require_relative 'support/helpers' + # GRPC is the general RPC module # # Configure its logging for fine-grained log control during test runs @@ -49,6 +52,7 @@ Logging.logger['GRPC::BidiCall'].level = :info RSpec.configure do |config| include RSpec::LoggingHelper config.capture_log_messages # comment this out to see logs during test runs + include GRPC::Spec::Helpers end RSpec::Expectations.configuration.warn_about_potential_false_positives = false diff --git a/src/ruby/spec/support/helpers.rb b/src/ruby/spec/support/helpers.rb new file mode 100644 index 00000000000..65fffff9e7e --- /dev/null +++ b/src/ruby/spec/support/helpers.rb @@ -0,0 +1,73 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# GRPC contains the General RPC module. 
+module GRPC + ## + # GRPC RSpec base module + # + module Spec + ## + # A module that is used for providing generic helpers across the + # GRPC test suite + # + module Helpers + # Shortcut syntax for a GRPC RPC Server + RpcServer = GRPC::RpcServer + + ## + # Build an RPC server used for testing + # + def build_rpc_server(server_opts: {}, + client_opts: {}) + @server = RpcServer.new({ poll_period: 1 }.merge(server_opts)) + @port = @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure) + @host = "0.0.0.0:#{@port}" + @client_opts = client_opts + @server + end + + ## + # Run services on an RPC server, yielding to allow testing within + # + # @param [RpcServer] server + # @param [Array] services + # + def run_services_on_server(server, services: []) + services.each do |s| + server.handle(s) + end + t = Thread.new { server.run } + server.wait_till_running + + yield + + server.stop + t.join + end + + ## + # Build an insecure stub from a given stub class + # + # @param [Class] klass + # @param [String] host + # + def build_insecure_stub(klass, host: nil, opts: nil) + host ||= @host + opts ||= @client_opts + klass.new(host, :this_channel_is_insecure, **opts) + end + end + end +end diff --git a/src/ruby/spec/support/services.rb b/src/ruby/spec/support/services.rb new file mode 100644 index 00000000000..27cc8e61acf --- /dev/null +++ b/src/ruby/spec/support/services.rb @@ -0,0 +1,147 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Test stubs for various scenarios +require 'grpc' + +# A test message +class EchoMsg + def self.marshal(_o) + '' + end + + def self.unmarshal(_o) + EchoMsg.new + end +end + +# A test service with an echo implementation. +class EchoService + include GRPC::GenericService + rpc :an_rpc, EchoMsg, EchoMsg + rpc :a_client_streaming_rpc, stream(EchoMsg), EchoMsg + rpc :a_server_streaming_rpc, EchoMsg, stream(EchoMsg) + rpc :a_bidi_rpc, stream(EchoMsg), stream(EchoMsg) + attr_reader :received_md + + def initialize(**kw) + @trailing_metadata = kw + @received_md = [] + end + + def an_rpc(req, call) + GRPC.logger.info('echo service received a request') + call.output_metadata.update(@trailing_metadata) + @received_md << call.metadata unless call.metadata.nil? + req + end + + def a_client_streaming_rpc(call) + # iterate through requests so call can complete + call.output_metadata.update(@trailing_metadata) + call.each_remote_read.each { |r| p r } + EchoMsg.new + end + + def a_server_streaming_rpc(_req, call) + call.output_metadata.update(@trailing_metadata) + [EchoMsg.new, EchoMsg.new] + end + + def a_bidi_rpc(requests, call) + call.output_metadata.update(@trailing_metadata) + requests.each { |r| p r } + [EchoMsg.new, EchoMsg.new] + end +end + +EchoStub = EchoService.rpc_stub_class + +# For testing server interceptors +class TestServerInterceptor < GRPC::ServerInterceptor + def request_response(request:, call:, method:) + p "Received request/response call at method #{method}" \ + " with request #{request} for call #{call}" + call.output_metadata[:interc] = 'from_request_response' + p "[GRPC::Ok] (#{method.owner.name}.#{method.name})" + yield + end + + def client_streamer(call:, method:) + call.output_metadata[:interc] = 'from_client_streamer' + call.each_remote_read.each do |r| + p "In interceptor: #{r}" + end + p "Received client streamer call at method #{method} for call #{call}" + yield + end + + def server_streamer(request:, call:, method:) + p "Received server streamer 
call at method #{method} with request" \ + " #{request} for call #{call}" + call.output_metadata[:interc] = 'from_server_streamer' + yield + end + + def bidi_streamer(requests:, call:, method:) + requests.each do |r| + p "Bidi request: #{r}" + end + p "Received bidi streamer call at method #{method} with requests" \ + " #{requests} for call #{call}" + call.output_metadata[:interc] = 'from_bidi_streamer' + yield + end +end + +# For testing client interceptors +class TestClientInterceptor < GRPC::ClientInterceptor + def request_response(request:, call:, method:, metadata: {}) + p "Intercepted request/response call at method #{method}" \ + " with request #{request} for call #{call}" \ + " and metadata: #{metadata}" + metadata['foo'] = 'bar_from_request_response' + yield + end + + def client_streamer(requests:, call:, method:, metadata: {}) + p "Received client streamer call at method #{method}" \ + " with requests #{requests} for call #{call}" \ + " and metadata: #{metadata}" + requests.each do |r| + p "In client interceptor: #{r}" + end + metadata['foo'] = 'bar_from_client_streamer' + yield + end + + def server_streamer(request:, call:, method:, metadata: {}) + p "Received server streamer call at method #{method}" \ + " with request #{request} for call #{call}" \ + " and metadata: #{metadata}" + metadata['foo'] = 'bar_from_server_streamer' + yield + end + + def bidi_streamer(requests:, call:, method:, metadata: {}) + p "Received bidi streamer call at method #{method}" \ + "with requests #{requests} for call #{call}" \ + " and metadata: #{metadata}" + requests.each do |r| + p "In client interceptor: #{r}" + end + metadata['foo'] = 'bar_from_bidi_streamer' + yield + end +end From 9247ad5c55fbc9013484257b98d2df926e238fab Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Mon, 25 Sep 2017 13:35:48 -0700 Subject: [PATCH 058/109] Fix dumb timer cancellation bug (reversed conditional). 
--- src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index 85ef7894ea6..bfda9460c0d 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -1349,7 +1349,7 @@ static void lb_call_destroy_locked(grpc_exec_ctx *exec_ctx, grpc_byte_buffer_destroy(glb_policy->lb_request_payload); grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details); - if (!glb_policy->client_load_report_timer_pending) { + if (glb_policy->client_load_report_timer_pending) { grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer); } } From 2de36a80376dd1739c53aead2d7cc84ba9219eed Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Mon, 25 Sep 2017 14:54:44 -0700 Subject: [PATCH 059/109] Remove unnecessary code to defer client load report until after initial request. --- .../client_channel/lb_policy/grpclb/grpclb.c | 68 ++++--------------- 1 file changed, 15 insertions(+), 53 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index bfda9460c0d..c129d47af79 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -354,9 +354,6 @@ typedef struct glb_lb_policy { /************************************************************/ /* client data associated with the LB server communication */ /************************************************************/ - /* Finished sending initial request. */ - grpc_closure lb_on_sent_initial_request; - /* Status from the LB server has been received. This signals the end of the LB * call. 
*/ grpc_closure lb_on_server_status_received; @@ -390,7 +387,6 @@ typedef struct glb_lb_policy { /** LB call retry timer */ grpc_timer lb_call_retry_timer; - bool initial_request_sent; bool seen_initial_response; /* Stats for client-side load reporting. Should be unreffed and @@ -1203,21 +1199,6 @@ static void client_load_report_done_locked(grpc_exec_ctx *exec_ctx, void *arg, schedule_next_client_load_report(exec_ctx, glb_policy); } -static void do_send_client_load_report_locked(grpc_exec_ctx *exec_ctx, - glb_lb_policy *glb_policy) { - grpc_op op; - memset(&op, 0, sizeof(op)); - op.op = GRPC_OP_SEND_MESSAGE; - op.data.send_message.send_message = glb_policy->client_load_report_payload; - GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, - client_load_report_done_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - grpc_call_error call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, &op, 1, - &glb_policy->client_load_report_closure); - GPR_ASSERT(GRPC_CALL_OK == call_error); -} - static bool load_report_counters_are_zero(grpc_grpclb_request *request) { grpc_grpclb_dropped_call_counts *drop_entries = (grpc_grpclb_dropped_call_counts *) @@ -1260,17 +1241,20 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_raw_byte_buffer_create(&request_payload_slice, 1); grpc_slice_unref_internal(exec_ctx, request_payload_slice); grpc_grpclb_request_destroy(request); - // If we've already sent the initial request, then we can go ahead and - // sent the load report. Otherwise, we need to wait until the initial - // request has been sent to send this - // (see lb_on_sent_initial_request_locked() below). - if (glb_policy->initial_request_sent) { - do_send_client_load_report_locked(exec_ctx, glb_policy); - } + // Send load report message. 
+ grpc_op op; + memset(&op, 0, sizeof(op)); + op.op = GRPC_OP_SEND_MESSAGE; + op.data.send_message.send_message = glb_policy->client_load_report_payload; + GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure, + client_load_report_done_locked, glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + grpc_call_error call_error = grpc_call_start_batch_and_execute( + exec_ctx, glb_policy->lb_call, &op, 1, + &glb_policy->client_load_report_closure); + GPR_ASSERT(GRPC_CALL_OK == call_error); } -static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error); static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, @@ -1315,9 +1299,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, grpc_slice_unref_internal(exec_ctx, request_payload_slice); grpc_grpclb_request_destroy(request); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request, - lb_on_sent_initial_request_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received, lb_on_server_status_received_locked, glb_policy, grpc_combiner_scheduler(glb_policy->base.combiner)); @@ -1332,7 +1313,6 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx, GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000, GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000); - glb_policy->initial_request_sent = false; glb_policy->seen_initial_response = false; glb_policy->last_client_load_report_counters_were_zero = false; } @@ -1373,7 +1353,7 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, GPR_ASSERT(glb_policy->lb_call != NULL); grpc_call_error call_error; - grpc_op ops[4]; + grpc_op ops[3]; memset(ops, 0, sizeof(ops)); grpc_op *op = ops; @@ -1394,13 +1374,8 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, op->flags = 0; op->reserved = NULL; 
op++; - /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref - * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */ - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, - "lb_on_sent_initial_request_locked"); - call_error = grpc_call_start_batch_and_execute( - exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), - &glb_policy->lb_on_sent_initial_request); + call_error = grpc_call_start_batch_and_execute(exec_ctx, glb_policy->lb_call, + ops, (size_t)(op - ops), NULL); GPR_ASSERT(GRPC_CALL_OK == call_error); op = ops; @@ -1437,19 +1412,6 @@ static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, GPR_ASSERT(GRPC_CALL_OK == call_error); } -static void lb_on_sent_initial_request_locked(grpc_exec_ctx *exec_ctx, - void *arg, grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->initial_request_sent = true; - // If we attempted to send a client load report before the initial - // request was sent, send the load report now. - if (glb_policy->client_load_report_payload != NULL) { - do_send_client_load_report_locked(exec_ctx, glb_policy); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_sent_initial_request_locked"); -} - static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)arg; From a4792f5ad13dec52dfde7dca8e996b7075a2fdc3 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Tue, 26 Sep 2017 09:06:35 -0700 Subject: [PATCH 060/109] Defer restarting LB call until after timer cancellation. 
--- .../client_channel/lb_policy/grpclb/grpclb.c | 112 ++++++++++-------- 1 file changed, 64 insertions(+), 48 deletions(-) diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index c129d47af79..05a106f214d 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -1169,6 +1169,58 @@ static void glb_notify_on_state_change_locked(grpc_exec_ctx *exec_ctx, exec_ctx, &glb_policy->state_tracker, current, notify); } +static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + glb_lb_policy *glb_policy = (glb_lb_policy *)arg; + glb_policy->retry_timer_active = false; + if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)", + (void *)glb_policy); + } + GPR_ASSERT(glb_policy->lb_call == NULL); + query_for_backends_locked(exec_ctx, glb_policy); + } + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); +} + +static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx, + glb_lb_policy *glb_policy) { + if (glb_policy->started_picking && glb_policy->updating_lb_call) { + if (glb_policy->retry_timer_active) { + grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); + } + if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy); + glb_policy->updating_lb_call = false; + } else if (!glb_policy->shutting_down) { + /* if we aren't shutting down, restart the LB client call after some time */ + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + gpr_timespec next_try = + gpr_backoff_step(&glb_policy->lb_call_backoff_state, now); + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", + (void *)glb_policy); + gpr_timespec timeout = gpr_time_sub(next_try, 
now); + if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { + gpr_log(GPR_DEBUG, + "... retry_timer_active in %" PRId64 ".%09d seconds.", + timeout.tv_sec, timeout.tv_nsec); + } else { + gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); + } + } + GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); + GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, + lb_call_on_retry_timer_locked, glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + glb_policy->retry_timer_active = true; + grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, + &glb_policy->lb_on_call_retry, now); + } + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, + "lb_on_server_status_received_locked"); +} + static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error); @@ -1218,6 +1270,9 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, glb_policy->client_load_report_timer_pending = false; GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "client_load_report"); + if (glb_policy->lb_call == NULL) { + maybe_restart_lb_call(exec_ctx, glb_policy); + } return; } // Construct message payload. 
@@ -1252,7 +1307,10 @@ static void send_client_load_report_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_call_error call_error = grpc_call_start_batch_and_execute( exec_ctx, glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure); - GPR_ASSERT(GRPC_CALL_OK == call_error); + if (call_error != GRPC_CALL_OK) { + gpr_log(GPR_ERROR, "call_error=%d", call_error); + GPR_ASSERT(GRPC_CALL_OK == call_error); + } } static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, @@ -1534,21 +1592,6 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, } } -static void lb_call_on_retry_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, - grpc_error *error) { - glb_lb_policy *glb_policy = (glb_lb_policy *)arg; - glb_policy->retry_timer_active = false; - if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)", - (void *)glb_policy); - } - GPR_ASSERT(glb_policy->lb_call == NULL); - query_for_backends_locked(exec_ctx, glb_policy); - } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer"); -} - static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)arg; @@ -1565,39 +1608,12 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, } /* We need to perform cleanups no matter what. 
*/ lb_call_destroy_locked(exec_ctx, glb_policy); - if (glb_policy->started_picking && glb_policy->updating_lb_call) { - if (glb_policy->retry_timer_active) { - grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer); - } - if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy); - glb_policy->updating_lb_call = false; - } else if (!glb_policy->shutting_down) { - /* if we aren't shutting down, restart the LB client call after some time */ - gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); - gpr_timespec next_try = - gpr_backoff_step(&glb_policy->lb_call_backoff_state, now); - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...", - (void *)glb_policy); - gpr_timespec timeout = gpr_time_sub(next_try, now); - if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) { - gpr_log(GPR_DEBUG, - "... retry_timer_active in %" PRId64 ".%09d seconds.", - timeout.tv_sec, timeout.tv_nsec); - } else { - gpr_log(GPR_DEBUG, "... retry_timer_active immediately."); - } - } - GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer"); - GRPC_CLOSURE_INIT(&glb_policy->lb_on_call_retry, - lb_call_on_retry_timer_locked, glb_policy, - grpc_combiner_scheduler(glb_policy->base.combiner)); - glb_policy->retry_timer_active = true; - grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try, - &glb_policy->lb_on_call_retry, now); + // If the load report timer is still pending, we wait for it to be + // called before restarting the call. Otherwise, we restart the call + // here. 
+ if (!glb_policy->client_load_report_timer_pending) { + maybe_restart_lb_call(exec_ctx, glb_policy); } - GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, - "lb_on_server_status_received_locked"); } static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, From c8e145bd93d3aa0ae02fe8bbddc265f1c35b5339 Mon Sep 17 00:00:00 2001 From: ZhouyihaiDing Date: Mon, 25 Sep 2017 23:20:30 +0000 Subject: [PATCH 061/109] add protobuf c_ext to php benchmark --- src/ruby/qps/proxy-worker.rb | 16 +++++++++-- tools/jenkins/run_full_performance.sh | 2 +- tools/run_tests/performance/run_worker_php.sh | 11 ++++++-- .../run_tests/performance/scenario_config.py | 28 +++++++++++++++++++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/src/ruby/qps/proxy-worker.rb b/src/ruby/qps/proxy-worker.rb index 488610ae74d..ae7006e7d60 100755 --- a/src/ruby/qps/proxy-worker.rb +++ b/src/ruby/qps/proxy-worker.rb @@ -31,8 +31,9 @@ require 'src/proto/grpc/testing/services_services_pb' require 'src/proto/grpc/testing/proxy-service_services_pb' class ProxyBenchmarkClientServiceImpl < Grpc::Testing::ProxyClientService::Service - def initialize(port) + def initialize(port, c_ext) @mytarget = "localhost:" + port.to_s + @use_c_ext = c_ext end def setup(config) @config = config @@ -41,7 +42,13 @@ class ProxyBenchmarkClientServiceImpl < Grpc::Testing::ProxyClientService::Servi @histogram = Histogram.new(@histres, @histmax) @start_time = Time.now # TODO(vjpai): Support multiple client channels by spawning off a PHP client per channel - command = "php -d extension=" + File.expand_path(File.dirname(__FILE__)) + "/../../php/ext/grpc/modules/grpc.so " + File.expand_path(File.dirname(__FILE__)) + "/../../php/tests/qps/client.php " + @mytarget + if @use_c_ext + puts "Use protobuf c extension" + command = "php -d extension=" + File.expand_path(File.dirname(__FILE__)) + "/../../php/tests/qps/vendor/google/protobuf/php/ext/google/protobuf/modules/protobuf.so " + "-d extension=" + 
File.expand_path(File.dirname(__FILE__)) + "/../../php/ext/grpc/modules/grpc.so " + File.expand_path(File.dirname(__FILE__)) + "/../../php/tests/qps/client.php " + @mytarget + else + puts "Use protobuf php extension" + command = "php -d extension=" + File.expand_path(File.dirname(__FILE__)) + "/../../php/ext/grpc/modules/grpc.so " + File.expand_path(File.dirname(__FILE__)) + "/../../php/tests/qps/client.php " + @mytarget + end puts "Starting command: " + command @php_pid = spawn(command) end @@ -128,6 +135,9 @@ def proxymain opts.on('--driver_port PORT', '') do |v| options['driver_port'] = v end + opts.on("-c", "--[no-]c_proto_ext", "Use protobuf C-extention") do |c| + options[:c_ext] = c + end end.parse! # Configure any errors with client or server child threads to surface @@ -136,7 +146,7 @@ def proxymain s = GRPC::RpcServer.new port = s.add_http2_port("0.0.0.0:" + options['driver_port'].to_s, :this_port_is_insecure) - bmc = ProxyBenchmarkClientServiceImpl.new(port) + bmc = ProxyBenchmarkClientServiceImpl.new(port, options[:c_ext]) s.handle(bmc) s.handle(ProxyWorkerServiceImpl.new(s, bmc)) s.run diff --git a/tools/jenkins/run_full_performance.sh b/tools/jenkins/run_full_performance.sh index a9661c7e266..aae76b0fde8 100755 --- a/tools/jenkins/run_full_performance.sh +++ b/tools/jenkins/run_full_performance.sh @@ -21,7 +21,7 @@ cd $(dirname $0)/../.. 
# run 8core client vs 8core server tools/run_tests/run_performance_tests.py \ - -l c++ csharp node ruby java python go node_express php \ + -l c++ csharp node ruby java python go node_express php php_ext \ --netperf \ --category scalable \ --bq_result_table performance_test.performance_experiment \ diff --git a/tools/run_tests/performance/run_worker_php.sh b/tools/run_tests/performance/run_worker_php.sh index 5d0c4fa4fd3..e524d5286d0 100755 --- a/tools/run_tests/performance/run_worker_php.sh +++ b/tools/run_tests/performance/run_worker_php.sh @@ -16,13 +16,18 @@ source ~/.rvm/scripts/rvm set -ex -repo=$(dirname $0)/../../.. - +cd $(dirname $0)/../../.. +repo=$(pwd) # First set up all dependences needed for PHP QPS test cd $repo cd src/php/tests/qps composer install +# Install protobuf C-extension for php +cd vendor/google/protobuf/php/ext/google/protobuf +phpize +./configure +make # The proxy worker for PHP is implemented in Ruby -cd ../../../.. +cd $repo ruby src/ruby/qps/proxy-worker.rb $@ diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py index 5efc9f56485..31017f1d85a 100644 --- a/tools/run_tests/performance/scenario_config.py +++ b/tools/run_tests/performance/scenario_config.py @@ -827,6 +827,33 @@ class PhpLanguage: return 'php' +class PhpLanguage_ext: + + def __init__(self): + pass + self.safename = str(self) + + def worker_cmdline(self): + return ['tools/run_tests/performance/run_worker_php.sh -c'] + + def worker_port_offset(self): + return 800 + + def scenarios(self): + yield _ping_pong_scenario( + 'php_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', + client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + server_language='c++', async_server_threads=1) + + yield _ping_pong_scenario( + 'php_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', + client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + server_language='c++', async_server_threads=1) + + def __str__(self): + return 
'php' + + class JavaLanguage: def __init__(self): @@ -1025,6 +1052,7 @@ LANGUAGES = { 'node_express': NodeExpressLanguage(), 'ruby' : RubyLanguage(), 'php' : PhpLanguage(), + 'php_ext' : PhpLanguage_ext(), 'java' : JavaLanguage(), 'python' : PythonLanguage(), 'go' : GoLanguage(), From e249079cbed238a28bed148dbd2c0974d2dea265 Mon Sep 17 00:00:00 2001 From: Mehrdad Afshari Date: Tue, 26 Sep 2017 11:25:45 -0700 Subject: [PATCH 062/109] Be a tad more specific requesting error reports This commit is a rebase of an old contribution with minor formatting edits (cf. https://github.com/grpc/grpc/pull/8663) [Original Author] Masood Malekghassemi Date: Mon Nov 7 14:49:09 2016 -0800 Contributor is a Xoogler and the contribution is owned by Google Inc. as per the copyright assignment agreement with the original author, as it was drafted during their employment with Google Inc. --- src/python/grpcio/support.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/python/grpcio/support.py b/src/python/grpcio/support.py index 510bf422a07..f2395eb26c2 100644 --- a/src/python/grpcio/support.py +++ b/src/python/grpcio/support.py @@ -94,7 +94,7 @@ def diagnose_attribute_error(build_ext, error): _ERROR_DIAGNOSES = { errors.CompileError: diagnose_compile_error, - AttributeError: diagnose_attribute_error + AttributeError: diagnose_attribute_error, } @@ -102,8 +102,10 @@ def diagnose_build_ext_error(build_ext, error, formatted): diagnostic = _ERROR_DIAGNOSES.get(type(error)) if diagnostic is None: raise commands.CommandError( - "\n\nWe could not diagnose your build failure. Please file an issue at " - "http://www.github.com/grpc/grpc with `[Python install]` in the title." - "\n\n{}".format(formatted)) + "\n\nWe could not diagnose your build failure. 
If you are unable to " + "proceed, please file an issue at http://www.github.com/grpc/grpc " + "with `[Python install]` in the title; please attach the whole log " + "(including everything that may have appeared above the Python " + "backtrace).\n\n{}".format(formatted)) else: diagnostic(build_ext, error) From 2477cf37783bf580d45261fb44706fe99d6959d9 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Tue, 26 Sep 2017 12:20:35 -0700 Subject: [PATCH 063/109] Revert "General executor speedups, introspection" --- src/core/lib/debug/stats_data.c | 62 ++-------------- src/core/lib/debug/stats_data.h | 26 ++----- src/core/lib/debug/stats_data.yaml | 8 --- src/core/lib/debug/stats_data_bq_schema.sql | 2 - src/core/lib/iomgr/executor.c | 69 ++++++++---------- test/core/iomgr/pollset_set_test.c | 6 +- tools/codegen/core/gen_stats_data.py | 4 +- .../performance/massage_qps_stats.py | 8 --- .../performance/scenario_result_schema.json | 70 ------------------- 9 files changed, 48 insertions(+), 207 deletions(-) diff --git a/src/core/lib/debug/stats_data.c b/src/core/lib/debug/stats_data.c index fb6055f7957..c0aec63c1d3 100644 --- a/src/core/lib/debug/stats_data.c +++ b/src/core/lib/debug/stats_data.c @@ -109,8 +109,6 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = { "executor_wakeup_initiated", "executor_queue_drained", "executor_push_retries", - "executor_threads_created", - "executor_threads_used", "server_requested_calls", "server_slowpath_requests_queued", }; @@ -219,8 +217,6 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = { "Number of times an executor queue was drained", "Number of times we raced and were forced to retry pushing a closure to " "the executor", - "Size of the backing thread pool for overflow gRPC Core work", - "How many executor threads actually got used", "How many calls were requested (not necessarily received) by the server", "How many times was the server slow path taken (indicates too few " "outstanding requests)", @@ 
-238,7 +234,6 @@ const char *grpc_stats_histogram_name[GRPC_STATS_HISTOGRAM_COUNT] = { "http2_send_message_per_write", "http2_send_trailing_metadata_per_write", "http2_send_flowctl_per_write", - "executor_closures_per_wakeup", "server_cqs_checked", }; const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { @@ -254,7 +249,6 @@ const char *grpc_stats_histogram_doc[GRPC_STATS_HISTOGRAM_COUNT] = { "Number of streams whose payload was written per TCP write", "Number of streams terminated per TCP write", "Number of flow control updates written per TCP write", - "Number of closures executed each time an executor wakes up", "How many completion queues were checked looking for a CQ that had " "requested the incoming call", }; @@ -326,7 +320,6 @@ const uint8_t grpc_stats_table_7[102] = { const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64}; const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5}; void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 262144); if (value < 6) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, @@ -352,7 +345,6 @@ void grpc_stats_inc_call_initial_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_0, 64)); } void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 29) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -379,7 +371,6 @@ void grpc_stats_inc_poll_events_returned(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_2, 128)); } void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), 
GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, @@ -405,7 +396,6 @@ void grpc_stats_inc_tcp_write_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_4, 64)); } void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -431,7 +421,6 @@ void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_6, 64)); } void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, @@ -457,7 +446,6 @@ void grpc_stats_inc_tcp_read_size(grpc_exec_ctx *exec_ctx, int value) { (exec_ctx), value, grpc_stats_table_4, 64)); } void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, @@ -484,7 +472,6 @@ void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx *exec_ctx, int value) { } void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -512,7 +499,6 @@ void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx *exec_ctx, } void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 16777216); if (value < 5) { GRPC_STATS_INC_HISTOGRAM( @@ -540,7 +526,6 @@ void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx *exec_ctx, } void 
grpc_stats_inc_http2_send_initial_metadata_per_write( grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -570,7 +555,6 @@ void grpc_stats_inc_http2_send_initial_metadata_per_write( } void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -598,7 +582,6 @@ void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx *exec_ctx, } void grpc_stats_inc_http2_send_trailing_metadata_per_write( grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -628,7 +611,6 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( } void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 1024); if (value < 13) { GRPC_STATS_INC_HISTOGRAM( @@ -654,36 +636,7 @@ void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, grpc_stats_histo_find_bucket_slow( (exec_ctx), value, grpc_stats_table_6, 64)); } -void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx, - int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ - value = GPR_CLAMP(value, 0, 1024); - if (value < 13) { - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, value); - return; - } - union { - double dbl; - uint64_t uint; - } _val, _bkt; - _val.dbl = value; - if (_val.uint < 4637863191261478912ull) { - int bucket = - grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13; - _bkt.dbl = grpc_stats_table_6[bucket]; - bucket -= 
(_val.uint < _bkt.uint); - GRPC_STATS_INC_HISTOGRAM( - (exec_ctx), GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, bucket); - return; - } - GRPC_STATS_INC_HISTOGRAM((exec_ctx), - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, - grpc_stats_histo_find_bucket_slow( - (exec_ctx), value, grpc_stats_table_6, 64)); -} void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { - /* Automatically generated by tools/codegen/core/gen_stats_data.py */ value = GPR_CLAMP(value, 0, 64); if (value < 3) { GRPC_STATS_INC_HISTOGRAM((exec_ctx), @@ -708,17 +661,17 @@ void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int value) { grpc_stats_histo_find_bucket_slow( (exec_ctx), value, grpc_stats_table_8, 8)); } -const int grpc_stats_histo_buckets[14] = {64, 128, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 8}; -const int grpc_stats_histo_start[14] = {0, 64, 192, 256, 320, 384, 448, - 512, 576, 640, 704, 768, 832, 896}; -const int *const grpc_stats_histo_bucket_boundaries[14] = { +const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 8}; +const int grpc_stats_histo_start[13] = {0, 64, 192, 256, 320, 384, 448, + 512, 576, 640, 704, 768, 832}; +const int *const grpc_stats_histo_bucket_boundaries[13] = { grpc_stats_table_0, grpc_stats_table_2, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6, - grpc_stats_table_6, grpc_stats_table_8}; -void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = { + grpc_stats_table_8}; +void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x) = { grpc_stats_inc_call_initial_size, grpc_stats_inc_poll_events_returned, grpc_stats_inc_tcp_write_size, @@ -731,5 +684,4 @@ void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, int x) = { grpc_stats_inc_http2_send_message_per_write, 
grpc_stats_inc_http2_send_trailing_metadata_per_write, grpc_stats_inc_http2_send_flowctl_per_write, - grpc_stats_inc_executor_closures_per_wakeup, grpc_stats_inc_server_cqs_checked}; diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h index 6c0ad30543f..28dab00117b 100644 --- a/src/core/lib/debug/stats_data.h +++ b/src/core/lib/debug/stats_data.h @@ -111,8 +111,6 @@ typedef enum { GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED, GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED, GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES, - GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED, - GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED, GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS, GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED, GRPC_STATS_COUNTER_COUNT @@ -132,7 +130,6 @@ typedef enum { GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP, GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, GRPC_STATS_HISTOGRAM_COUNT } grpc_stats_histograms; @@ -163,11 +160,9 @@ typedef enum { GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE_BUCKETS = 64, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_FIRST_SLOT = 768, GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_FIRST_SLOT = 832, - GRPC_STATS_HISTOGRAM_EXECUTOR_CLOSURES_PER_WAKEUP_BUCKETS = 64, - GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 896, + GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_FIRST_SLOT = 832, GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8, - GRPC_STATS_HISTOGRAM_BUCKETS = 904 + GRPC_STATS_HISTOGRAM_BUCKETS = 840 } grpc_stats_histogram_constants; #define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED) @@ -417,11 +412,6 @@ typedef enum { GRPC_STATS_INC_COUNTER((exec_ctx), 
GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED) #define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES) -#define GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), \ - GRPC_STATS_COUNTER_EXECUTOR_THREADS_CREATED) -#define GRPC_STATS_INC_EXECUTOR_THREADS_USED(exec_ctx) \ - GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_THREADS_USED) #define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \ GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS) #define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \ @@ -468,17 +458,13 @@ void grpc_stats_inc_http2_send_trailing_metadata_per_write( grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value)) void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx *exec_ctx, int x); -#define GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, value) \ - grpc_stats_inc_executor_closures_per_wakeup((exec_ctx), (int)(value)) -void grpc_stats_inc_executor_closures_per_wakeup(grpc_exec_ctx *exec_ctx, - int x); #define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \ grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value)) void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx *exec_ctx, int x); -extern const int grpc_stats_histo_buckets[14]; -extern const int grpc_stats_histo_start[14]; -extern const int *const grpc_stats_histo_bucket_boundaries[14]; -extern void (*const grpc_stats_inc_histogram[14])(grpc_exec_ctx *exec_ctx, +extern const int grpc_stats_histo_buckets[13]; +extern const int grpc_stats_histo_start[13]; +extern const int *const grpc_stats_histo_bucket_boundaries[13]; +extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx *exec_ctx, int x); #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */ diff --git a/src/core/lib/debug/stats_data.yaml b/src/core/lib/debug/stats_data.yaml index de575f01c73..b5c15ff55c0 100644 --- 
a/src/core/lib/debug/stats_data.yaml +++ b/src/core/lib/debug/stats_data.yaml @@ -259,14 +259,6 @@ - counter: executor_push_retries doc: Number of times we raced and were forced to retry pushing a closure to the executor -- counter: executor_threads_created - doc: Size of the backing thread pool for overflow gRPC Core work -- counter: executor_threads_used - doc: How many executor threads actually got used -- histogram: executor_closures_per_wakeup - max: 1024 - buckets: 64 - doc: Number of closures executed each time an executor wakes up # server - counter: server_requested_calls doc: How many calls were requested (not necessarily received) by the server diff --git a/src/core/lib/debug/stats_data_bq_schema.sql b/src/core/lib/debug/stats_data_bq_schema.sql index 0611ccaff0e..f96e40c00ef 100644 --- a/src/core/lib/debug/stats_data_bq_schema.sql +++ b/src/core/lib/debug/stats_data_bq_schema.sql @@ -84,7 +84,5 @@ executor_scheduled_to_self_per_iteration:FLOAT, executor_wakeup_initiated_per_iteration:FLOAT, executor_queue_drained_per_iteration:FLOAT, executor_push_retries_per_iteration:FLOAT, -executor_threads_created_per_iteration:FLOAT, -executor_threads_used_per_iteration:FLOAT, server_requested_calls_per_iteration:FLOAT, server_slowpath_requests_queued_per_iteration:FLOAT diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c index 2439f15a8a9..892385d7d78 100644 --- a/src/core/lib/iomgr/executor.c +++ b/src/core/lib/iomgr/executor.c @@ -32,14 +32,16 @@ #include "src/core/lib/iomgr/exec_ctx.h" #include "src/core/lib/support/spinlock.h" +#define MAX_DEPTH 2 + typedef struct { gpr_mu mu; gpr_cv cv; grpc_closure_list elems; + size_t depth; bool shutdown; bool queued_long_job; gpr_thd_id id; - grpc_closure_list local_elems; } thread_state; static thread_state *g_thread_state; @@ -54,35 +56,32 @@ static grpc_tracer_flag executor_trace = static void executor_thread(void *arg); -static void run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) 
{ - int n = 0; // number of closures executed +static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) { + size_t n = 0; - while (!grpc_closure_list_empty(*list)) { - grpc_closure *c = list->head; - grpc_closure_list_init(list); - while (c != NULL) { - grpc_closure *next = c->next_data.next; - grpc_error *error = c->error_data.error; - if (GRPC_TRACER_ON(executor_trace)) { + grpc_closure *c = list.head; + while (c != NULL) { + grpc_closure *next = c->next_data.next; + grpc_error *error = c->error_data.error; + if (GRPC_TRACER_ON(executor_trace)) { #ifndef NDEBUG - gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, - c->file_created, c->line_created); + gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, + c->file_created, c->line_created); #else - gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); + gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); #endif - } + } #ifndef NDEBUG - c->scheduled = false; + c->scheduled = false; #endif - n++; - c->cb(exec_ctx, c->cb_arg, error); - GRPC_ERROR_UNREF(error); - c = next; - grpc_exec_ctx_flush(exec_ctx); - } + c->cb(exec_ctx, c->cb_arg, error); + GRPC_ERROR_UNREF(error); + c = next; + n++; + grpc_exec_ctx_flush(exec_ctx); } - GRPC_STATS_INC_EXECUTOR_CLOSURES_PER_WAKEUP(exec_ctx, n); + return n; } bool grpc_executor_is_threaded() { @@ -127,7 +126,7 @@ void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) { for (size_t i = 0; i < g_max_threads; i++) { gpr_mu_destroy(&g_thread_state[i].mu); gpr_cv_destroy(&g_thread_state[i].cv); - run_closures(exec_ctx, &g_thread_state[i].elems); + run_closures(exec_ctx, g_thread_state[i].elems); } gpr_free(g_thread_state); gpr_tls_destroy(&g_this_thread_state); @@ -151,14 +150,14 @@ static void executor_thread(void *arg) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL); - GRPC_STATS_INC_EXECUTOR_THREADS_CREATED(&exec_ctx); - - bool used = false; + size_t subtract_depth = 0; for (;;) { if 
(GRPC_TRACER_ON(executor_trace)) { - gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step", (int)(ts - g_thread_state)); + gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")", + (int)(ts - g_thread_state), subtract_depth); } gpr_mu_lock(&ts->mu); + ts->depth -= subtract_depth; while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) { ts->queued_long_job = false; gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME)); @@ -171,20 +170,15 @@ static void executor_thread(void *arg) { gpr_mu_unlock(&ts->mu); break; } - if (!used) { - GRPC_STATS_INC_EXECUTOR_THREADS_USED(&exec_ctx); - used = true; - } GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx); - GPR_ASSERT(grpc_closure_list_empty(ts->local_elems)); - ts->local_elems = ts->elems; + grpc_closure_list exec = ts->elems; ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT; gpr_mu_unlock(&ts->mu); if (GRPC_TRACER_ON(executor_trace)) { gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state)); } - run_closures(&exec_ctx, &ts->local_elems); + subtract_depth = run_closures(&exec_ctx, exec); } grpc_exec_ctx_finish(&exec_ctx); } @@ -217,10 +211,6 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)]; } else { GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx); - if (is_short) { - grpc_closure_list_append(&ts->local_elems, closure, error); - return; - } } thread_state *orig_ts = ts; @@ -260,7 +250,8 @@ static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure, gpr_cv_signal(&ts->cv); } grpc_closure_list_append(&ts->elems, closure, error); - try_new_thread = ts->elems.head != closure && + ts->depth++; + try_new_thread = ts->depth > MAX_DEPTH && cur_thread_count < g_max_threads && !ts->shutdown; if (!is_short) ts->queued_long_job = true; gpr_mu_unlock(&ts->mu); diff --git a/test/core/iomgr/pollset_set_test.c b/test/core/iomgr/pollset_set_test.c index 70efca8b16f..5750ac0f4b3 100644 --- 
a/test/core/iomgr/pollset_set_test.c +++ b/test/core/iomgr/pollset_set_test.c @@ -24,7 +24,6 @@ #include #include -#include #include #include #include @@ -434,7 +433,8 @@ int main(int argc, char **argv) { const char *poll_strategy = grpc_get_poll_strategy_name(); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_test_init(argc, argv); - grpc_init(); + grpc_iomgr_init(&exec_ctx); + grpc_iomgr_start(&exec_ctx); if (poll_strategy != NULL && (strcmp(poll_strategy, "epoll") == 0 || @@ -449,8 +449,8 @@ int main(int argc, char **argv) { poll_strategy); } + grpc_iomgr_shutdown(&exec_ctx); grpc_exec_ctx_finish(&exec_ctx); - grpc_shutdown(); return 0; } #else /* defined(GRPC_LINUX_EPOLL) */ diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py index 10ad0cc831f..8359734c848 100755 --- a/tools/codegen/core/gen_stats_data.py +++ b/tools/codegen/core/gen_stats_data.py @@ -147,8 +147,7 @@ def gen_bucket_code(histogram): shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets) #print first_nontrivial, shift_data, bounds #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]] - code = '\n/* Automatically generated by tools/codegen/core/gen_stats_data.py */\n' - code += 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max + code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data) if first_nontrivial is None: code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n' @@ -408,3 +407,4 @@ with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S: for counter in inst_map['Counter']: columns.append(('%s_per_iteration' % counter.name, 'FLOAT')) print >>S, ',\n'.join('%s:%s' % x for x in columns) + diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py index e1620adad26..9b9355308a3 100644 --- 
a/tools/run_tests/performance/massage_qps_stats.py +++ b/tools/run_tests/performance/massage_qps_stats.py @@ -106,8 +106,6 @@ def massage_qps_stats(scenario_result): stats["core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(core_stats, "executor_wakeup_initiated") stats["core_executor_queue_drained"] = massage_qps_stats_helpers.counter(core_stats, "executor_queue_drained") stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(core_stats, "executor_push_retries") - stats["core_executor_threads_created"] = massage_qps_stats_helpers.counter(core_stats, "executor_threads_created") - stats["core_executor_threads_used"] = massage_qps_stats_helpers.counter(core_stats, "executor_threads_used") stats["core_server_requested_calls"] = massage_qps_stats_helpers.counter(core_stats, "server_requested_calls") stats["core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(core_stats, "server_slowpath_requests_queued") h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size") @@ -182,12 +180,6 @@ def massage_qps_stats(scenario_result): stats["core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries) stats["core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries) stats["core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries) - h = massage_qps_stats_helpers.histogram(core_stats, "executor_closures_per_wakeup") - stats["core_executor_closures_per_wakeup"] = ",".join("%f" % x for x in h.buckets) - stats["core_executor_closures_per_wakeup_bkts"] = ",".join("%f" % x for x in h.boundaries) - stats["core_executor_closures_per_wakeup_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries) - stats["core_executor_closures_per_wakeup_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries) - stats["core_executor_closures_per_wakeup_99p"] = 
massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries) h = massage_qps_stats_helpers.histogram(core_stats, "server_cqs_checked") stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets) stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x for x in h.boundaries) diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json index c7b1904bd19..2f0fd916d4f 100644 --- a/tools/run_tests/performance/scenario_result_schema.json +++ b/tools/run_tests/performance/scenario_result_schema.json @@ -540,16 +540,6 @@ "name": "core_executor_push_retries", "type": "INTEGER" }, - { - "mode": "NULLABLE", - "name": "core_executor_threads_created", - "type": "INTEGER" - }, - { - "mode": "NULLABLE", - "name": "core_executor_threads_used", - "type": "INTEGER" - }, { "mode": "NULLABLE", "name": "core_server_requested_calls", @@ -860,31 +850,6 @@ "name": "core_http2_send_flowctl_per_write_99p", "type": "FLOAT" }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup", - "type": "STRING" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_bkts", - "type": "STRING" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_50p", - "type": "FLOAT" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_95p", - "type": "FLOAT" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_99p", - "type": "FLOAT" - }, { "mode": "NULLABLE", "name": "core_server_cqs_checked", @@ -1367,16 +1332,6 @@ "name": "core_executor_push_retries", "type": "INTEGER" }, - { - "mode": "NULLABLE", - "name": "core_executor_threads_created", - "type": "INTEGER" - }, - { - "mode": "NULLABLE", - "name": "core_executor_threads_used", - "type": "INTEGER" - }, { "mode": "NULLABLE", "name": "core_server_requested_calls", @@ -1687,31 +1642,6 @@ "name": "core_http2_send_flowctl_per_write_99p", "type": "FLOAT" }, - { - "mode": "NULLABLE", 
- "name": "core_executor_closures_per_wakeup", - "type": "STRING" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_bkts", - "type": "STRING" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_50p", - "type": "FLOAT" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_95p", - "type": "FLOAT" - }, - { - "mode": "NULLABLE", - "name": "core_executor_closures_per_wakeup_99p", - "type": "FLOAT" - }, { "mode": "NULLABLE", "name": "core_server_cqs_checked", From f1b1ab08994f3f269c0128a87dd3355fa4839a85 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Tue, 26 Sep 2017 12:32:29 -0700 Subject: [PATCH 064/109] return nil from google rpc status conversion if the grpc-status-details-bin trailer wasnt set --- src/ruby/lib/grpc/google_rpc_status_utils.rb | 9 ++- src/ruby/spec/google_rpc_status_utils_spec.rb | 77 ++++++++++++++++++- 2 files changed, 81 insertions(+), 5 deletions(-) diff --git a/src/ruby/lib/grpc/google_rpc_status_utils.rb b/src/ruby/lib/grpc/google_rpc_status_utils.rb index fdadd6b76e4..f253b082b63 100644 --- a/src/ruby/lib/grpc/google_rpc_status_utils.rb +++ b/src/ruby/lib/grpc/google_rpc_status_utils.rb @@ -19,10 +19,17 @@ require 'google/rpc/status_pb' module GRPC # GoogleRpcStatusUtils provides utilities to convert between a # GRPC::Core::Status and a deserialized Google::Rpc::Status proto + # Returns nil if the grpc-status-details-bin trailer could not be + # converted to a GoogleRpcStatus due to the server not providing + # the necessary trailers. + # Raises an error if the server did provide the necessary trailers + # but they fail to deserialize into a GoogleRpcStatus protobuf. class GoogleRpcStatusUtils def self.extract_google_rpc_status(status) fail ArgumentError, 'bad type' unless status.is_a? 
Struct::Status - Google::Rpc::Status.decode(status.metadata['grpc-status-details-bin']) + grpc_status_details_bin_trailer = 'grpc-status-details-bin' + return nil if status.metadata[grpc_status_details_bin_trailer].nil? + Google::Rpc::Status.decode(status.metadata[grpc_status_details_bin_trailer]) end end end diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb index fe221c30ddd..6f2a06b1d93 100644 --- a/src/ruby/spec/google_rpc_status_utils_spec.rb +++ b/src/ruby/spec/google_rpc_status_utils_spec.rb @@ -31,12 +31,11 @@ describe 'conversion from a status struct to a google protobuf status' do expect(exception.message.include?('bad type')).to be true end - it 'fails with some error if the header key is missing' do + it 'returns nil if the header key is missing' do status = Struct::Status.new(1, 'details', key: 'val') expect(status.metadata.nil?).to be false - expect do - GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) - end.to raise_error(StandardError) + expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + status)).to be(nil) end it 'fails with some error if the header key fails to deserialize' do @@ -221,3 +220,73 @@ describe 'receving a google rpc status from a remote endpoint' do status_from_exception)).to eq(rpc_status) end end + +# A test service that fails without explicitly setting the +# grpc-status-details-bin trailer. Tests assumptions about value +# of grpc-status-details-bin on the client side when the trailer wasn't +# set explicitly. 
+class NoStatusDetailsBinTestService + include GRPC::GenericService + rpc :an_rpc, EchoMsg, EchoMsg + + def an_rpc(_, _) + fail GRPC::Unknown + end +end + +NoStatusDetailsBinTestServiceStub = NoStatusDetailsBinTestService.rpc_stub_class + +describe 'when the endpoint doesnt send grpc-status-details-bin' do + def start_server + @srv = GRPC::RpcServer.new(pool_size: 1) + @server_port = @srv.add_http2_port('localhost:0', + :this_port_is_insecure) + @srv.handle(NoStatusDetailsBinTestService) + @server_thd = Thread.new { @srv.run } + @srv.wait_till_running + end + + def stop_server + expect(@srv.stopped?).to be(false) + @srv.stop + @server_thd.join + expect(@srv.stopped?).to be(true) + end + + before(:each) do + start_server + end + + after(:each) do + stop_server + end + + it 'should receive nil when we try to extract a google '\ + 'rpc status from a BadStatus exception that didnt have it' do + stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}", + :this_channel_is_insecure) + begin + stub.an_rpc(EchoMsg.new) + rescue GRPC::Unknown => e + rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + e.to_status) + end + expect(rpc_status).to be(nil) + end + + it 'should receive nil when we try to extract a google '\ + 'rpc status from an op views status object that didnt have it' do + stub = NoStatusDetailsBinTestServiceStub.new("localhost:#{@server_port}", + :this_channel_is_insecure) + op = stub.an_rpc(EchoMsg.new, return_op: true) + begin + op.execute + rescue GRPC::Unknown => e + status_from_exception = e.to_status + end + expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + status_from_exception)).to be(nil) + expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + op.status)).to be nil + end +end From 5fef3e3fd56e0b978cf1df99d326251601b2a42b Mon Sep 17 00:00:00 2001 From: ZhouyihaiDing Date: Tue, 26 Sep 2017 18:25:28 +0000 Subject: [PATCH 065/109] reuse class; change language name; scenario name; --- 
tools/jenkins/run_full_performance.sh | 2 +- .../run_tests/performance/scenario_config.py | 48 ++++++------------- 2 files changed, 15 insertions(+), 35 deletions(-) diff --git a/tools/jenkins/run_full_performance.sh b/tools/jenkins/run_full_performance.sh index aae76b0fde8..9598fd77349 100755 --- a/tools/jenkins/run_full_performance.sh +++ b/tools/jenkins/run_full_performance.sh @@ -21,7 +21,7 @@ cd $(dirname $0)/../.. # run 8core client vs 8core server tools/run_tests/run_performance_tests.py \ - -l c++ csharp node ruby java python go node_express php php_ext \ + -l c++ csharp node ruby java python go node_express php_protobuf_php php_protobuf_c \ --netperf \ --category scalable \ --bq_result_table performance_test.performance_experiment \ diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py index 31017f1d85a..5019358ab3e 100644 --- a/tools/run_tests/performance/scenario_config.py +++ b/tools/run_tests/performance/scenario_config.py @@ -802,52 +802,32 @@ class RubyLanguage: class PhpLanguage: - def __init__(self): + def __init__(self, use_protobuf_c_extension=False): pass + self.use_protobuf_c_extension=use_protobuf_c_extension self.safename = str(self) def worker_cmdline(self): + if self.use_protobuf_c_extension: + return ['tools/run_tests/performance/run_worker_php.sh -c'] return ['tools/run_tests/performance/run_worker_php.sh'] def worker_port_offset(self): return 800 def scenarios(self): + php_extension_mode='php_protobuf_php_extension' + if self.use_protobuf_c_extension: + php_extension_mode='php_protobuf_c_extension' + yield _ping_pong_scenario( - 'php_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', - server_language='c++', async_server_threads=1) - - yield _ping_pong_scenario( - 'php_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', - server_language='c++', 
async_server_threads=1) - - def __str__(self): - return 'php' - - -class PhpLanguage_ext: - - def __init__(self): - pass - self.safename = str(self) - - def worker_cmdline(self): - return ['tools/run_tests/performance/run_worker_php.sh -c'] - - def worker_port_offset(self): - return 800 - - def scenarios(self): - yield _ping_pong_scenario( - 'php_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + '%s_to_cpp_protobuf_sync_unary_ping_pong' % php_extension_mode, + rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='SYNC_SERVER', server_language='c++', async_server_threads=1) yield _ping_pong_scenario( - 'php_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING', - client_type='SYNC_CLIENT', server_type='SYNC_SERVER', + '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php_extension_mode, + rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='SYNC_SERVER', server_language='c++', async_server_threads=1) def __str__(self): @@ -1051,8 +1031,8 @@ LANGUAGES = { 'node' : NodeLanguage(), 'node_express': NodeExpressLanguage(), 'ruby' : RubyLanguage(), - 'php' : PhpLanguage(), - 'php_ext' : PhpLanguage_ext(), + 'php_protobuf_php' : PhpLanguage(), + 'php_protobuf_c' : PhpLanguage(use_protobuf_c_extension=True), 'java' : JavaLanguage(), 'python' : PythonLanguage(), 'go' : GoLanguage(), From 1a8bb821f4e978bc94f3cc4f87a8653d9de284e5 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Tue, 26 Sep 2017 15:15:12 -0700 Subject: [PATCH 066/109] Add tracer for plugin credentials. 
--- doc/environment_variables.md | 1 + .../credentials/plugin/plugin_credentials.c | 40 +++++++++++++++++++ .../credentials/plugin/plugin_credentials.h | 2 + src/core/lib/surface/init_secure.c | 6 ++- 4 files changed, 48 insertions(+), 1 deletion(-) diff --git a/doc/environment_variables.md b/doc/environment_variables.md index f90f1d5b10e..f775de16644 100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -58,6 +58,7 @@ some configuration as environment variables that can be set. completion queue - round_robin - traces the round_robin load balancing policy - pick_first - traces the pick first load balancing policy + - plugin_credentials - traces plugin credentials - resource_quota - trace resource quota objects internals - glb - traces the grpclb load balancer - queue_pluck diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.c b/src/core/lib/security/credentials/plugin/plugin_credentials.c index 8f49c5aef2d..ee20241e3fc 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.c +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.c @@ -31,6 +31,9 @@ #include "src/core/lib/surface/api_trace.h" #include "src/core/lib/surface/validate_metadata.h" +grpc_tracer_flag grpc_plugin_credentials_trace = + GRPC_TRACER_INITIALIZER(false, "plugin_credentials"); + static void plugin_destruct(grpc_exec_ctx *exec_ctx, grpc_call_credentials *creds) { grpc_plugin_credentials *c = (grpc_plugin_credentials *)creds; @@ -120,6 +123,12 @@ static void plugin_md_request_metadata_ready(void *request, NULL, NULL); grpc_plugin_credentials_pending_request *r = (grpc_plugin_credentials_pending_request *)request; + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin returned " + "asynchronously", + r->creds, r); + } // Remove request from pending list if not previously cancelled. 
pending_request_complete(&exec_ctx, r); // If it has not been cancelled, process it. @@ -127,6 +136,11 @@ static void plugin_md_request_metadata_ready(void *request, grpc_error *error = process_plugin_result(&exec_ctx, r, md, num_md, status, error_details); GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error); + } else if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin was previously " + "cancelled", + r->creds, r); } gpr_free(r); grpc_exec_ctx_finish(&exec_ctx); @@ -158,6 +172,10 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, c->pending_requests = pending_request; gpr_mu_unlock(&c->mu); // Invoke the plugin. The callback holds a ref to us. + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, "plugin_credentials[%p]: request %p: invoking plugin", + c, pending_request); + } grpc_call_credentials_ref(creds); grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX]; size_t num_creds_md = 0; @@ -167,6 +185,12 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, plugin_md_request_metadata_ready, pending_request, creds_md, &num_creds_md, &status, &error_details)) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin will return " + "asynchronously", + c, pending_request); + } return false; // Asynchronous return. } // Returned synchronously. @@ -176,8 +200,20 @@ static bool plugin_get_request_metadata(grpc_exec_ctx *exec_ctx, // asynchronously by plugin_cancel_get_request_metadata(), so return // false. Otherwise, process the result. 
if (pending_request->cancelled) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p was cancelled, error " + "will be returned asynchronously", + c, pending_request); + } retval = false; } else { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, + "plugin_credentials[%p]: request %p: plugin returned " + "synchronously", + c, pending_request); + } *error = process_plugin_result(exec_ctx, pending_request, creds_md, num_creds_md, status, error_details); } @@ -201,6 +237,10 @@ static void plugin_cancel_get_request_metadata( c->pending_requests; pending_request != NULL; pending_request = pending_request->next) { if (pending_request->md_array == md_array) { + if (GRPC_TRACER_ON(grpc_plugin_credentials_trace)) { + gpr_log(GPR_INFO, "plugin_credentials[%p]: cancelling request %p", c, + pending_request); + } pending_request->cancelled = true; GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, GRPC_ERROR_REF(error)); diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.h b/src/core/lib/security/credentials/plugin/plugin_credentials.h index 57266d589a4..f56df9eac5a 100644 --- a/src/core/lib/security/credentials/plugin/plugin_credentials.h +++ b/src/core/lib/security/credentials/plugin/plugin_credentials.h @@ -21,6 +21,8 @@ #include "src/core/lib/security/credentials/credentials.h" +extern grpc_tracer_flag grpc_plugin_credentials_trace; + struct grpc_plugin_credentials; typedef struct grpc_plugin_credentials_pending_request { diff --git a/src/core/lib/surface/init_secure.c b/src/core/lib/surface/init_secure.c index 2366c24910c..8fbde3d1b4a 100644 --- a/src/core/lib/surface/init_secure.c +++ b/src/core/lib/surface/init_secure.c @@ -25,6 +25,7 @@ #include "src/core/lib/debug/trace.h" #include "src/core/lib/security/credentials/credentials.h" +#include "src/core/lib/security/credentials/plugin/plugin_credentials.h" #include 
"src/core/lib/security/transport/auth_filters.h" #include "src/core/lib/security/transport/secure_endpoint.h" #include "src/core/lib/security/transport/security_connector.h" @@ -84,4 +85,7 @@ void grpc_register_security_filters(void) { maybe_prepend_server_auth_filter, NULL); } -void grpc_security_init() { grpc_security_register_handshaker_factories(); } +void grpc_security_init() { + grpc_security_register_handshaker_factories(); + grpc_register_tracer(&grpc_plugin_credentials_trace); +} From 6bf31841276d7b9d2c449da279c0de47c6850a82 Mon Sep 17 00:00:00 2001 From: yang-g Date: Tue, 26 Sep 2017 16:40:45 -0700 Subject: [PATCH 067/109] Fix bad_client_window_overflow_test at mac --- test/core/bad_client/bad_client.c | 11 ++++++++--- test/core/bad_client/bad_client.h | 1 + test/core/bad_client/tests/window_overflow.c | 3 ++- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c index 383d1240cb0..fff0c793ed6 100644 --- a/test/core/bad_client/bad_client.c +++ b/test/core/bad_client/bad_client.c @@ -134,9 +134,12 @@ void grpc_run_bad_client_test( grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure); grpc_exec_ctx_finish(&exec_ctx); - /* Await completion */ - GPR_ASSERT( - gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(5))); + /* Await completion, unless the request is large and write may not finish + * before the peer shuts down. 
*/ + if (!(flags & GRPC_BAD_CLIENT_LARGE_REQUEST)) { + GPR_ASSERT( + gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(5))); + } if (flags & GRPC_BAD_CLIENT_DISCONNECT) { grpc_endpoint_shutdown( @@ -186,6 +189,8 @@ void grpc_run_bad_client_test( grpc_exec_ctx_finish(&exec_ctx); } + GPR_ASSERT( + gpr_event_wait(&a.done_write, grpc_timeout_seconds_to_deadline(1))); shutdown_cq = grpc_completion_queue_create_for_pluck(NULL); grpc_server_shutdown_and_notify(a.server, shutdown_cq, NULL); GPR_ASSERT(grpc_completion_queue_pluck( diff --git a/test/core/bad_client/bad_client.h b/test/core/bad_client/bad_client.h index 22f1a3abc7a..a5b01f7f2c1 100644 --- a/test/core/bad_client/bad_client.h +++ b/test/core/bad_client/bad_client.h @@ -37,6 +37,7 @@ typedef bool (*grpc_bad_client_client_stream_validator)( grpc_slice_buffer *incoming); #define GRPC_BAD_CLIENT_DISCONNECT 1 +#define GRPC_BAD_CLIENT_LARGE_REQUEST 2 /* Test runner. diff --git a/test/core/bad_client/tests/window_overflow.c b/test/core/bad_client/tests/window_overflow.c index 1f29bd32fbd..18c647ad8a7 100644 --- a/test/core/bad_client/tests/window_overflow.c +++ b/test/core/bad_client/tests/window_overflow.c @@ -90,7 +90,8 @@ int main(int argc, char **argv) { addbuf(message, sizeof(message)); } } - grpc_run_bad_client_test(verifier, NULL, g_buffer, g_count, 0); + grpc_run_bad_client_test(verifier, NULL, g_buffer, g_count, + GRPC_BAD_CLIENT_LARGE_REQUEST); gpr_free(g_buffer); return 0; From 3069fc5eb8f8cde9187fad84d84ed507a769dc47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Vo=C3=9F?= Date: Wed, 27 Sep 2017 09:43:14 +0200 Subject: [PATCH 068/109] Increase reference count on state used in tcp connect. The state is used both in the callback for the actual connect as well as in the additional timeout that is setup for the operation. Both code paths decrease the reference count and if they happen to be queued at the same time, memory is corrupted. 
Subsequent behavior is undefined and segfaults can be observed as a result. Fixes #12608 --- src/core/lib/iomgr/tcp_client_uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index 786c456b735..f2b23aae2e6 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -145,7 +145,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, connect->resource_quota = resource_quota; uv_tcp_init(uv_default_loop(), connect->tcp_handle); connect->connect_req.data = connect; - connect->refs = 1; + connect->refs = 2; // One for the connect operation, one for the timer. if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting", From cb799c5132462dc99f49c35831d0826828081719 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 27 Sep 2017 17:35:14 +0000 Subject: [PATCH 069/109] Fix http_proxy proxy mapper to not set proxy_name when returning false. 
--- .../ext/filters/client_channel/http_proxy.c | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index c507a2750e2..eee16884eed 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -98,18 +98,12 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, "'http_proxy' environment variable set, but cannot " "parse server URI '%s' -- not using proxy", server_uri); - if (uri != NULL) { - gpr_free(user_cred); - grpc_uri_destroy(uri); - } - return false; + goto no_use_proxy; } if (strcmp(uri->scheme, "unix") == 0) { gpr_log(GPR_INFO, "not using proxy for Unix domain socket '%s'", server_uri); - gpr_free(user_cred); - grpc_uri_destroy(uri); - return false; + goto no_use_proxy; } char* no_proxy_str = gpr_getenv("no_proxy"); if (no_proxy_str != NULL) { @@ -147,12 +141,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, gpr_free(no_proxy_hosts); gpr_free(server_host); gpr_free(server_port); - if (!use_proxy) { - grpc_uri_destroy(uri); - gpr_free(*name_to_resolve); - *name_to_resolve = NULL; - return false; - } + if (!use_proxy) goto no_use_proxy; } } grpc_arg args_to_add[2]; @@ -173,9 +162,15 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, } else { *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 1); } - gpr_free(user_cred); grpc_uri_destroy(uri); + gpr_free(user_cred); return true; +no_use_proxy: + if (uri != NULL) grpc_uri_destroy(uri); + gpr_free(*name_to_resolve); + *name_to_resolve = NULL; + gpr_free(user_cred); + return false; } static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx, From 1c34d1d0d87d950d65fa9666d24397bc4ab2c2f9 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Wed, 27 Sep 2017 12:25:19 -0700 Subject: [PATCH 070/109] Fix C++ goto problem. 
--- src/core/ext/filters/client_channel/http_proxy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index eee16884eed..a16b44d3dc3 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -91,6 +91,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, char* user_cred = NULL; *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred); if (*name_to_resolve == NULL) return false; + char* no_proxy_str = NULL; grpc_uri* uri = grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */); if (uri == NULL || uri->path[0] == '\0') { @@ -105,7 +106,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, server_uri); goto no_use_proxy; } - char* no_proxy_str = gpr_getenv("no_proxy"); + no_proxy_str = gpr_getenv("no_proxy"); if (no_proxy_str != NULL) { static const char* NO_PROXY_SEPARATOR = ","; bool use_proxy = true; From fe40815682f7da0a7d6f09be2253d6660ef8e7a5 Mon Sep 17 00:00:00 2001 From: Juanli Shen Date: Wed, 27 Sep 2017 12:27:20 -0700 Subject: [PATCH 071/109] Readd grpclb fallback --- include/grpc++/support/channel_arguments.h | 6 + include/grpc/impl/codegen/grpc_types.h | 6 +- .../client_channel/lb_policy/grpclb/grpclb.c | 254 +++++++++++++----- .../client_channel/lb_policy_factory.c | 2 +- .../client_channel/lb_policy_factory.h | 2 +- src/cpp/common/channel_arguments.cc | 4 + test/cpp/end2end/grpclb_end2end_test.cc | 180 ++++++++++++- 7 files changed, 384 insertions(+), 70 deletions(-) diff --git a/include/grpc++/support/channel_arguments.h b/include/grpc++/support/channel_arguments.h index 7b6befeaf1e..9dc505f0082 100644 --- a/include/grpc++/support/channel_arguments.h +++ b/include/grpc++/support/channel_arguments.h @@ -64,6 +64,12 @@ class ChannelArguments { /// Set the compression algorithm for the channel. 
void SetCompressionAlgorithm(grpc_compression_algorithm algorithm); + /// Set the grpclb fallback timeout (in ms) for the channel. If this amount + /// of time has passed but we have not gotten any non-empty \a serverlist from + /// the balancer, we will fall back to use the backend address(es) returned by + /// the resolver. + void SetGrpclbFallbackTimeout(int fallback_timeout); + /// Set the socket mutator for the channel. void SetSocketMutator(grpc_socket_mutator* mutator); diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h index 90f03f49a3e..65463bb33b6 100644 --- a/include/grpc/impl/codegen/grpc_types.h +++ b/include/grpc/impl/codegen/grpc_types.h @@ -288,7 +288,11 @@ typedef struct { "grpc.experimental.tcp_max_read_chunk_size" /* Timeout in milliseconds to use for calls to the grpclb load balancer. If 0 or unset, the balancer calls will have no deadline. */ -#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_timeout_ms" +#define GRPC_ARG_GRPCLB_CALL_TIMEOUT_MS "grpc.grpclb_call_timeout_ms" +/* Timeout in milliseconds to wait for the serverlist from the grpclb load + balancer before using fallback backend addresses from the resolver. + If 0, fallback will never be used. 
*/ +#define GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS "grpc.grpclb_fallback_timeout_ms" /** If non-zero, grpc server's cronet compression workaround will be enabled */ #define GRPC_ARG_WORKAROUND_CRONET_COMPRESSION \ "grpc.workaround.cronet_compression" diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index 05a106f214d..8dc81b46d13 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -123,6 +123,7 @@ #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120 #define GRPC_GRPCLB_RECONNECT_JITTER 0.2 +#define GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS 10000 grpc_tracer_flag grpc_lb_glb_trace = GRPC_TRACER_INITIALIZER(false, "glb"); @@ -299,6 +300,10 @@ typedef struct glb_lb_policy { /** timeout in milliseconds for the LB call. 0 means no deadline. */ int lb_call_timeout_ms; + /** timeout in milliseconds for before using fallback backend addresses. + * 0 means not using fallback. */ + int lb_fallback_timeout_ms; + /** for communicating with the LB server */ grpc_channel *lb_channel; @@ -325,6 +330,9 @@ typedef struct glb_lb_policy { * Otherwise, we delegate to the RR policy. */ size_t serverlist_index; + /** stores the backend addresses from the resolver */ + grpc_lb_addresses *fallback_backend_addresses; + /** list of picks that are waiting on RR's policy connectivity */ pending_pick *pending_picks; @@ -345,6 +353,9 @@ typedef struct glb_lb_policy { /** is \a lb_call_retry_timer active? */ bool retry_timer_active; + /** is \a lb_fallback_timer active? */ + bool fallback_timer_active; + /** called upon changes to the LB channel's connectivity. */ grpc_closure lb_channel_on_connectivity_changed; @@ -364,6 +375,9 @@ typedef struct glb_lb_policy { /* LB call retry timer callback. */ grpc_closure lb_on_call_retry; + /* LB fallback timer callback. 
*/ + grpc_closure lb_on_fallback; + grpc_call *lb_call; /* streaming call to the LB server, */ grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */ @@ -387,6 +401,9 @@ typedef struct glb_lb_policy { /** LB call retry timer */ grpc_timer lb_call_retry_timer; + /** LB fallback timer */ + grpc_timer lb_fallback_timer; + bool seen_initial_response; /* Stats for client-side load reporting. Should be unreffed and @@ -532,6 +549,32 @@ static grpc_lb_addresses *process_serverlist_locked( return lb_addresses; } +/* Returns the backend addresses extracted from the given addresses */ +static grpc_lb_addresses *extract_backend_addresses_locked( + grpc_exec_ctx *exec_ctx, const grpc_lb_addresses *addresses) { + /* first pass: count the number of backend addresses */ + size_t num_backends = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (!addresses->addresses[i].is_balancer) { + ++num_backends; + } + } + /* second pass: actually populate the addresses and (empty) LB tokens */ + grpc_lb_addresses *backend_addresses = + grpc_lb_addresses_create(num_backends, &lb_token_vtable); + size_t num_copied = 0; + for (size_t i = 0; i < addresses->num_addresses; ++i) { + if (addresses->addresses[i].is_balancer) continue; + const grpc_resolved_address *addr = &addresses->addresses[i].address; + grpc_lb_addresses_set_address(backend_addresses, num_copied, &addr->addr, + addr->len, false /* is_balancer */, + NULL /* balancer_name */, + (void *)GRPC_MDELEM_LB_TOKEN_EMPTY.payload); + ++num_copied; + } + return backend_addresses; +} + static void update_lb_connectivity_status_locked( grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, grpc_connectivity_state rr_state, grpc_error *rr_state_error) { @@ -599,35 +642,38 @@ static bool pick_from_internal_rr_locked( grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, const grpc_lb_policy_pick_args *pick_args, bool force_async, grpc_connected_subchannel **target, wrapped_rr_closure_arg *wc_arg) { - // Look at 
the index into the serverlist to see if we should drop this call. - grpc_grpclb_server *server = - glb_policy->serverlist->servers[glb_policy->serverlist_index++]; - if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) { - glb_policy->serverlist_index = 0; // Wrap-around. - } - if (server->drop) { - // Not using the RR policy, so unref it. - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")", - (intptr_t)wc_arg->rr_policy); + // Check for drops if we are not using fallback backend addresses. + if (glb_policy->serverlist != NULL) { + // Look at the index into the serverlist to see if we should drop this call. + grpc_grpclb_server *server = + glb_policy->serverlist->servers[glb_policy->serverlist_index++]; + if (glb_policy->serverlist_index == glb_policy->serverlist->num_servers) { + glb_policy->serverlist_index = 0; // Wrap-around. } - GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); - // Update client load reporting stats to indicate the number of - // dropped calls. Note that we have to do this here instead of in - // the client_load_reporting filter, because we do not create a - // subchannel call (and therefore no client_load_reporting filter) - // for dropped calls. - grpc_grpclb_client_stats_add_call_dropped_locked(server->load_balance_token, - wc_arg->client_stats); - grpc_grpclb_client_stats_unref(wc_arg->client_stats); - if (force_async) { - GPR_ASSERT(wc_arg->wrapped_closure != NULL); - GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); + if (server->drop) { + // Not using the RR policy, so unref it. + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, "Unreffing RR for drop (0x%" PRIxPTR ")", + (intptr_t)wc_arg->rr_policy); + } + GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync"); + // Update client load reporting stats to indicate the number of + // dropped calls. 
Note that we have to do this here instead of in + // the client_load_reporting filter, because we do not create a + // subchannel call (and therefore no client_load_reporting filter) + // for dropped calls. + grpc_grpclb_client_stats_add_call_dropped_locked( + server->load_balance_token, wc_arg->client_stats); + grpc_grpclb_client_stats_unref(wc_arg->client_stats); + if (force_async) { + GPR_ASSERT(wc_arg->wrapped_closure != NULL); + GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE); + gpr_free(wc_arg->free_when_done); + return false; + } gpr_free(wc_arg->free_when_done); - return false; + return true; } - gpr_free(wc_arg->free_when_done); - return true; } // Pick via the RR policy. const bool pick_done = grpc_lb_policy_pick_locked( @@ -665,8 +711,18 @@ static bool pick_from_internal_rr_locked( static grpc_lb_policy_args *lb_policy_args_create(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { - grpc_lb_addresses *addresses = - process_serverlist_locked(exec_ctx, glb_policy->serverlist); + grpc_lb_addresses *addresses; + if (glb_policy->serverlist != NULL) { + GPR_ASSERT(glb_policy->serverlist->num_servers > 0); + addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist); + } else { + // If rr_handover_locked() is invoked when we haven't received any + // serverlist from the balancer, we use the fallback backends returned by + // the resolver. Note that the fallback backend list may be empty, in which + // case the new round_robin policy will keep the requested picks pending. 
+ GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + addresses = grpc_lb_addresses_copy(glb_policy->fallback_backend_addresses); + } GPR_ASSERT(addresses != NULL); grpc_lb_policy_args *args = (grpc_lb_policy_args *)gpr_zalloc(sizeof(*args)); args->client_channel_factory = glb_policy->cc_factory; @@ -772,8 +828,6 @@ static void create_rr_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy, /* glb_policy->rr_policy may be NULL (initial handover) */ static void rr_handover_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { - GPR_ASSERT(glb_policy->serverlist != NULL && - glb_policy->serverlist->num_servers > 0); if (glb_policy->shutting_down) return; grpc_lb_policy_args *args = lb_policy_args_create(exec_ctx, glb_policy); GPR_ASSERT(args != NULL); @@ -922,6 +976,9 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) { if (glb_policy->serverlist != NULL) { grpc_grpclb_destroy_serverlist(glb_policy->serverlist); } + if (glb_policy->fallback_backend_addresses != NULL) { + grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); + } grpc_fake_resolver_response_generator_unref(glb_policy->response_generator); grpc_subchannel_index_unref(); if (glb_policy->pending_update_args != NULL) { @@ -1063,10 +1120,28 @@ static void glb_cancel_picks_locked(grpc_exec_ctx *exec_ctx, GRPC_ERROR_UNREF(error); } +static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error); static void query_for_backends_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy); static void start_picking_locked(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) { + /* start a timer to fall back */ + if (glb_policy->lb_fallback_timeout_ms > 0 && + glb_policy->serverlist == NULL && !glb_policy->fallback_timer_active) { + gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); + gpr_timespec deadline = gpr_time_add( + now, + gpr_time_from_millis(glb_policy->lb_fallback_timeout_ms, GPR_TIMESPAN)); + 
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer"); + GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked, + glb_policy, + grpc_combiner_scheduler(glb_policy->base.combiner)); + glb_policy->fallback_timer_active = true; + grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline, + &glb_policy->lb_on_fallback, now); + } + glb_policy->started_picking = true; gpr_backoff_reset(&glb_policy->lb_call_backoff_state); query_for_backends_locked(exec_ctx, glb_policy); @@ -1545,6 +1620,15 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, if (glb_policy->serverlist != NULL) { /* dispose of the old serverlist */ grpc_grpclb_destroy_serverlist(glb_policy->serverlist); + } else { + /* or dispose of the fallback */ + grpc_lb_addresses_destroy(exec_ctx, + glb_policy->fallback_backend_addresses); + glb_policy->fallback_backend_addresses = NULL; + if (glb_policy->fallback_timer_active) { + grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer); + glb_policy->fallback_timer_active = false; + } } /* and update the copy in the glb_lb_policy instance. This * serverlist instance will be destroyed either upon the next @@ -1555,9 +1639,7 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, } } else { if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Received empty server list. 
Picks will stay pending until " - "a response with > 0 servers is received"); + gpr_log(GPR_INFO, "Received empty server list, ignoring."); } grpc_grpclb_destroy_serverlist(serverlist); } @@ -1592,6 +1674,27 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, } } +static void lb_on_fallback_timer_locked(grpc_exec_ctx *exec_ctx, void *arg, + grpc_error *error) { + glb_lb_policy *glb_policy = (glb_lb_policy *)arg; + glb_policy->fallback_timer_active = false; + /* If we receive a serverlist after the timer fires but before this callback + * actually runs, don't fall back. */ + if (glb_policy->serverlist == NULL) { + if (!glb_policy->shutting_down && error == GRPC_ERROR_NONE) { + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, + "Falling back to use backends from resolver (grpclb %p)", + (void *)glb_policy); + } + GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + rr_handover_locked(exec_ctx, glb_policy); + } + } + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, + "grpclb_fallback_timer"); +} + static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) { glb_lb_policy *glb_policy = (glb_lb_policy *)arg; @@ -1616,31 +1719,22 @@ static void lb_on_server_status_received_locked(grpc_exec_ctx *exec_ctx, } } +static void fallback_update_locked(grpc_exec_ctx *exec_ctx, + glb_lb_policy *glb_policy, + const grpc_lb_addresses *addresses) { + GPR_ASSERT(glb_policy->fallback_backend_addresses != NULL); + grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses); + glb_policy->fallback_backend_addresses = + extract_backend_addresses_locked(exec_ctx, addresses); + if (glb_policy->lb_fallback_timeout_ms > 0 && + !glb_policy->fallback_timer_active) { + rr_handover_locked(exec_ctx, glb_policy); + } +} + static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, const grpc_lb_policy_args *args) { glb_lb_policy *glb_policy = (glb_lb_policy 
*)policy; - if (glb_policy->updating_lb_channel) { - if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { - gpr_log(GPR_INFO, - "Update already in progress for grpclb %p. Deferring update.", - (void *)glb_policy); - } - if (glb_policy->pending_update_args != NULL) { - grpc_channel_args_destroy(exec_ctx, - glb_policy->pending_update_args->args); - gpr_free(glb_policy->pending_update_args); - } - glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc( - sizeof(*glb_policy->pending_update_args)); - glb_policy->pending_update_args->client_channel_factory = - args->client_channel_factory; - glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args); - glb_policy->pending_update_args->combiner = args->combiner; - return; - } - - glb_policy->updating_lb_channel = true; - // Propagate update to lb_channel (pick first). const grpc_arg *arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); if (arg == NULL || arg->type != GRPC_ARG_POINTER) { @@ -1658,13 +1752,43 @@ static void glb_update_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy, "ignoring.", (void *)glb_policy); } + return; } const grpc_lb_addresses *addresses = (const grpc_lb_addresses *)arg->value.pointer.p; + + if (glb_policy->serverlist == NULL) { + // If a non-empty serverlist hasn't been received from the balancer, + // propagate the update to fallback_backend_addresses. + fallback_update_locked(exec_ctx, glb_policy, addresses); + } else if (glb_policy->updating_lb_channel) { + // If we have recieved serverlist from the balancer, we need to defer update + // when there is an in-progress one. + if (GRPC_TRACER_ON(grpc_lb_glb_trace)) { + gpr_log(GPR_INFO, + "Update already in progress for grpclb %p. 
Deferring update.", + (void *)glb_policy); + } + if (glb_policy->pending_update_args != NULL) { + grpc_channel_args_destroy(exec_ctx, + glb_policy->pending_update_args->args); + gpr_free(glb_policy->pending_update_args); + } + glb_policy->pending_update_args = (grpc_lb_policy_args *)gpr_zalloc( + sizeof(*glb_policy->pending_update_args)); + glb_policy->pending_update_args->client_channel_factory = + args->client_channel_factory; + glb_policy->pending_update_args->args = grpc_channel_args_copy(args->args); + glb_policy->pending_update_args->combiner = args->combiner; + return; + } + + glb_policy->updating_lb_channel = true; GPR_ASSERT(glb_policy->lb_channel != NULL); grpc_channel_args *lb_channel_args = build_lb_channel_args( exec_ctx, addresses, glb_policy->response_generator, args->args); - /* Propagate updates to the LB channel through the fake resolver */ + /* Propagate updates to the LB channel (pick first) through the fake resolver + */ grpc_fake_resolver_response_generator_set_response( exec_ctx, glb_policy->response_generator, lb_channel_args); grpc_channel_args_destroy(exec_ctx, lb_channel_args); @@ -1767,13 +1891,7 @@ static const grpc_lb_policy_vtable glb_lb_policy_vtable = { static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, grpc_lb_policy_factory *factory, grpc_lb_policy_args *args) { - /* Count the number of gRPC-LB addresses. There must be at least one. - * TODO(roth): For now, we ignore non-balancer addresses, but in the - * future, we may change the behavior such that we fall back to using - * the non-balancer addresses if we cannot reach any balancers. In the - * fallback case, we should use the LB policy indicated by - * GRPC_ARG_LB_POLICY_NAME (although if that specifies grpclb or is - * unset, we should default to pick_first). */ + /* Count the number of gRPC-LB addresses. There must be at least one. 
*/ const grpc_arg *arg = grpc_channel_args_find(args->args, GRPC_ARG_LB_ADDRESSES); if (arg == NULL || arg->type != GRPC_ARG_POINTER) { @@ -1809,6 +1927,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_policy->lb_call_timeout_ms = grpc_channel_arg_get_integer(arg, (grpc_integer_options){0, 0, INT_MAX}); + arg = grpc_channel_args_find(args->args, GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS); + glb_policy->lb_fallback_timeout_ms = grpc_channel_arg_get_integer( + arg, (grpc_integer_options){GRPC_GRPCLB_DEFAULT_FALLBACK_TIMEOUT_MS, 0, + INT_MAX}); + // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, // since we use this to trigger the client_load_reporting filter. grpc_arg new_arg = grpc_channel_arg_string_create( @@ -1817,6 +1940,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, glb_policy->args = grpc_channel_args_copy_and_add_and_remove( args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); + /* Extract the backend addresses (may be empty) from the resolver for + * fallback. 
*/ + glb_policy->fallback_backend_addresses = + extract_backend_addresses_locked(exec_ctx, addresses); + /* Create a client channel over them to communicate with a LB service */ glb_policy->response_generator = grpc_fake_resolver_response_generator_create(); diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c index 4d1405454c9..05ab43d0b69 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.c +++ b/src/core/ext/filters/client_channel/lb_policy_factory.c @@ -56,7 +56,7 @@ grpc_lb_addresses* grpc_lb_addresses_copy(const grpc_lb_addresses* addresses) { } void grpc_lb_addresses_set_address(grpc_lb_addresses* addresses, size_t index, - void* address, size_t address_len, + const void* address, size_t address_len, bool is_balancer, const char* balancer_name, void* user_data) { GPR_ASSERT(index < addresses->num_addresses); diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h index 9d9fb143df5..cf0f8cb6157 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.h +++ b/src/core/ext/filters/client_channel/lb_policy_factory.h @@ -73,7 +73,7 @@ grpc_lb_addresses *grpc_lb_addresses_copy(const grpc_lb_addresses *addresses); * \a address is a socket address of length \a address_len. * Takes ownership of \a balancer_name. 
*/ void grpc_lb_addresses_set_address(grpc_lb_addresses *addresses, size_t index, - void *address, size_t address_len, + const void *address, size_t address_len, bool is_balancer, const char *balancer_name, void *user_data); diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc index f130aecd4b5..f89f5f1f03d 100644 --- a/src/cpp/common/channel_arguments.cc +++ b/src/cpp/common/channel_arguments.cc @@ -86,6 +86,10 @@ void ChannelArguments::SetCompressionAlgorithm( SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, algorithm); } +void ChannelArguments::SetGrpclbFallbackTimeout(int fallback_timeout) { + SetInt(GRPC_ARG_GRPCLB_FALLBACK_TIMEOUT_MS, fallback_timeout); +} + void ChannelArguments::SetSocketMutator(grpc_socket_mutator* mutator) { if (!mutator) { return; diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc index 77ed1552923..f73a9c17917 100644 --- a/test/cpp/end2end/grpclb_end2end_test.cc +++ b/test/cpp/end2end/grpclb_end2end_test.cc @@ -368,8 +368,9 @@ class GrpclbEnd2endTest : public ::testing::Test { grpc_fake_resolver_response_generator_unref(response_generator_); } - void ResetStub() { + void ResetStub(int fallback_timeout = 0) { ChannelArguments args; + args.SetGrpclbFallbackTimeout(fallback_timeout); args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR, response_generator_); std::ostringstream uri; @@ -470,10 +471,10 @@ class GrpclbEnd2endTest : public ::testing::Test { grpc_exec_ctx_finish(&exec_ctx); } - const std::vector GetBackendPorts() const { + const std::vector GetBackendPorts(const size_t start_index = 0) const { std::vector backend_ports; - for (const auto& bs : backend_servers_) { - backend_ports.push_back(bs.port_); + for (size_t i = start_index; i < backend_servers_.size(); ++i) { + backend_ports.push_back(backend_servers_[i].port_); } return backend_ports; } @@ -642,6 +643,177 @@ TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) { EXPECT_EQ("grpclb", 
channel_->GetLoadBalancingPolicyName()); } +TEST_F(SingleBalancerTest, Fallback) { + const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); + const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor(); + const size_t kNumBackendInResolution = backends_.size() / 2; + + ResetStub(kFallbackTimeoutMs); + std::vector addresses; + addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""}); + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + addresses.emplace_back(AddressData{backend_servers_[i].port_, false, ""}); + } + SetNextResolution(addresses); + + // Send non-empty serverlist only after kServerlistDelayMs. + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends( + GetBackendPorts(kNumBackendInResolution /* start_index */), {}), + kServerlistDelayMs); + + // Wait until all the fallback backends are reachable. + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + WaitForBackend(i); + } + + // The first request. + gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(kNumBackendInResolution); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + + // Fallback is used: each backend returned by the resolver should have + // gotten one request. + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + + // Wait until the serverlist reception has been processed and all backends + // in the serverlist are reachable. + for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) { + WaitForBackend(i); + } + + // Send out the second request. 
+ gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); + CheckRpcSendOk(backends_.size() - kNumBackendInResolution); + gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); + + // Serverlist is used: each backend returned by the balancer should + // have gotten one request. + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) { + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + } + + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. + EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); +} + +TEST_F(SingleBalancerTest, FallbackUpdate) { + const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor(); + const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor(); + const size_t kNumBackendInResolution = backends_.size() / 3; + const size_t kNumBackendInResolutionUpdate = backends_.size() / 3; + + ResetStub(kFallbackTimeoutMs); + std::vector addresses; + addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""}); + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + addresses.emplace_back(AddressData{backend_servers_[i].port_, false, ""}); + } + SetNextResolution(addresses); + + // Send non-empty serverlist only after kServerlistDelayMs. + ScheduleResponseForBalancer( + 0, BalancerServiceImpl::BuildResponseForBackends( + GetBackendPorts(kNumBackendInResolution + + kNumBackendInResolutionUpdate /* start_index */), + {}), + kServerlistDelayMs); + + // Wait until all the fallback backends are reachable. + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + WaitForBackend(i); + } + + // The first request. 
+ gpr_log(GPR_INFO, "========= BEFORE FIRST BATCH =========="); + CheckRpcSendOk(kNumBackendInResolution); + gpr_log(GPR_INFO, "========= DONE WITH FIRST BATCH =========="); + + // Fallback is used: each backend returned by the resolver should have + // gotten one request. + for (size_t i = 0; i < kNumBackendInResolution; ++i) { + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution; i < backends_.size(); ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + + addresses.clear(); + addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""}); + for (size_t i = kNumBackendInResolution; + i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) { + addresses.emplace_back(AddressData{backend_servers_[i].port_, false, ""}); + } + SetNextResolution(addresses); + + // Wait until the resolution update has been processed and all the new + // fallback backends are reachable. + for (size_t i = kNumBackendInResolution; + i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) { + WaitForBackend(i); + } + + // Send out the second request. + gpr_log(GPR_INFO, "========= BEFORE SECOND BATCH =========="); + CheckRpcSendOk(kNumBackendInResolutionUpdate); + gpr_log(GPR_INFO, "========= DONE WITH SECOND BATCH =========="); + + // The resolution update is used: each backend in the resolution update should + // have gotten one request. 
+ for (size_t i = 0; i < kNumBackendInResolution; ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution; + i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) { + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate; + i < backends_.size(); ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + + // Wait until the serverlist reception has been processed and all backends + // in the serverlist are reachable. + for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate; + i < backends_.size(); ++i) { + WaitForBackend(i); + } + + // Send out the third request. + gpr_log(GPR_INFO, "========= BEFORE THIRD BATCH =========="); + CheckRpcSendOk(backends_.size() - kNumBackendInResolution - + kNumBackendInResolutionUpdate); + gpr_log(GPR_INFO, "========= DONE WITH THIRD BATCH =========="); + + // Serverlist is used: each backend returned by the balancer should + // have gotten one request. + for (size_t i = 0; + i < kNumBackendInResolution + kNumBackendInResolutionUpdate; ++i) { + EXPECT_EQ(0U, backend_servers_[i].service_->request_count()); + } + for (size_t i = kNumBackendInResolution + kNumBackendInResolutionUpdate; + i < backends_.size(); ++i) { + EXPECT_EQ(1U, backend_servers_[i].service_->request_count()); + } + + balancers_[0]->NotifyDoneWithServerlists(); + // The balancer got a single request. + EXPECT_EQ(1U, balancer_servers_[0].service_->request_count()); + // and sent a single response. 
+ EXPECT_EQ(1U, balancer_servers_[0].service_->response_count()); +} + TEST_F(SingleBalancerTest, BackendsRestart) { const size_t kNumRpcsPerAddress = 100; ScheduleResponseForBalancer( From 316855ef9857636f5501678a7feac7b9cf9441fc Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Wed, 27 Sep 2017 14:26:41 -0700 Subject: [PATCH 072/109] Don't stop on failure in post test steps --- tools/run_tests/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 86fd7b34b21..aafd8bba35a 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -1575,7 +1575,7 @@ def _build_and_run( suite_name=args.report_suite_name) number_failures, _ = jobset.run( - post_tests_steps, maxjobs=1, stop_on_failure=True, + post_tests_steps, maxjobs=1, stop_on_failure=False, newline_on_success=newline_on_success, travis=args.travis) out = [] From d847616e977b7f6d343d733dfadea9835e6d5a4c Mon Sep 17 00:00:00 2001 From: Yong Ni Date: Thu, 21 Sep 2017 18:31:09 -0700 Subject: [PATCH 073/109] Added step-by-step instrution on how to add a new release to grpc backward compatiblity test. --- tools/interop_matrix/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/interop_matrix/README.md b/tools/interop_matrix/README.md index f92dc690e2b..c2f354399f0 100644 --- a/tools/interop_matrix/README.md +++ b/tools/interop_matrix/README.md @@ -5,6 +5,21 @@ This directory contains scripts that facilitate building and running gRPC tests The setup builds gRPC docker images for each language/runtime and upload it to Google Container Registry (GCR). These images, encapsulating gRPC stack from specific releases/tag, are used to test version compatiblity between gRPC release versions. +## Step-by-step instructions for adding a new release to compatibility test +We have continuous nightly test setup to test gRPC backward compatibility between old clients and latest server. 
When a gRPC developer creates a new gRPC release, s/he is also responsible to add the just-released gRPC client to the nightly test. The steps are: +- Add (or update) an entry in ./client_matrix.py file to reference the github tag for the release. +- Build new client docker image(s). For example, for java release `v1.9.9`, do + - `tools/interop_matrix/create_matrix_images.py --git_checkout --release=v1.9.9 --language=java` +- Verify that the new docker image was built successfully and uploaded to GCR. For example, + - `gcloud beta container images list-tags gcr.io/grpc-testing/grpc_interop_java_oracle8` + - should show an image entry with tag `v1.9.9`. +- Verify the just-created docker client image would pass backward compatibility test (it should). For example, + - `gcloud docker -- pull gcr.io/grpc-testing/grpc_interop_java_oracle8:v1.9.9` followed by + - `docker_image=gcr.io/grpc-testing/grpc_interop_java_oracle8:v1.9.9 ./testcases/java__master` +- git commit the change and merge it to upstream/master. +- (Optional) clean up the tmp directory to where grpc source is cloned at `/export/hda3/tmp/grpc_matrix/`. +For more details on each step, refer to sections below. + ## Instructions for creating GCR images - Edit `./client_matrix.py` to include desired gRPC release. - Run `tools/interop_matrix/create_matrix_images.py`. Useful options: @@ -45,3 +60,4 @@ For example: Note: - File path starting with `tools/` or `template/` are relative to the grpc repo root dir. File path starting with `./` are relative to current directory (`tools/interop_matrix`). +- Creating and referencing images in GCR require read and write permission to Google Container Registry path gcr.io/grpc-testing. From 9e3a76ba717baf185918acb86871ae27b7810dbc Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Fri, 26 May 2017 18:30:25 -0700 Subject: [PATCH 074/109] Add c-ares resolver tests against GCE DNS, using the unit test. 
--- .../create_private_dns_zone.sh.template | 4 + .../create_private_dns_zone_defs.include | 32 ++ .../naming/private_dns_zone_init.sh.template | 4 + .../naming/private_dns_zone_init_defs.include | 40 ++ ...esolver_gce_integration_tests_defs.include | 64 ++++ ...r_gce_integration_tests_runner.sh.template | 4 + test/cpp/naming/README.md | 43 +++ test/cpp/naming/create_private_dns_zone.sh | 27 ++ test/cpp/naming/gen_build_yaml.py | 110 +++++- test/cpp/naming/private_dns_zone_init.sh | 215 +++++++++++ .../naming/resolver_component_tests_runner.sh | 26 +- .../resolver_gce_integration_tests_runner.sh | 359 ++++++++++++++++++ .../naming/resolver_test_record_groups.yaml | 2 +- test/cpp/naming/test_dns_server.py | 2 +- 14 files changed, 908 insertions(+), 24 deletions(-) create mode 100644 templates/test/cpp/naming/create_private_dns_zone.sh.template create mode 100644 templates/test/cpp/naming/create_private_dns_zone_defs.include create mode 100644 templates/test/cpp/naming/private_dns_zone_init.sh.template create mode 100644 templates/test/cpp/naming/private_dns_zone_init_defs.include create mode 100644 templates/test/cpp/naming/resolver_gce_integration_tests_defs.include create mode 100644 templates/test/cpp/naming/resolver_gce_integration_tests_runner.sh.template create mode 100644 test/cpp/naming/README.md create mode 100755 test/cpp/naming/create_private_dns_zone.sh create mode 100755 test/cpp/naming/private_dns_zone_init.sh create mode 100755 test/cpp/naming/resolver_gce_integration_tests_runner.sh diff --git a/templates/test/cpp/naming/create_private_dns_zone.sh.template b/templates/test/cpp/naming/create_private_dns_zone.sh.template new file mode 100644 index 00000000000..14324b098c5 --- /dev/null +++ b/templates/test/cpp/naming/create_private_dns_zone.sh.template @@ -0,0 +1,4 @@ +%YAML 1.2 +--- | + <%namespace file="create_private_dns_zone_defs.include" import="*"/>\ + ${create_private_dns_zone(resolver_gce_integration_tests_zone_id, 
resolver_tests_common_zone_name)} diff --git a/templates/test/cpp/naming/create_private_dns_zone_defs.include b/templates/test/cpp/naming/create_private_dns_zone_defs.include new file mode 100644 index 00000000000..465dd6394bf --- /dev/null +++ b/templates/test/cpp/naming/create_private_dns_zone_defs.include @@ -0,0 +1,32 @@ +<%def name="create_private_dns_zone(resolver_gce_integration_tests_zone_id, resolver_tests_common_zone_name)">#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +cd $(dirname $0)/../../.. 
+ +gcloud alpha dns managed-zones create \\ + + ${resolver_gce_integration_tests_zone_id} \\ + + --dns-name=${resolver_tests_common_zone_name} \\ + + --description="GCE-DNS-private-zone-for-GRPC-testing" \\ + + --visibility=private \\ + + --networks=default diff --git a/templates/test/cpp/naming/private_dns_zone_init.sh.template b/templates/test/cpp/naming/private_dns_zone_init.sh.template new file mode 100644 index 00000000000..d5ffd04add1 --- /dev/null +++ b/templates/test/cpp/naming/private_dns_zone_init.sh.template @@ -0,0 +1,4 @@ +%YAML 1.2 +--- | + <%namespace file="private_dns_zone_init_defs.include" import="*"/>\ + ${private_dns_zone_init(all_integration_test_records, resolver_gce_integration_tests_zone_id, resolver_tests_common_zone_name)} diff --git a/templates/test/cpp/naming/private_dns_zone_init_defs.include b/templates/test/cpp/naming/private_dns_zone_init_defs.include new file mode 100644 index 00000000000..06bc8adb94e --- /dev/null +++ b/templates/test/cpp/naming/private_dns_zone_init_defs.include @@ -0,0 +1,40 @@ +<%def name="private_dns_zone_init(records,resolver_gce_integration_tests_zone_id,resolver_tests_common_zone_name)">#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +cd $(dirname $0)/../../.. 
+ +gcloud dns record-sets transaction start -z=${resolver_gce_integration_tests_zone_id} + +% for r in records: +gcloud dns record-sets transaction add \\ + + -z=${resolver_gce_integration_tests_zone_id} \\ + + --name=${r['name']}.${resolver_tests_common_zone_name} \\ + + --type=${r['type']} \\ + + --ttl=${r['ttl']} \\ + + ${r['data']} + +% endfor +gcloud dns record-sets transaction describe -z=${resolver_gce_integration_tests_zone_id} +gcloud dns record-sets transaction execute -z=${resolver_gce_integration_tests_zone_id} +gcloud dns record-sets list -z=${resolver_gce_integration_tests_zone_id} diff --git a/templates/test/cpp/naming/resolver_gce_integration_tests_defs.include b/templates/test/cpp/naming/resolver_gce_integration_tests_defs.include new file mode 100644 index 00000000000..2413ec57d09 --- /dev/null +++ b/templates/test/cpp/naming/resolver_gce_integration_tests_defs.include @@ -0,0 +1,64 @@ +<%def name="resolver_gce_integration_tests(tests, records, resolver_tests_common_zone_name)">#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +if [[ "$GRPC_DNS_RESOLVER" == "" ]]; then + export GRPC_DNS_RESOLVER=ares +elif [[ "$GRPC_DNS_RESOLVER" != ares ]]; then + echo "Unexpected: GRPC_DNS_RESOLVER=$GRPC_DNS_RESOLVER. This test only works with c-ares resolver" + exit 1 +fi + +cd $(dirname $0)/../../.. 
+ +if [[ "$CONFIG" == "" ]]; then + export CONFIG=opt +fi +make resolver_component_test +echo "Sanity check DNS records are resolveable with dig:" +EXIT_CODE=0 + +% for r in records: +ONE_FAILED=0 +dig ${r['type']} ${r['name']}.${resolver_tests_common_zone_name} | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig ${r['type']} ${r['name']}.${resolver_tests_common_zone_name} FAILED" + exit 1 +fi + +% endfor +echo "Sanity check PASSED. Run resolver tests:" + +% for test in tests: +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \\ + + --target_name='${test['target_name']}' \\ + + --expected_addrs='${test['expected_addrs']}' \\ + + --expected_chosen_service_config='${test['expected_chosen_service_config']}' \\ + + --expected_lb_policy='${test['expected_lb_policy']}' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ${test['target_name']} FAILED" + EXIT_CODE=1 +fi + +% endfor +exit $EXIT_CODE diff --git a/templates/test/cpp/naming/resolver_gce_integration_tests_runner.sh.template b/templates/test/cpp/naming/resolver_gce_integration_tests_runner.sh.template new file mode 100644 index 00000000000..c728784d29a --- /dev/null +++ b/templates/test/cpp/naming/resolver_gce_integration_tests_runner.sh.template @@ -0,0 +1,4 @@ +%YAML 1.2 +--- | + <%namespace file="resolver_gce_integration_tests_defs.include" import="*"/>\ + ${resolver_gce_integration_tests(resolver_gce_integration_test_cases, all_integration_test_records, resolver_tests_common_zone_name)} diff --git a/test/cpp/naming/README.md b/test/cpp/naming/README.md new file mode 100644 index 00000000000..e33184620c0 --- /dev/null +++ b/test/cpp/naming/README.md @@ -0,0 +1,43 @@ +# Resolver Tests + +This directory has tests and infrastructure for unit tests and GCE +integration tests of gRPC resolver functionality. 
+ +There are two different tests here: + +## Resolver unit tests (resolver "component" tests) + +These tests run per-change, along with the rest of the grpc unit tests. +They query a local testing DNS server. + +## GCE integration tests + +These tests use the same test binary and the same test records +as the unit tests, but they run against GCE DNS (this is done by +running the test on a GCE instance and not specifying an authority +in uris). These tests run in a background job, which needs to be +actively monitored. + +## Making changes to test records + +After making a change to `resolver_test_record_groups.yaml`: + +1. Increment the "version number" in the `resolver_tests_common_zone_name` + DNS zone (this is a yaml field at the top + of `resolver_test_record_groups.yaml`). + +2. Regenerate projects. + +3. From the repo root, run: + +``` +$ test/cpp/naming/create_private_dns_zone.sh +$ test/cpp/naming/private_dns_zone_init.sh +``` + +Note that these commands must be run in an environment that +has access to the grpc-testing GCE project. + +If everything runs smoothly, then once the change is merged, +the GCE DNS integration testing job will transition to the +new records and continue passing. diff --git a/test/cpp/naming/create_private_dns_zone.sh b/test/cpp/naming/create_private_dns_zone.sh new file mode 100755 index 00000000000..3d7520b90a8 --- /dev/null +++ b/test/cpp/naming/create_private_dns_zone.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +cd $(dirname $0)/../../.. + +gcloud alpha dns managed-zones create \ + resolver-tests-version-1-grpctestingexp-zone-id \ + --dns-name=resolver-tests-version-1.grpctestingexp. \ + --description="GCE-DNS-private-zone-for-GRPC-testing" \ + --visibility=private \ + --networks=default diff --git a/test/cpp/naming/gen_build_yaml.py b/test/cpp/naming/gen_build_yaml.py index 3a51fef7a0e..91718156e9a 100755 --- a/test/cpp/naming/gen_build_yaml.py +++ b/test/cpp/naming/gen_build_yaml.py @@ -24,6 +24,12 @@ import json _LOCAL_DNS_SERVER_ADDRESS = '127.0.0.1:15353' +_TARGET_RECORDS_TO_SKIP_AGAINST_GCE = [ + # TODO: enable this once able to upload the very large TXT record + # in this group to GCE DNS. + 'ipv4-config-causing-fallback-to-tcp', +] + def _append_zone_name(name, zone_name): return '%s.%s' % (name, zone_name) @@ -33,21 +39,107 @@ def _build_expected_addrs_cmd_arg(expected_addrs): out.append('%s,%s' % (addr['address'], str(addr['is_balancer']))) return ';'.join(out) +def _data_for_type(r_type, r_data, common_zone_name): + if r_type in ['A', 'AAAA']: + return ' '.join(map(lambda x: '\"%s\"' % x, r_data)) + if r_type == 'SRV': + assert len(r_data) == 1 + target = r_data[0].split(' ')[3] + uploadable_target = '%s.%s' % (target, common_zone_name) + uploadable = r_data[0].split(' ') + uploadable[3] = uploadable_target + return '\"%s\"' % ' '.join(uploadable) + if r_type == 'TXT': + assert len(r_data) == 1 + chunks = [] + all_data = r_data[0] + cur = 0 + # Split TXT records that span more than 255 characters (the single + # string length-limit in DNS) into multiple strings. Each string + # needs to be wrapped with double-quotes, and all inner double-quotes + # are escaped. 
The wrapping double-quotes and inner backslashes can be + # counted towards the 255 character length limit (as observed with gcloud), + # so make sure all strings fit within that limit. + while len(all_data[cur:]) > 0: + next_chunk = '\"' + while len(next_chunk) < 254 and len(all_data[cur:]) > 0: + if all_data[cur] == '\"': + if len(next_chunk) < 253: + next_chunk += '\\\"' + else: + break + else: + next_chunk += all_data[cur] + cur += 1 + next_chunk += '\"' + if len(next_chunk) > 255: + raise Exception('Bug: next chunk is too long.') + chunks.append(next_chunk) + # Wrap the whole record in single quotes to make sure all strings + # are associated with the same TXT record (to make it one bash token for + # gcloud) + return '\'%s\'' % ' '.join(chunks) + +# Convert DNS records from their "within a test group" format +# of the yaml file to an easier form for the templates to use. +def _gcloud_uploadable_form(test_cases, common_zone_name): + out = [] + for group in test_cases: + if group['record_to_resolve'] in _TARGET_RECORDS_TO_SKIP_AGAINST_GCE: + continue + for record_name in group['records'].keys(): + r_ttl = None + all_r_data = {} + for r_data in group['records'][record_name]: + # enforce records have the same TTL only for simplicity + if r_ttl is None: + r_ttl = r_data['TTL'] + assert r_ttl == r_data['TTL'], '%s and %s differ' % (r_ttl, r_data['TTL']) + r_type = r_data['type'] + if all_r_data.get(r_type) is None: + all_r_data[r_type] = [] + all_r_data[r_type].append(r_data['data']) + for r_type in all_r_data.keys(): + for r in out: + assert r['name'] != record_name or r['type'] != r_type, 'attempt to add a duplicate record' + out.append({ + 'name': record_name, + 'ttl': r_ttl, + 'type': r_type, + 'data': _data_for_type(r_type, all_r_data[r_type], common_zone_name) + }) + return out + +def _gce_dns_zone_id(resolver_component_data): + dns_name = resolver_component_data['resolver_tests_common_zone_name'] + return dns_name.replace('.', '-') + 'zone-id' + +def 
_resolver_test_cases(resolver_component_data, records_to_skip): + out = [] + for test_case in resolver_component_data['resolver_component_tests']: + if test_case['record_to_resolve'] in records_to_skip: + continue + out.append({ + 'target_name': _append_zone_name(test_case['record_to_resolve'], + resolver_component_data['resolver_tests_common_zone_name']), + 'expected_addrs': _build_expected_addrs_cmd_arg(test_case['expected_addrs']), + 'expected_chosen_service_config': (test_case['expected_chosen_service_config'] or ''), + 'expected_lb_policy': (test_case['expected_lb_policy'] or ''), + }) + return out + def main(): resolver_component_data = '' with open('test/cpp/naming/resolver_test_record_groups.yaml') as f: resolver_component_data = yaml.load(f) json = { - 'resolver_component_test_cases': [ - { - 'target_name': _append_zone_name(test_case['record_to_resolve'], - resolver_component_data['resolver_component_tests_common_zone_name']), - 'expected_addrs': _build_expected_addrs_cmd_arg(test_case['expected_addrs']), - 'expected_chosen_service_config': (test_case['expected_chosen_service_config'] or ''), - 'expected_lb_policy': (test_case['expected_lb_policy'] or ''), - } for test_case in resolver_component_data['resolver_component_tests'] - ], + 'resolver_tests_common_zone_name': resolver_component_data['resolver_tests_common_zone_name'], + 'resolver_gce_integration_tests_zone_id': _gce_dns_zone_id(resolver_component_data), + 'all_integration_test_records': _gcloud_uploadable_form(resolver_component_data['resolver_component_tests'], + resolver_component_data['resolver_tests_common_zone_name']), + 'resolver_gce_integration_test_cases': _resolver_test_cases(resolver_component_data, _TARGET_RECORDS_TO_SKIP_AGAINST_GCE), + 'resolver_component_test_cases': _resolver_test_cases(resolver_component_data, []), 'targets': [ { 'name': 'resolver_component_test' + unsecure_build_config_suffix, diff --git a/test/cpp/naming/private_dns_zone_init.sh 
b/test/cpp/naming/private_dns_zone_init.sh new file mode 100755 index 00000000000..4eaf750ab71 --- /dev/null +++ b/test/cpp/naming/private_dns_zone_init.sh @@ -0,0 +1,215 @@ +#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +cd $(dirname $0)/../../.. + +gcloud dns record-sets transaction start -z=resolver-tests-version-1-grpctestingexp-zone-id + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 ipv4-single-target.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-single-target.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 ipv4-multi-target.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-multi-target.resolver-tests-version-1.grpctestingexp. 
\ + --type=A \ + --ttl=2100 \ + "1.2.3.5" "1.2.3.6" "1.2.3.7" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 ipv6-single-target.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv6-single-target.resolver-tests-version-1.grpctestingexp. \ + --type=AAAA \ + --ttl=2100 \ + "2607:f8b0:400a:801::1001" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 ipv6-multi-target.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv6-multi-target.resolver-tests-version-1.grpctestingexp. \ + --type=AAAA \ + --ttl=2100 \ + "2607:f8b0:400a:801::1002" "2607:f8b0:400a:801::1003" "2607:f8b0:400a:801::1004" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. \ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"SimpleService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. 
\ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. \ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"NoSrvSimpleService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. \ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"clientLanguage\":[\"python\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"PythonService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. 
\ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"percentage\":0,\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"CppService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. \ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"clientLanguage\":[\"go\"],\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"GoService\",\"waitForReady\":true}]}]}},{\"clientLanguage\":[\"c++\"],\"serviceConfig\":{" "\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"CppService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. 
\ + --type=TXT \ + --ttl=2100 \ + '"grpc_config=[{\"percentage\":0,\"serviceConfig\":{\"loadBalancingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"NeverPickedService\",\"waitForReady\":true}]}]}},{\"percentage\":100,\"serviceConfig\":{\"loadBalanc" "ingPolicy\":\"round_robin\",\"methodConfig\":[{\"name\":[{\"method\":\"Foo\",\"service\":\"AlwaysPickedService\",\"waitForReady\":true}]}]}}]"' + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \ + --type=A \ + --ttl=2100 \ + "1.2.3.4" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=_grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \ + --type=SRV \ + --ttl=2100 \ + "0 0 1234 balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp." + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. 
\ + --type=AAAA \ + --ttl=2100 \ + "2607:f8b0:400a:801::1002" + +gcloud dns record-sets transaction add \ + -z=resolver-tests-version-1-grpctestingexp-zone-id \ + --name=srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. \ + --type=AAAA \ + --ttl=2100 \ + "2607:f8b0:400a:801::1002" + +gcloud dns record-sets transaction describe -z=resolver-tests-version-1-grpctestingexp-zone-id +gcloud dns record-sets transaction execute -z=resolver-tests-version-1-grpctestingexp-zone-id +gcloud dns record-sets list -z=resolver-tests-version-1-grpctestingexp-zone-id diff --git a/test/cpp/naming/resolver_component_tests_runner.sh b/test/cpp/naming/resolver_component_tests_runner.sh index cf71c9dcf97..407db5ed668 100755 --- a/test/cpp/naming/resolver_component_tests_runner.sh +++ b/test/cpp/naming/resolver_component_tests_runner.sh @@ -73,7 +73,7 @@ EXIT_CODE=0 # in the resolver. $FLAGS_test_bin_path \ - --target_name='srv-ipv4-single-target.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:1234,True' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -81,7 +81,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv4-multi-target.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -89,7 +89,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv6-single-target.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='[2607:f8b0:400a:801::1001]:1234,True' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -97,7 +97,7 @@ $FLAGS_test_bin_path \ wait $! 
|| EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv6-multi-target.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -105,7 +105,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv4-simple-service-config.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:1234,True' \ --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]}]}' \ --expected_lb_policy='round_robin' \ @@ -113,7 +113,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-no-srv-simple-service-config.resolver-tests.grpctestingexp.' \ + --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService","waitForReady":true}]}]}' \ --expected_lb_policy='round_robin' \ @@ -121,7 +121,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-no-config-for-cpp.resolver-tests.grpctestingexp.' \ + --target_name='ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -129,7 +129,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests.grpctestingexp.' 
\ + --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -137,7 +137,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-second-language-is-cpp.resolver-tests.grpctestingexp.' \ + --target_name='ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}' \ --expected_lb_policy='round_robin' \ @@ -145,7 +145,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-config-with-percentages.resolver-tests.grpctestingexp.' \ + --target_name='ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService","waitForReady":true}]}]}' \ --expected_lb_policy='round_robin' \ @@ -153,7 +153,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:1234,True;1.2.3.4:443,False' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -161,7 +161,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests.grpctestingexp.' \ + --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' 
\ --expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False' \ --expected_chosen_service_config='' \ --expected_lb_policy='' \ @@ -169,7 +169,7 @@ $FLAGS_test_bin_path \ wait $! || EXIT_CODE=1 $FLAGS_test_bin_path \ - --target_name='ipv4-config-causing-fallback-to-tcp.resolver-tests.grpctestingexp.' \ + --target_name='ipv4-config-causing-fallback-to-tcp.resolver-tests-version-1.grpctestingexp.' \ --expected_addrs='1.2.3.4:443,False' \ --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwo","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooThree","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFour","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooFive","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSix","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooSeven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEight","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooNine","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTen","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooEleven","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]},{"name":[{"method":"FooTwelve","service":"SimpleService","waitForReady":true}]}]}' \ --expected_lb_policy='' \ diff --git a/test/cpp/naming/resolver_gce_integration_tests_runner.sh b/test/cpp/naming/resolver_gce_integration_tests_runner.sh new file mode 100755 index 00000000000..b20d18d9d10 --- /dev/null +++ 
b/test/cpp/naming/resolver_gce_integration_tests_runner.sh @@ -0,0 +1,359 @@ +#!/bin/bash +# Copyright 2015 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is auto-generated + +set -ex + +if [[ "$GRPC_DNS_RESOLVER" == "" ]]; then + export GRPC_DNS_RESOLVER=ares +elif [[ "$GRPC_DNS_RESOLVER" != ares ]]; then + echo "Unexpected: GRPC_DNS_RESOLVER=$GRPC_DNS_RESOLVER. This test only works with c-ares resolver" + exit 1 +fi + +cd $(dirname $0)/../../.. + +if [[ "$CONFIG" == "" ]]; then + export CONFIG=opt +fi +make resolver_component_test +echo "Sanity check DNS records are resolveable with dig:" +EXIT_CODE=0 + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. 
| grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig AAAA ipv6-single-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig AAAA ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig AAAA ipv6-multi-target.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig AAAA ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. 
FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. 
| grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig TXT ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig TXT ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. 
FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A balancer-for-ipv4-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig A srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig SRV _grpclb._tcp.srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig AAAA balancer-for-ipv6-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +ONE_FAILED=0 +dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. | grep 'ANSWER SECTION' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Sanity check: dig AAAA srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + exit 1 +fi + +echo "Sanity check PASSED. Run resolver tests:" + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp.' 
\ + --expected_addrs='1.2.3.4:1234,True' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv4-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.5:1234,True;1.2.3.6:1234,True;1.2.3.7:1234,True' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv4-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='[2607:f8b0:400a:801::1001]:1234,True' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv6-single-target.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1003]:1234,True;[2607:f8b0:400a:801::1004]:1234,True' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv6-multi-target.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp.' 
\ + --expected_addrs='1.2.3.4:1234,True' \ + --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"SimpleService","waitForReady":true}]}]}' \ + --expected_lb_policy='round_robin' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv4-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"NoSrvSimpleService","waitForReady":true}]}]}' \ + --expected_lb_policy='round_robin' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ipv4-no-srv-simple-service-config.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ipv4-no-config-for-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ipv4-cpp-config-has-zero-percentage.resolver-tests-version-1.grpctestingexp. 
FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"CppService","waitForReady":true}]}]}' \ + --expected_lb_policy='round_robin' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ipv4-second-language-is-cpp.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:443,False' \ + --expected_chosen_service_config='{"loadBalancingPolicy":"round_robin","methodConfig":[{"name":[{"method":"Foo","service":"AlwaysPickedService","waitForReady":true}]}]}' \ + --expected_lb_policy='round_robin' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: ipv4-config-with-percentages.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' \ + --expected_addrs='1.2.3.4:1234,True;1.2.3.4:443,False' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv4-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +ONE_FAILED=0 +bins/$CONFIG/resolver_component_test \ + --target_name='srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp.' 
\ + --expected_addrs='[2607:f8b0:400a:801::1002]:1234,True;[2607:f8b0:400a:801::1002]:443,False' \ + --expected_chosen_service_config='' \ + --expected_lb_policy='' || ONE_FAILED=1 +if [[ "$ONE_FAILED" != 0 ]]; then + echo "Test based on target record: srv-ipv6-target-has-backend-and-balancer.resolver-tests-version-1.grpctestingexp. FAILED" + EXIT_CODE=1 +fi + +exit $EXIT_CODE diff --git a/test/cpp/naming/resolver_test_record_groups.yaml b/test/cpp/naming/resolver_test_record_groups.yaml index 33d774ca701..2b3204335c4 100644 --- a/test/cpp/naming/resolver_test_record_groups.yaml +++ b/test/cpp/naming/resolver_test_record_groups.yaml @@ -1,4 +1,4 @@ -resolver_component_tests_common_zone_name: resolver-tests.grpctestingexp. +resolver_tests_common_zone_name: resolver-tests-version-1.grpctestingexp. resolver_component_tests: - expected_addrs: - {address: '1.2.3.4:1234', is_balancer: true} diff --git a/test/cpp/naming/test_dns_server.py b/test/cpp/naming/test_dns_server.py index 9d4b89cffb5..9f42f65ee61 100755 --- a/test/cpp/naming/test_dns_server.py +++ b/test/cpp/naming/test_dns_server.py @@ -66,7 +66,7 @@ def start_local_dns_server(args): with open(args.records_config_path) as config: test_records_config = yaml.load(config) - common_zone_name = test_records_config['resolver_component_tests_common_zone_name'] + common_zone_name = test_records_config['resolver_tests_common_zone_name'] for group in test_records_config['resolver_component_tests']: for name in group['records'].keys(): for record in group['records'][name]: From 55a807aec8f7b42b944225e104210c63fdf29d1c Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Thu, 28 Sep 2017 08:33:51 -0700 Subject: [PATCH 075/109] Fix race condition in composite credentials. 
--- .../credentials/composite/composite_credentials.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/core/lib/security/credentials/composite/composite_credentials.c b/src/core/lib/security/credentials/composite/composite_credentials.c index 09fd60a12c2..b67ff48d0f4 100644 --- a/src/core/lib/security/credentials/composite/composite_credentials.c +++ b/src/core/lib/security/credentials/composite/composite_credentials.c @@ -87,6 +87,7 @@ static bool composite_call_get_request_metadata( ctx->on_request_metadata = on_request_metadata; GRPC_CLOSURE_INIT(&ctx->internal_on_request_metadata, composite_call_metadata_cb, ctx, grpc_schedule_on_exec_ctx); + bool synchronous = true; while (ctx->creds_index < ctx->composite_creds->inner.num_creds) { grpc_call_credentials *inner_creds = ctx->composite_creds->inner.creds_array[ctx->creds_index++]; @@ -95,19 +96,12 @@ static bool composite_call_get_request_metadata( ctx->md_array, &ctx->internal_on_request_metadata, error)) { if (*error != GRPC_ERROR_NONE) break; } else { + synchronous = false; // Async return. break; } } - // If we got through all creds synchronously or we got a synchronous - // error on one of them, return synchronously. - if (ctx->creds_index == ctx->composite_creds->inner.num_creds || - *error != GRPC_ERROR_NONE) { - gpr_free(ctx); - return true; - } - // At least one inner cred is returning asynchronously, so we'll - // return asynchronously as well. - return false; + if (synchronous) gpr_free(ctx); + return synchronous; } static void composite_call_cancel_get_request_metadata( From 61b26f97b04b4fec82d6b9ea110ad7743a0ab178 Mon Sep 17 00:00:00 2001 From: "Mark D. Roth" Date: Thu, 28 Sep 2017 08:40:03 -0700 Subject: [PATCH 076/109] Code review changes. 
--- include/grpc/grpc_security.h | 2 +- src/cpp/client/secure_credentials.cc | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/grpc/grpc_security.h b/include/grpc/grpc_security.h index 42e2ab31117..95b14479354 100644 --- a/include/grpc/grpc_security.h +++ b/include/grpc/grpc_security.h @@ -249,7 +249,7 @@ typedef struct { void *reserved; } grpc_auth_metadata_context; -/** Maximum number of credentials returnable by a credentials plugin via +/** Maximum number of metadata entries returnable by a credentials plugin via a synchronous return. */ #define GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX 4 diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 6c06e453263..e2bd1f7747e 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -178,12 +178,12 @@ int MetadataCredentialsPluginWrapper::GetMetadata( w->thread_pool_->Add( std::bind(&MetadataCredentialsPluginWrapper::InvokePlugin, w, context, cb, user_data, nullptr, nullptr, nullptr, nullptr)); - return false; + return 0; } else { // Synchronous return. w->InvokePlugin(context, cb, user_data, creds_md, num_creds_md, status, error_details); - return true; + return 1; } } From fa90655822ed6e02502f9d05b0479e12829042b9 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 28 Sep 2017 10:24:22 -0700 Subject: [PATCH 077/109] Add coverage test to Kokoro --- tools/internal_ci/linux/grpc_coverage.cfg | 24 ++++++++++++++++++ tools/internal_ci/linux/grpc_coverage.sh | 30 +++++++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tools/internal_ci/linux/grpc_coverage.cfg create mode 100755 tools/internal_ci/linux/grpc_coverage.sh diff --git a/tools/internal_ci/linux/grpc_coverage.cfg b/tools/internal_ci/linux/grpc_coverage.cfg new file mode 100644 index 00000000000..56b7745b21f --- /dev/null +++ b/tools/internal_ci/linux/grpc_coverage.cfg @@ -0,0 +1,24 @@ +# Copyright 2017 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/linux/grpc_coverage.sh" +timeout_mins: 420 +action { + define_artifacts { + regex: "github/grpc/reports/**" + } +} diff --git a/tools/internal_ci/linux/grpc_coverage.sh b/tools/internal_ci/linux/grpc_coverage.sh new file mode 100755 index 00000000000..31623d2618c --- /dev/null +++ b/tools/internal_ci/linux/grpc_coverage.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ex + +# Enter the gRPC repo root +cd $(dirname $0)/../../.. 
+ +source tools/internal_ci/helper_scripts/prepare_build_linux_rc + +python tools/run_tests/run_tests.py \ + --use_docker \ + -t \ + -l all \ + -c gcov \ + -x report.xml \ + -j 16 + \ No newline at end of file From 8bba3bfc25383e87f76b0717cb4ed1113f07b9f0 Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Mon, 26 Jun 2017 16:33:25 +0000 Subject: [PATCH 078/109] Pin pip at 9.0.1 --- templates/tools/dockerfile/python_deps.include | 2 +- tools/distrib/pylint_code.sh | 2 +- tools/distrib/python/docgen.py | 2 +- tools/distrib/yapf_code.sh | 2 +- tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile | 2 +- .../interoptest/grpc_interop_csharpcoreclr/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_go/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_java/Dockerfile | 2 +- .../dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_node/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_python/Dockerfile | 2 +- tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile | 2 +- tools/dockerfile/test/csharp_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/cxx_alpine_x64/Dockerfile | 2 +- tools/dockerfile/test/cxx_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/cxx_jessie_x86/Dockerfile | 2 +- tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile | 2 +- tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile | 2 +- tools/dockerfile/test/fuzzer/Dockerfile | 2 +- tools/dockerfile/test/multilang_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/node_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/php7_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/php_jessie_x64/Dockerfile | 2 +- 
tools/dockerfile/test/python_alpine_x64/Dockerfile | 2 +- tools/dockerfile/test/python_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/python_pyenv_x64/Dockerfile | 2 +- tools/dockerfile/test/ruby_jessie_x64/Dockerfile | 2 +- tools/dockerfile/test/sanity/Dockerfile | 2 +- tools/gce/linux_performance_worker_init.sh | 1 + tools/run_tests/helper_scripts/build_python.sh | 2 +- 34 files changed, 34 insertions(+), 33 deletions(-) diff --git a/templates/tools/dockerfile/python_deps.include b/templates/tools/dockerfile/python_deps.include index 94b854ad212..bf1f57f267a 100644 --- a/templates/tools/dockerfile/python_deps.include +++ b/templates/tools/dockerfile/python_deps.include @@ -9,6 +9,6 @@ RUN apt-get update && apt-get install -y ${'\\'} python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/distrib/pylint_code.sh b/tools/distrib/pylint_code.sh index 3c9235b5480..7175f1e15b4 100755 --- a/tools/distrib/pylint_code.sh +++ b/tools/distrib/pylint_code.sh @@ -29,7 +29,7 @@ VIRTUALENV=python_pylint_venv virtualenv $VIRTUALENV PYTHON=$(realpath $VIRTUALENV/bin/python) -$PYTHON -m pip install --upgrade pip +$PYTHON -m pip install --upgrade pip==9.0.1 $PYTHON -m pip install pylint==1.6.5 for dir in "${DIRS[@]}"; do diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py index 6f6d43c7d95..1822e51d095 100755 --- a/tools/distrib/python/docgen.py +++ b/tools/distrib/python/docgen.py @@ -60,7 +60,7 @@ environment.update({ subprocess_arguments_list = [ {'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment}, - {'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip'], + {'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'], 'env': environment}, {'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH], 'env': environment}, diff 
--git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh index dbb6b5c41f2..e5beb70117a 100755 --- a/tools/distrib/yapf_code.sh +++ b/tools/distrib/yapf_code.sh @@ -33,7 +33,7 @@ VIRTUALENV=yapf_virtual_environment virtualenv $VIRTUALENV PYTHON=$(realpath "${VIRTUALENV}/bin/python") -$PYTHON -m pip install --upgrade pip +$PYTHON -m pip install --upgrade pip==9.0.1 $PYTHON -m pip install --upgrade futures $PYTHON -m pip install yapf==0.16.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile index dbf58023c53..ea82476b65f 100644 --- a/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile @@ -60,7 +60,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile index 4ccfbc43c30..56b8be89ec6 100644 --- a/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile @@ -59,7 +59,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile index 7cfe98cbc84..38d377ce2fd 100644 --- a/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile @@ -60,7 +60,7 @@ RUN apt-get update && apt-get 
install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile index febe2fa251b..73c41a48386 100644 --- a/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_go/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile index 3a516cbb62a..7c083de3653 100644 --- a/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile index acb640a81dc..61efc18df92 100644 --- a/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 
twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile index 354b7bfdb59..278b09a6663 100644 --- a/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile @@ -28,7 +28,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile index 92a542ff76b..d56632440fe 100644 --- a/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_java/Dockerfile @@ -43,7 +43,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile index 92a542ff76b..d56632440fe 100644 --- a/tools/dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile @@ -43,7 +43,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile index 4343d56cb49..f4c3e4103e3 100644 --- 
a/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_node/Dockerfile @@ -60,7 +60,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile index 271c6e75e9d..d165307031d 100644 --- a/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_python/Dockerfile @@ -60,7 +60,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile index 7bcada68e0a..2217b10b8b1 100644 --- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile +++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile @@ -60,7 +60,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile index 40d46fcf58b..3e31e67f5f3 100644 --- a/tools/dockerfile/test/csharp_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/csharp_jessie_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install 
pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/cxx_alpine_x64/Dockerfile b/tools/dockerfile/test/cxx_alpine_x64/Dockerfile index 1ae50c106f2..af5e7d6f14f 100644 --- a/tools/dockerfile/test/cxx_alpine_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_alpine_x64/Dockerfile @@ -36,7 +36,7 @@ RUN apk update && apk add \ zip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 diff --git a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile index 888a37baca7..3492dd72c54 100644 --- a/tools/dockerfile/test/cxx_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_jessie_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile index 319f1e18893..f8cbf35d393 100644 --- a/tools/dockerfile/test/cxx_jessie_x86/Dockerfile +++ b/tools/dockerfile/test/cxx_jessie_x86/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile index 61f005d9da3..6966d6b8d37 100644 --- a/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile +++ 
b/tools/dockerfile/test/cxx_ubuntu1404_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile index f35247eccb5..016034aa50e 100644 --- a/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile +++ b/tools/dockerfile/test/cxx_ubuntu1604_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/fuzzer/Dockerfile b/tools/dockerfile/test/fuzzer/Dockerfile index ce1badfeb1b..50104ad7ae7 100644 --- a/tools/dockerfile/test/fuzzer/Dockerfile +++ b/tools/dockerfile/test/fuzzer/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile index 59fe4d8f93b..1a4b6815760 100644 --- a/tools/dockerfile/test/multilang_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/multilang_jessie_x64/Dockerfile @@ -121,7 +121,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 
twisted==17.5.0 diff --git a/tools/dockerfile/test/node_jessie_x64/Dockerfile b/tools/dockerfile/test/node_jessie_x64/Dockerfile index 103be8412b4..4f18dbae794 100644 --- a/tools/dockerfile/test/node_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/node_jessie_x64/Dockerfile @@ -75,7 +75,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/php7_jessie_x64/Dockerfile b/tools/dockerfile/test/php7_jessie_x64/Dockerfile index f6d426bcd6a..1399502840f 100644 --- a/tools/dockerfile/test/php7_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/php7_jessie_x64/Dockerfile @@ -75,7 +75,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/php_jessie_x64/Dockerfile b/tools/dockerfile/test/php_jessie_x64/Dockerfile index ae82a8d99f1..56dc6045e15 100644 --- a/tools/dockerfile/test/php_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/php_jessie_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/python_alpine_x64/Dockerfile b/tools/dockerfile/test/python_alpine_x64/Dockerfile index 7bd11d74079..7584ab8767a 100644 --- a/tools/dockerfile/test/python_alpine_x64/Dockerfile +++ b/tools/dockerfile/test/python_alpine_x64/Dockerfile @@ -36,7 +36,7 @@ RUN apk update && apk add \ zip # 
Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 diff --git a/tools/dockerfile/test/python_jessie_x64/Dockerfile b/tools/dockerfile/test/python_jessie_x64/Dockerfile index d5d781cd1ac..8d89f5022f2 100644 --- a/tools/dockerfile/test/python_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/python_jessie_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/python_pyenv_x64/Dockerfile b/tools/dockerfile/test/python_pyenv_x64/Dockerfile index 3b4ad12b6d9..f8cbdc5726e 100644 --- a/tools/dockerfile/test/python_pyenv_x64/Dockerfile +++ b/tools/dockerfile/test/python_pyenv_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile index 3d879bb0c90..5d7f80bf889 100644 --- a/tools/dockerfile/test/ruby_jessie_x64/Dockerfile +++ b/tools/dockerfile/test/ruby_jessie_x64/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile index 
dff979dc371..487ce15e2e7 100644 --- a/tools/dockerfile/test/sanity/Dockerfile +++ b/tools/dockerfile/test/sanity/Dockerfile @@ -64,7 +64,7 @@ RUN apt-get update && apt-get install -y \ python-pip # Install Python packages from PyPI -RUN pip install pip --upgrade +RUN pip install --upgrade pip==9.0.1 RUN pip install virtualenv RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 twisted==17.5.0 diff --git a/tools/gce/linux_performance_worker_init.sh b/tools/gce/linux_performance_worker_init.sh index 88d8de74028..8d900f1d16a 100755 --- a/tools/gce/linux_performance_worker_init.sh +++ b/tools/gce/linux_performance_worker_init.sh @@ -72,6 +72,7 @@ sudo apt-get install -y netperf sudo apt-get install -y libgflags-dev libgtest-dev libc++-dev clang # Python dependencies +sudo pip install --upgrade pip==9.0.1 sudo pip install tabulate sudo pip install google-api-python-client sudo pip install virtualenv diff --git a/tools/run_tests/helper_scripts/build_python.sh b/tools/run_tests/helper_scripts/build_python.sh index be650553dc8..e3620821f3d 100755 --- a/tools/run_tests/helper_scripts/build_python.sh +++ b/tools/run_tests/helper_scripts/build_python.sh @@ -152,7 +152,7 @@ pip_install_dir() { cd $PWD } -$VENV_PYTHON -m pip install --upgrade pip +$VENV_PYTHON -m pip install --upgrade pip==9.0.1 $VENV_PYTHON -m pip install setuptools $VENV_PYTHON -m pip install cython $VENV_PYTHON -m pip install six enum34 protobuf futures From bdf998a1840a5b35c9ad84d27c8ca4ce2642b06a Mon Sep 17 00:00:00 2001 From: Yash Tibrewal Date: Wed, 27 Sep 2017 17:07:43 -0700 Subject: [PATCH 079/109] Fix compression tests for when they expect the client to receive the completion tag --- test/core/end2end/tests/compressed_payload.c | 2 +- test/core/end2end/tests/stream_compression_compressed_payload.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/core/end2end/tests/compressed_payload.c b/test/core/end2end/tests/compressed_payload.c index 
ce86e97b83e..ba03773036b 100644 --- a/test/core/end2end/tests/compressed_payload.c +++ b/test/core/end2end/tests/compressed_payload.c @@ -193,6 +193,7 @@ static void request_for_disabled_algorithm( GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(101), true); + CQ_EXPECT_COMPLETION(cqv, tag(1), true); cq_verify(cqv); op = ops; @@ -221,7 +222,6 @@ static void request_for_disabled_algorithm( GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(103), true); - CQ_EXPECT_COMPLETION(cqv, tag(1), true); cq_verify(cqv); /* call was cancelled (closed) ... */ diff --git a/test/core/end2end/tests/stream_compression_compressed_payload.c b/test/core/end2end/tests/stream_compression_compressed_payload.c index 094c4de1c05..8b47741cd40 100644 --- a/test/core/end2end/tests/stream_compression_compressed_payload.c +++ b/test/core/end2end/tests/stream_compression_compressed_payload.c @@ -193,6 +193,7 @@ static void request_for_disabled_algorithm( GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(101), true); + CQ_EXPECT_COMPLETION(cqv, tag(1), true); cq_verify(cqv); op = ops; @@ -221,7 +222,6 @@ static void request_for_disabled_algorithm( GPR_ASSERT(GRPC_CALL_OK == error); CQ_EXPECT_COMPLETION(cqv, tag(103), true); - CQ_EXPECT_COMPLETION(cqv, tag(1), true); cq_verify(cqv); /* call was cancelled (closed) ... 
*/ From 292c1ef3dc98ef9f3eeacef81dca853735119352 Mon Sep 17 00:00:00 2001 From: Matt Kwong Date: Thu, 28 Sep 2017 11:09:25 -0700 Subject: [PATCH 080/109] Start port_server before running daily performance profile --- tools/internal_ci/linux/grpc_performance_profile_daily.sh | 2 ++ tools/jenkins/run_performance_profile_daily.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tools/internal_ci/linux/grpc_performance_profile_daily.sh b/tools/internal_ci/linux/grpc_performance_profile_daily.sh index 25523e21b80..34d41bc04c6 100755 --- a/tools/internal_ci/linux/grpc_performance_profile_daily.sh +++ b/tools/internal_ci/linux/grpc_performance_profile_daily.sh @@ -22,6 +22,8 @@ source tools/internal_ci/helper_scripts/prepare_build_linux_perf_rc CPUS=`python -c 'import multiprocessing; print multiprocessing.cpu_count()'` +./tools/run_tests/start_port_server.py || true + make CONFIG=opt memory_profile_test memory_profile_client memory_profile_server -j $CPUS bins/opt/memory_profile_test bq load microbenchmarks.memory memory_usage.csv diff --git a/tools/jenkins/run_performance_profile_daily.sh b/tools/jenkins/run_performance_profile_daily.sh index 04a2464aee1..48d82a9b7f6 100755 --- a/tools/jenkins/run_performance_profile_daily.sh +++ b/tools/jenkins/run_performance_profile_daily.sh @@ -29,4 +29,6 @@ fi BENCHMARKS_TO_RUN="bm_fullstack_unary_ping_pong bm_fullstack_streaming_ping_pong bm_fullstack_streaming_pump bm_closure bm_cq bm_call_create bm_error bm_chttp2_hpack bm_chttp2_transport bm_pollset bm_metadata" +./tools/run_tests/start_port_server.py || true + $PYTHON tools/run_tests/run_microbenchmark.py --collect summary perf latency -b $BENCHMARKS_TO_RUN From 30f1d0fe79d45c8d89a654faf00431366d75ba84 Mon Sep 17 00:00:00 2001 From: Ken Payson Date: Thu, 28 Sep 2017 14:07:32 -0700 Subject: [PATCH 081/109] Specify min windows version as Vista for Python --- include/grpc/impl/codegen/port_platform.h | 4 ---- setup.py | 4 +--- src/core/lib/iomgr/socket_utils_windows.c | 4 
---- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h index 7e5f85253e1..0f4316c9547 100644 --- a/include/grpc/impl/codegen/port_platform.h +++ b/include/grpc/impl/codegen/port_platform.h @@ -291,10 +291,6 @@ #endif #ifdef _MSC_VER -#ifdef _PYTHON_MSVC -// The Python 3.5 Windows runtime is missing InetNtop -#define GPR_WIN_INET_NTOP -#endif // _PYTHON_MSVC #if _MSC_VER < 1700 typedef __int8 int8_t; typedef __int16 int16_t; diff --git a/setup.py b/setup.py index 12882413ce3..90c9316b0da 100644 --- a/setup.py +++ b/setup.py @@ -110,8 +110,6 @@ if EXTRA_ENV_COMPILE_ARGS is None: EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s' else: EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64' - elif 'win32' in sys.platform: - EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC' elif "linux" in sys.platform: EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions' elif "darwin" in sys.platform: @@ -163,7 +161,7 @@ if "win32" in sys.platform: # TODO(zyc): Re-enble c-ares on x64 and x86 windows after fixing the # ares_library_init compilation issue DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1), - ('GRPC_ARES', 0),) + ('GRPC_ARES', 0), ('NTDDI_VERSION', 0x06000000),) if '64bit' in platform.architecture()[0]: DEFINE_MACROS += (('MS_WIN64', 1),) elif sys.version_info >= (3, 5): diff --git a/src/core/lib/iomgr/socket_utils_windows.c b/src/core/lib/iomgr/socket_utils_windows.c index 2732c159aae..6e85e4b61fd 100644 --- a/src/core/lib/iomgr/socket_utils_windows.c +++ b/src/core/lib/iomgr/socket_utils_windows.c @@ -26,12 +26,8 @@ #include const char *grpc_inet_ntop(int af, const void *src, char *dst, size_t size) { -#ifdef GPR_WIN_INET_NTOP - return inet_ntop(af, src, dst, size); -#else /* Windows InetNtopA wants a mutable ip pointer */ return InetNtopA(af, (void *)src, dst, size); 
-#endif /* GPR_WIN_INET_NTOP */ } #endif /* GRPC_WINDOWS_SOCKETUTILS */ From 3e3bbdff917e5c56f6c29bc35c8eb989192325e7 Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Thu, 28 Sep 2017 22:36:31 +0000 Subject: [PATCH 082/109] Raise exception instances rather than classes --- src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index d860173b5d4..782a684d255 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -454,7 +454,7 @@ cdef class _MetadataIterator: self.i = self.i + 1 return result else: - raise StopIteration + raise StopIteration() # TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an @@ -518,7 +518,7 @@ cdef class MetadataArray: def __getitem__(self, size_t i): if i >= self.c_metadata_array.count: - raise IndexError + raise IndexError() key = _slice_bytes(self.c_metadata_array.metadata[i].key) value = _slice_bytes(self.c_metadata_array.metadata[i].value) return Metadatum(key=key, value=value) @@ -720,7 +720,7 @@ cdef class _OperationsIterator: self.i = self.i + 1 return result else: - raise StopIteration + raise StopIteration() cdef class Operations: From 4ceb743c82606381a8c734e7d33481b400aea68c Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Thu, 28 Sep 2017 22:37:38 +0000 Subject: [PATCH 083/109] Drop unused staticmethods --- src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index 782a684d255..4f87261e173 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -171,14 +171,6 @@ cdef class Timespec: 
gpr_convert_clock_type(self.c_time, GPR_CLOCK_REALTIME)) return real_time.seconds + real_time.nanoseconds / 1e9 - @staticmethod - def infinite_future(): - return Timespec(float("+inf")) - - @staticmethod - def infinite_past(): - return Timespec(float("-inf")) - def __richcmp__(Timespec self not None, Timespec other not None, int op): cdef gpr_timespec self_c_time = self.c_time cdef gpr_timespec other_c_time = other.c_time From 3efe754799ac46f7be6502ba3e20a39743d9fb0d Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Thu, 28 Sep 2017 22:38:06 +0000 Subject: [PATCH 084/109] "resopnse" typo correction --- .../grpcio_tests/tests/unit/_metadata_code_details_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py index 9f72b1fcb58..6faab94be64 100644 --- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py +++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py @@ -29,7 +29,7 @@ _SERIALIZED_RESPONSE = b'\x49\x50\x51' _REQUEST_SERIALIZER = lambda unused_request: _SERIALIZED_REQUEST _REQUEST_DESERIALIZER = lambda unused_serialized_request: object() _RESPONSE_SERIALIZER = lambda unused_response: _SERIALIZED_RESPONSE -_RESPONSE_DESERIALIZER = lambda unused_serialized_resopnse: object() +_RESPONSE_DESERIALIZER = lambda unused_serialized_response: object() _SERVICE = 'test.TestService' _UNARY_UNARY = 'UnaryUnary' From 460af54440c944129d2dc3bd351ab3c43bfa1185 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Thu, 28 Sep 2017 16:12:15 -0700 Subject: [PATCH 085/109] Shuffle new and old jobs together --- .../profiling/microbenchmarks/bm_diff/bm_main.py | 16 ++++++++++++++-- .../profiling/microbenchmarks/bm_diff/bm_run.py | 8 +++++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py 
index 5aa11ac391e..516d110b97a 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py @@ -23,6 +23,7 @@ import bm_diff import sys import os +import random import argparse import multiprocessing import subprocess @@ -32,6 +33,12 @@ sys.path.append( os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils')) import comment_on_pr +sys.path.append( + os.path.join( + os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests', + 'python_utils')) +import jobset + def _args(): argp = argparse.ArgumentParser( @@ -125,8 +132,13 @@ def main(args): subprocess.check_call(['git', 'checkout', where_am_i]) subprocess.check_call(['git', 'submodule', 'update']) - bm_run.run('new', args.benchmarks, args.jobs, args.loops, args.regex, args.counters) - bm_run.run(old, args.benchmarks, args.jobs, args.loops, args.regex, args.counters) + jobs_list = [] + jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops, args.regex, args.counters) + jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops, args.regex, args.counters) + + # shuffle all jobs to eliminate noise from GCE CPU drift + random.shuffle(jobs_list, random.SystemRandom().random) + jobset.run(jobs_list, maxjobs=args.jobs) diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old, 'new', args.counters) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py index 206f7c5845f..d80ce3b611e 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py @@ -99,7 +99,7 @@ def _collect_bm_data(bm, cfg, name, regex, idx, loops): return jobs_list -def run(name, benchmarks, jobs, loops, regex, counters): +def create_jobs(name, benchmarks, loops, regex, counters): jobs_list = [] for loop in range(0, loops): for bm in benchmarks: @@ -108,9 +108,11 @@ def run(name, benchmarks, jobs, loops, regex, counters): 
jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop, loops) random.shuffle(jobs_list, random.SystemRandom().random) - jobset.run(jobs_list, maxjobs=jobs) + return jobs_list if __name__ == '__main__': args = _args() - run(args.name, args.benchmarks, args.jobs, args.loops, args.regex, args.counters) + jobs_list = create_jobs(args.name, args.benchmarks, args.loops, + args.regex, args.counters) + jobset.run(jobs_list, maxjobs=args.jobs) From c2d5edfef59a35a4258e9f1fb9b1078f97436c20 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Thu, 28 Sep 2017 19:20:02 -0700 Subject: [PATCH 086/109] Increase CPU cost --- tools/profiling/microbenchmarks/bm_diff/bm_run.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py index d80ce3b611e..81db5a226a2 100755 --- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py +++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py @@ -95,6 +95,7 @@ def _collect_bm_data(bm, cfg, name, regex, idx, loops): shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1, loops), verbose_success=True, + cpu_cost=2, timeout_seconds=60 * 60)) # one hour return jobs_list From cfd20c4262ca5b214e75b612baad15ad85e47275 Mon Sep 17 00:00:00 2001 From: yang-g Date: Thu, 14 Sep 2017 09:12:34 -0700 Subject: [PATCH 087/109] Add CompositeCallCredentials --- src/cpp/client/secure_credentials.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/cpp/client/secure_credentials.cc b/src/cpp/client/secure_credentials.cc index 057a058a3fb..b3377c09e91 100644 --- a/src/cpp/client/secure_credentials.cc +++ b/src/cpp/client/secure_credentials.cc @@ -150,6 +150,18 @@ std::shared_ptr CompositeChannelCredentials( return nullptr; } +std::shared_ptr CompositeCallCredentials( + const std::shared_ptr& creds1, + const std::shared_ptr& creds2) { + SecureCallCredentials* s_creds1 = creds1->AsSecureCredentials(); + SecureCallCredentials* s_creds2 = 
creds2->AsSecureCredentials(); + if (s_creds1 != nullptr && s_creds2 != nullptr) { + return WrapCallCredentials(grpc_composite_call_credentials_create( + s_creds1->GetRawCreds(), s_creds2->GetRawCreds(), nullptr)); + } + return nullptr; +} + void MetadataCredentialsPluginWrapper::Destroy(void* wrapper) { if (wrapper == nullptr) return; MetadataCredentialsPluginWrapper* w = From 3c00328b6295929d94c4fbc35e1084f71f7f5ff1 Mon Sep 17 00:00:00 2001 From: yang-g Date: Wed, 20 Sep 2017 13:58:13 -0700 Subject: [PATCH 088/109] Add a simple test --- test/cpp/end2end/end2end_test.cc | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index e54cd03ca20..5dae5b014bd 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -1673,6 +1673,34 @@ TEST_P(SecureEnd2endTest, BlockingAuthMetadataPluginFailure) { kTestCredsPluginErrorMsg); } +TEST_P(SecureEnd2endTest, CompositeCallCreds) { + ResetStub(); + EchoRequest request; + EchoResponse response; + ClientContext context; + const char kMetadataKey1[] = "call-creds-key1"; + const char kMetadataKey2[] = "call-creds-key2"; + const char kMetadataVal1[] = "call-creds-val1"; + const char kMetadataVal2[] = "call-creds-val2"; + + context.set_credentials(CompositeCallCredentials( + MetadataCredentialsFromPlugin(std::unique_ptr( + new TestMetadataCredentialsPlugin(kMetadataKey1, kMetadataVal1, true, + true))), + MetadataCredentialsFromPlugin(std::unique_ptr( + new TestMetadataCredentialsPlugin(kMetadataKey2, kMetadataVal2, true, + true))))); + request.set_message("Hello"); + request.mutable_param()->set_echo_metadata(true); + + Status s = stub_->Echo(&context, request, &response); + EXPECT_TRUE(s.ok()); + EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), + kMetadataKey1, kMetadataVal1)); + EXPECT_TRUE(MetadataContains(context.GetServerTrailingMetadata(), + kMetadataKey2, kMetadataVal2)); +} + 
TEST_P(SecureEnd2endTest, ClientAuthContext) { ResetStub(); EchoRequest request; From 103991eb73aba3cd4cf9b58f7fca963404233148 Mon Sep 17 00:00:00 2001 From: Nathaniel Manista Date: Fri, 29 Sep 2017 04:22:16 +0000 Subject: [PATCH 089/109] Devolve staticmethod to ordinary function --- .../grpc/_cython/_cygrpc/credentials.pxd.pxi | 3 ++- .../grpc/_cython/_cygrpc/credentials.pyx.pxi | 22 +++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi index a0e69dd6131..918ba7c7a0b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pxd.pxi @@ -41,7 +41,8 @@ cdef class CredentialsMetadataPlugin: cdef object plugin_callback cdef bytes plugin_name - cdef grpc_metadata_credentials_plugin make_c_plugin(self) + +cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin) cdef class AuthMetadataContext: diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 57816f1cab0..92b9d65d71b 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -89,20 +89,20 @@ cdef class CredentialsMetadataPlugin: self.plugin_callback = plugin_callback self.plugin_name = name - @staticmethod - cdef grpc_metadata_credentials_plugin make_c_plugin(self): - cdef grpc_metadata_credentials_plugin result - result.get_metadata = plugin_get_metadata - result.destroy = plugin_destroy_c_plugin_state - result.state = self - result.type = self.plugin_name - cpython.Py_INCREF(self) - return result - def __dealloc__(self): grpc_shutdown() +cdef grpc_metadata_credentials_plugin _c_plugin(CredentialsMetadataPlugin plugin): + cdef grpc_metadata_credentials_plugin c_plugin + c_plugin.get_metadata = plugin_get_metadata 
+ c_plugin.destroy = plugin_destroy_c_plugin_state + c_plugin.state = plugin + c_plugin.type = plugin.plugin_name + cpython.Py_INCREF(plugin) + return c_plugin + + cdef class AuthMetadataContext: def __cinit__(self): @@ -239,7 +239,7 @@ def call_credentials_google_iam(authorization_token, authority_selector): def call_credentials_metadata_plugin(CredentialsMetadataPlugin plugin): cdef CallCredentials credentials = CallCredentials() - cdef grpc_metadata_credentials_plugin c_plugin = plugin.make_c_plugin() + cdef grpc_metadata_credentials_plugin c_plugin = _c_plugin(plugin) with nogil: credentials.c_credentials = ( grpc_metadata_credentials_create_from_plugin(c_plugin, NULL)) From daa70e67ccd9d9359c6692fa132a64417c8d57ee Mon Sep 17 00:00:00 2001 From: "Nicolas \"Pixel\" Noble" Date: Fri, 29 Sep 2017 19:18:41 +0200 Subject: [PATCH 090/109] Adding Copyright info. --- tools/distrib/pull_requests_interval.sh | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/distrib/pull_requests_interval.sh b/tools/distrib/pull_requests_interval.sh index 43114990aa2..7a6c702daa6 100755 --- a/tools/distrib/pull_requests_interval.sh +++ b/tools/distrib/pull_requests_interval.sh @@ -1,4 +1,17 @@ #!/bin/bash +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
if [ "x$1" = "x" ] ; then echo "Usage: $0 [second ref]" From 520cc8f55544bcbba3630da934607776d551d9f7 Mon Sep 17 00:00:00 2001 From: ncteisen Date: Fri, 29 Sep 2017 10:36:28 -0700 Subject: [PATCH 091/109] Fix ClangTidy --- src/cpp/client/secure_credentials.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cpp/client/secure_credentials.h b/src/cpp/client/secure_credentials.h index fa1e31996ad..ed9afb37b00 100644 --- a/src/cpp/client/secure_credentials.h +++ b/src/cpp/client/secure_credentials.h @@ -73,7 +73,7 @@ class MetadataCredentialsPluginWrapper final : private GrpcLibraryCodegen { grpc_auth_metadata_context context, grpc_credentials_plugin_metadata_cb cb, void* user_data, grpc_metadata creds_md[GRPC_METADATA_CREDENTIALS_PLUGIN_SYNC_MAX], - size_t* num_creds_md, grpc_status_code* status, + size_t* num_creds_md, grpc_status_code* status_code, const char** error_details); std::unique_ptr thread_pool_; std::unique_ptr plugin_; From b5eaf7734cd8c85263afa5a773ce0f7a33d75b0a Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Wed, 1 Feb 2017 22:15:02 -0800 Subject: [PATCH 092/109] gRPC CLI batch mode --- test/cpp/util/grpc_tool.cc | 125 ++++++++++++++++++++++++++++++-- test/cpp/util/grpc_tool_test.cc | 55 ++++++++++++++ 2 files changed, 175 insertions(+), 5 deletions(-) diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc index bb6f8780202..7bf1a595e57 100644 --- a/test/cpp/util/grpc_tool.cc +++ b/test/cpp/util/grpc_tool.cc @@ -58,6 +58,11 @@ DEFINE_string(protofiles, "", "Name of the proto file."); DEFINE_bool(binary_input, false, "Input in binary format"); DEFINE_bool(binary_output, false, "Output in binary format"); DEFINE_string(infile, "", "Input file (default is stdin)"); +DEFINE_bool(batch, false, + "Input contains multiple requests. Please do not use this to send " + "more than a few RPCs. 
gRPC CLI has very different performance " + "characteristics compared with normal RPC calls which make it " + "unsuitable for loadtesting or significant production traffic."); namespace { @@ -460,12 +465,17 @@ bool GrpcTool::CallMethod(int argc, const char** argv, return false; } + if (argc == 3) { + request_text = argv[2]; + } + if (parser->IsStreaming(method_name, true /* is_request */)) { std::istream* input_stream; std::ifstream input_file; - if (argc == 3) { - request_text = argv[2]; + if (FLAGS_batch) { + fprintf(stderr, "Batch mode for streaming RPC is not supported.\n"); + return false; } std::multimap client_metadata; @@ -549,8 +559,115 @@ bool GrpcTool::CallMethod(int argc, const char** argv, } } else { // parser->IsStreaming(method_name, true /* is_request */) + if (FLAGS_batch) { + if (parser->IsStreaming(method_name, false /* is_request */)) { + fprintf(stderr, "Batch mode for streaming RPC is not supported.\n"); + return false; + } + + std::istream* input_stream; + std::ifstream input_file; + + if (FLAGS_infile.empty()) { + if (isatty(STDIN_FILENO)) { + print_mode = true; + fprintf(stderr, "reading request messages from stdin...\n"); + } + input_stream = &std::cin; + } else { + input_file.open(FLAGS_infile, std::ios::in | std::ios::binary); + input_stream = &input_file; + } + + std::multimap client_metadata; + ParseMetadataFlag(&client_metadata); + if (print_mode) { + PrintMetadata(client_metadata, "Sending client initial metadata:"); + } + + std::stringstream request_ss; + grpc::string line; + while (!request_text.empty() || + (!input_stream->eof() && getline(*input_stream, line))) { + if (!request_text.empty()) { + if (FLAGS_binary_input) { + serialized_request_proto = request_text; + request_text.clear(); + } else { + serialized_request_proto = parser->GetSerializedProtoFromMethod( + method_name, request_text, true /* is_request */); + request_text.clear(); + if (parser->HasError()) { + if (print_mode) { + fprintf(stderr, "Failed to parse 
request.\n"); + } + continue; + } + } + + grpc::string serialized_response_proto; + std::multimap + server_initial_metadata, server_trailing_metadata; + CliCall call(channel, formatted_method_name, client_metadata); + call.Write(serialized_request_proto); + call.WritesDone(); + if (!call.Read(&serialized_response_proto, + &server_initial_metadata)) { + fprintf(stderr, "Failed to read response.\n"); + } + Status status = call.Finish(&server_trailing_metadata); + + if (status.ok()) { + if (print_mode) { + fprintf(stderr, "Rpc succeeded with OK status.\n"); + PrintMetadata(server_initial_metadata, + "Received initial metadata from server:"); + PrintMetadata(server_trailing_metadata, + "Received trailing metadata from server:"); + } + + if (FLAGS_binary_output) { + if (!callback(serialized_response_proto)) { + break; + } + } else { + grpc::string response_text = parser->GetTextFormatFromMethod( + method_name, serialized_response_proto, + false /* is_request */); + if (parser->HasError() && print_mode) { + fprintf(stderr, "Failed to parse response.\n"); + } else { + if (!callback(response_text)) { + break; + } + } + } + } else { + if (print_mode) { + fprintf(stderr, + "Rpc failed with status code %d, error message: %s\n", + status.error_code(), status.error_message().c_str()); + } + } + } else { + if (line.length() == 0) { + request_text = request_ss.str(); + request_ss.str(grpc::string()); + request_ss.clear(); + } else { + request_ss << line << ' '; + } + } + } + + if (input_file.is_open()) { + input_file.close(); + } + + return true; + } + if (argc == 3) { - request_text = argv[2]; if (!FLAGS_infile.empty()) { fprintf(stderr, "warning: request given in argv, ignoring --infile\n"); } @@ -571,9 +688,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, if (FLAGS_binary_input) { serialized_request_proto = request_text; - // formatted_method_name = method_name; } else { - // formatted_method_name = parser->GetFormattedMethodName(method_name); 
serialized_request_proto = parser->GetSerializedProtoFromMethod( method_name, request_text, true /* is_request */); if (parser->HasError()) { diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc index dd00581f2b8..d0b3d7b81ba 100644 --- a/test/cpp/util/grpc_tool_test.cc +++ b/test/cpp/util/grpc_tool_test.cc @@ -84,6 +84,7 @@ namespace testing { DECLARE_bool(binary_input); DECLARE_bool(binary_output); DECLARE_bool(l); +DECLARE_bool(batch); namespace { @@ -399,6 +400,60 @@ TEST_F(GrpcToolTest, CallCommand) { ShutdownServer(); } +TEST_F(GrpcToolTest, CallCommandBatch) { + // Test input "grpc_cli call Echo" + std::stringstream output_stream; + + const grpc::string server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", + "message: 'Hello0'"}; + + // Mock std::cin input "message: 'Hello1'\n\n message: 'Hello2'\n\n" + std::streambuf* orig = std::cin.rdbuf(); + std::istringstream ss("message: 'Hello1'\n\n message: 'Hello2'\n\n"); + std::cin.rdbuf(ss.rdbuf()); + + FLAGS_batch = true; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + FLAGS_batch = false; + + // Expected output: "message: "Hello0"\nmessage: "Hello1"\nmessage: + // "Hello2"\n" + EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(), + "message: \"Hello0\"\nmessage: " + "\"Hello1\"\nmessage: \"Hello2\"\n")); + std::cin.rdbuf(orig); + ShutdownServer(); +} + +TEST_F(GrpcToolTest, CallCommandBatchWithBadRequest) { + // Test input "grpc_cli call Echo" + std::stringstream output_stream; + + const grpc::string server_address = SetUpServer(); + const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo", + "message: 'Hello0'"}; + + // Mock std::cin input "message: 1\n\n message: 'Hello2'\n\n" + std::streambuf* orig = std::cin.rdbuf(); + std::istringstream ss("message: 1\n\n message: 'Hello2'\n\n"); + std::cin.rdbuf(ss.rdbuf()); 
+ + FLAGS_batch = true; + EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv, TestCliCredentials(), + std::bind(PrintStream, &output_stream, + std::placeholders::_1))); + FLAGS_batch = false; + + // Expected output: "message: "Hello0"\nmessage: "Hello2"\n" + EXPECT_TRUE(NULL != strstr(output_stream.str().c_str(), + "message: \"Hello0\"\nmessage: \"Hello2\"\n")); + std::cin.rdbuf(orig); + ShutdownServer(); +} + TEST_F(GrpcToolTest, CallCommandRequestStream) { // Test input: grpc_cli call localhost: RequestStream "message: // 'Hello0'" From 5dd2f48e242cd7ce788b84a6b07612098b4d96e3 Mon Sep 17 00:00:00 2001 From: Yuchen Zeng Date: Fri, 29 Sep 2017 14:52:33 -0700 Subject: [PATCH 093/109] Fix windows build --- test/cpp/util/grpc_tool.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc index 7bf1a595e57..cd6084ac6dc 100644 --- a/test/cpp/util/grpc_tool.cc +++ b/test/cpp/util/grpc_tool.cc @@ -569,7 +569,7 @@ bool GrpcTool::CallMethod(int argc, const char** argv, std::ifstream input_file; if (FLAGS_infile.empty()) { - if (isatty(STDIN_FILENO)) { + if (isatty(fileno(stdin))) { print_mode = true; fprintf(stderr, "reading request messages from stdin...\n"); } From 3771adebb9fc03b3dfb1486614b6120c376aa123 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Vo=C3=9F?= Date: Sat, 30 Sep 2017 13:01:42 +0200 Subject: [PATCH 094/109] Fix up whitespace --- src/core/lib/iomgr/tcp_client_uv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c index f2b23aae2e6..0d9e7ed5f6e 100644 --- a/src/core/lib/iomgr/tcp_client_uv.c +++ b/src/core/lib/iomgr/tcp_client_uv.c @@ -145,7 +145,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx, connect->resource_quota = resource_quota; uv_tcp_init(uv_default_loop(), connect->tcp_handle); connect->connect_req.data = connect; - connect->refs = 2; // One for the connect operation, one 
for the timer. + connect->refs = 2; // One for the connect operation, one for the timer. if (GRPC_TRACER_ON(grpc_tcp_trace)) { gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting", From 4c64dc9af87ba1826c4cbeef887b69663c5966c0 Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Wed, 27 Sep 2017 16:53:47 +0200 Subject: [PATCH 095/109] workaround verconf.h problem --- tools/distrib/build_ruby_environment_macos.sh | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/tools/distrib/build_ruby_environment_macos.sh b/tools/distrib/build_ruby_environment_macos.sh index c2240ce9761..fe0c5a4d70b 100644 --- a/tools/distrib/build_ruby_environment_macos.sh +++ b/tools/distrib/build_ruby_environment_macos.sh @@ -21,9 +21,10 @@ CROSS_RUBY=`mktemp tmpfile.XXXXXXXX` curl https://raw.githubusercontent.com/rake-compiler/rake-compiler/v1.0.3/tasks/bin/cross-ruby.rake > $CROSS_RUBY +# See https://github.com/grpc/grpc/issues/12161 for verconf.h patch details patch $CROSS_RUBY << EOF ---- cross-ruby.rake 2016-02-05 16:26:53.000000000 -0800 -+++ cross-ruby.rake.patched 2016-02-05 16:27:33.000000000 -0800 +--- cross-ruby.rake 2017-09-27 16:46:00.311020325 +0200 ++++ patched 2017-09-27 16:49:46.127016895 +0200 @@ -133,7 +133,8 @@ "--host=#{MINGW_HOST}", "--target=#{MINGW_TARGET}", @@ -32,8 +33,16 @@ patch $CROSS_RUBY << EOF + '--enable-static', + '--disable-shared', '--disable-install-doc', - '--without-tk', - '--without-tcl' + '--with-ext=' + ] +@@ -151,6 +152,7 @@ + # make + file "#{USER_HOME}/builds/#{MINGW_HOST}/#{RUBY_CC_VERSION}/ruby.exe" => ["#{USER_HOME}/builds/#{MINGW_HOST}/#{RUBY_CC_VERSION}/Makefile"] do |t| + chdir File.dirname(t.prerequisites.first) do ++ sh "test -s verconf.h || rm -f verconf.h" # if verconf.h has size 0, make sure it gets re-built by make + sh MAKE + end + end EOF MAKE="make -j8" From 6bb6fd9d1b7f45efe3faed36876cbb52a4b22fe1 Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Wed, 27 Sep 2017 16:56:26 +0200 
Subject: [PATCH 096/109] simplify macos artifact build script --- tools/internal_ci/macos/grpc_build_artifacts.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/internal_ci/macos/grpc_build_artifacts.sh b/tools/internal_ci/macos/grpc_build_artifacts.sh index 09784e3bb47..eb4568c32b1 100755 --- a/tools/internal_ci/macos/grpc_build_artifacts.sh +++ b/tools/internal_ci/macos/grpc_build_artifacts.sh @@ -27,8 +27,7 @@ python3.5 -m pip install cython setuptools wheel python3.6 -m pip install cython setuptools wheel # needed to build ruby artifacts -wget https://raw.githubusercontent.com/grpc/grpc/master/tools/distrib/build_ruby_environment_macos.sh -bash build_ruby_environment_macos.sh +time bash tools/distrib/build_ruby_environment_macos.sh gem install rubygems-update update_rubygems From 64e48b4fdb9a40b9507865ae4bdde4a4fffbffe4 Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Mon, 2 Oct 2017 11:35:01 +0200 Subject: [PATCH 097/109] coreutils already installed --- tools/internal_ci/helper_scripts/prepare_build_macos_rc | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/internal_ci/helper_scripts/prepare_build_macos_rc b/tools/internal_ci/helper_scripts/prepare_build_macos_rc index bec529f85e9..c785779b515 100644 --- a/tools/internal_ci/helper_scripts/prepare_build_macos_rc +++ b/tools/internal_ci/helper_scripts/prepare_build_macos_rc @@ -51,7 +51,6 @@ export LANG=en_US.UTF-8 pod repo update # needed by python # python -brew install coreutils # we need grealpath pip install virtualenv --user python pip install -U six tox setuptools twisted pyyaml --user python export PYTHONPATH=/Library/Python/3.4/site-packages From 15ccb3d38afa58981930a546aa1eab489b7bfacb Mon Sep 17 00:00:00 2001 From: Jan Tattermusch Date: Mon, 2 Oct 2017 17:53:49 +0200 Subject: [PATCH 098/109] debug clock skew --- tools/internal_ci/helper_scripts/prepare_build_macos_rc | 3 +++ tools/internal_ci/macos/grpc_run_tests_matrix.sh | 3 +++ 2 files changed, 6 insertions(+) 
diff --git a/tools/internal_ci/helper_scripts/prepare_build_macos_rc b/tools/internal_ci/helper_scripts/prepare_build_macos_rc index bec529f85e9..9cb24675101 100644 --- a/tools/internal_ci/helper_scripts/prepare_build_macos_rc +++ b/tools/internal_ci/helper_scripts/prepare_build_macos_rc @@ -59,4 +59,7 @@ export PYTHONPATH=/Library/Python/3.4/site-packages # set xcode version for Obj-C tests sudo xcode-select -switch /Applications/Xcode_8.2.1.app/Contents/Developer +# TODO(jtattermusch): better debugging of clock skew, remove once not needed +date + git submodule update --init diff --git a/tools/internal_ci/macos/grpc_run_tests_matrix.sh b/tools/internal_ci/macos/grpc_run_tests_matrix.sh index 8e7fd54a629..6e0c2bb4874 100755 --- a/tools/internal_ci/macos/grpc_run_tests_matrix.sh +++ b/tools/internal_ci/macos/grpc_run_tests_matrix.sh @@ -28,6 +28,9 @@ ps aux | grep port_server\\.py | awk '{print $2}' | xargs kill -9 # Reveal leftover processes that might be left behind by the build ps aux | grep -i kbuilder +# TODO(jtattermusch): better debugging of clock skew, remove once not needed +date + echo 'Exiting gRPC main test script.' 
if [ "$FAILED" != "" ] From dbfcd45af8d553d6c52ace15850a712584c0d5d5 Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Sun, 1 Oct 2017 15:34:29 -0700 Subject: [PATCH 099/109] Limit max jobs cpu agnostic way, to avoid overloading the test environment --- tools/run_tests/python_utils/jobset.py | 9 +++++++-- tools/run_tests/run_tests.py | 9 ++++++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py index 062c79a0dec..82a3bc14352 100755 --- a/tools/run_tests/python_utils/jobset.py +++ b/tools/run_tests/python_utils/jobset.py @@ -364,7 +364,7 @@ class Job(object): class Jobset(object): """Manages one run of jobs.""" - def __init__(self, check_cancelled, maxjobs, newline_on_success, travis, + def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic, newline_on_success, travis, stop_on_failure, add_env, quiet_success, max_time): self._running = set() self._check_cancelled = check_cancelled @@ -372,6 +372,7 @@ class Jobset(object): self._failures = 0 self._completed = 0 self._maxjobs = maxjobs + self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic self._newline_on_success = newline_on_success self._travis = travis self._stop_on_failure = stop_on_failure @@ -406,7 +407,9 @@ class Jobset(object): if self.cancelled(): return False current_cpu_cost = self.cpu_cost() if current_cpu_cost == 0: break - if current_cpu_cost + spec.cpu_cost <= self._maxjobs: break + if current_cpu_cost + spec.cpu_cost <= self._maxjobs: + if len(self._running) < self._maxjobs_cpu_agnostic: + break self.reap() if self.cancelled(): return False job = Job(spec, @@ -491,6 +494,7 @@ def tag_remaining(xs): def run(cmdlines, check_cancelled=_never_cancelled, maxjobs=None, + maxjobs_cpu_agnostic=None, newline_on_success=False, travis=False, infinite_runs=False, @@ -509,6 +513,7 @@ def run(cmdlines, return 0, resultset js = Jobset(check_cancelled, maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS, + 
maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS, newline_on_success, travis, stop_on_failure, add_env, quiet_success, max_time) for cmdline, remaining in tag_remaining(cmdlines): diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index b38108d456c..29055848e2c 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -117,6 +117,13 @@ def run_shell_command(cmd, env=None, cwd=None): e.cmd, e.returncode, e.output) raise +def max_parallel_tests_for_current_platform(): + # Too much test parallelization has only been seen to be a problem + # so far on windows. + if jobset.platform_string() == 'windows': + return 64 + return 1e6 + # SimpleConfig: just compile with CONFIG=config, and run the binary to test class Config(object): @@ -1553,7 +1560,7 @@ def _build_and_run( jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True) num_test_failures, resultset = jobset.run( all_runs, check_cancelled, newline_on_success=newline_on_success, - travis=args.travis, maxjobs=args.jobs, + travis=args.travis, maxjobs=args.jobs, maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(), stop_on_failure=args.stop_on_failure, quiet_success=args.quiet_success, max_time=args.max_time) if resultset: From 3b8f40a5c49c0e68dc989907e3c51b689c15b7da Mon Sep 17 00:00:00 2001 From: Alexander Polcyn Date: Mon, 2 Oct 2017 10:43:51 -0700 Subject: [PATCH 100/109] Tentatively reduce parallelization limit on linux/macos to 128 --- tools/run_tests/run_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 29055848e2c..076045614cb 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -122,7 +122,7 @@ def max_parallel_tests_for_current_platform(): # so far on windows. 
if jobset.platform_string() == 'windows': return 64 - return 1e6 + return 128 # SimpleConfig: just compile with CONFIG=config, and run the binary to test class Config(object): From 89cf5a409355ec4dd2083da0fcf01e964606d219 Mon Sep 17 00:00:00 2001 From: Muxi Yan Date: Mon, 2 Oct 2017 13:19:56 -0700 Subject: [PATCH 101/109] Eliminate gRPC-Core umbrella header warning by tweaking module.modulemap and excluding some files for other build systems --- gRPC-Core.podspec | 11 ---- include/grpc/module.modulemap | 63 ++++++++++++++++++- templates/gRPC-Core.podspec.template | 10 ++- .../include/grpc/module.modulemap.template | 32 ++++++++++ 4 files changed, 101 insertions(+), 15 deletions(-) create mode 100644 templates/include/grpc/module.modulemap.template diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index c76b9a3f4bd..d34c271dbaf 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -105,8 +105,6 @@ Pod::Spec.new do |s| ss.source_files = 'include/grpc/support/alloc.h', 'include/grpc/support/atm.h', 'include/grpc/support/atm_gcc_atomic.h', - 'include/grpc/support/atm_gcc_sync.h', - 'include/grpc/support/atm_windows.h', 'include/grpc/support/avl.h', 'include/grpc/support/cmdline.h', 'include/grpc/support/cpu.h', @@ -121,18 +119,13 @@ Pod::Spec.new do |s| 'include/grpc/support/sync_custom.h', 'include/grpc/support/sync_generic.h', 'include/grpc/support/sync_posix.h', - 'include/grpc/support/sync_windows.h', 'include/grpc/support/thd.h', 'include/grpc/support/time.h', 'include/grpc/support/tls.h', - 'include/grpc/support/tls_gcc.h', - 'include/grpc/support/tls_msvc.h', 'include/grpc/support/tls_pthread.h', 'include/grpc/support/useful.h', 'include/grpc/impl/codegen/atm.h', 'include/grpc/impl/codegen/atm_gcc_atomic.h', - 'include/grpc/impl/codegen/atm_gcc_sync.h', - 'include/grpc/impl/codegen/atm_windows.h', 'include/grpc/impl/codegen/gpr_slice.h', 'include/grpc/impl/codegen/gpr_types.h', 'include/grpc/impl/codegen/port_platform.h', @@ -140,7 +133,6 @@ Pod::Spec.new 
do |s| 'include/grpc/impl/codegen/sync_custom.h', 'include/grpc/impl/codegen/sync_generic.h', 'include/grpc/impl/codegen/sync_posix.h', - 'include/grpc/impl/codegen/sync_windows.h', 'include/grpc/impl/codegen/byte_buffer.h', 'include/grpc/impl/codegen/byte_buffer_reader.h', 'include/grpc/impl/codegen/compression_types.h', @@ -152,8 +144,6 @@ Pod::Spec.new do |s| 'include/grpc/impl/codegen/status.h', 'include/grpc/impl/codegen/atm.h', 'include/grpc/impl/codegen/atm_gcc_atomic.h', - 'include/grpc/impl/codegen/atm_gcc_sync.h', - 'include/grpc/impl/codegen/atm_windows.h', 'include/grpc/impl/codegen/gpr_slice.h', 'include/grpc/impl/codegen/gpr_types.h', 'include/grpc/impl/codegen/port_platform.h', @@ -161,7 +151,6 @@ Pod::Spec.new do |s| 'include/grpc/impl/codegen/sync_custom.h', 'include/grpc/impl/codegen/sync_generic.h', 'include/grpc/impl/codegen/sync_posix.h', - 'include/grpc/impl/codegen/sync_windows.h', 'include/grpc/grpc_security.h', 'include/grpc/byte_buffer.h', 'include/grpc/byte_buffer_reader.h', diff --git a/include/grpc/module.modulemap b/include/grpc/module.modulemap index 51bfef2cc93..226cc6cf871 100644 --- a/include/grpc/module.modulemap +++ b/include/grpc/module.modulemap @@ -1,12 +1,69 @@ + framework module grpc { umbrella header "grpc.h" - header "byte_buffer_reader.h" - header "grpc_security.h" - header "grpc_security_constants.h" header "support/alloc.h" + header "support/atm.h" + header "support/atm_gcc_atomic.h" + header "support/avl.h" + header "support/cmdline.h" + header "support/cpu.h" + header "support/histogram.h" + header "support/host_port.h" + header "support/log.h" + header "support/log_windows.h" header "support/port_platform.h" header "support/string_util.h" + header "support/subprocess.h" + header "support/sync.h" + header "support/sync_custom.h" + header "support/sync_generic.h" + header "support/sync_posix.h" + header "support/thd.h" + header "support/time.h" + header "support/tls.h" + header "support/tls_pthread.h" + header 
"support/useful.h" + header "impl/codegen/atm.h" + header "impl/codegen/atm_gcc_atomic.h" + header "impl/codegen/gpr_slice.h" + header "impl/codegen/gpr_types.h" + header "impl/codegen/port_platform.h" + header "impl/codegen/sync.h" + header "impl/codegen/sync_custom.h" + header "impl/codegen/sync_generic.h" + header "impl/codegen/sync_posix.h" + header "impl/codegen/byte_buffer.h" + header "impl/codegen/byte_buffer_reader.h" + header "impl/codegen/compression_types.h" + header "impl/codegen/connectivity_state.h" + header "impl/codegen/exec_ctx_fwd.h" + header "impl/codegen/grpc_types.h" + header "impl/codegen/propagation_bits.h" + header "impl/codegen/slice.h" + header "impl/codegen/status.h" + header "impl/codegen/atm.h" + header "impl/codegen/atm_gcc_atomic.h" + header "impl/codegen/gpr_slice.h" + header "impl/codegen/gpr_types.h" + header "impl/codegen/port_platform.h" + header "impl/codegen/sync.h" + header "impl/codegen/sync_custom.h" + header "impl/codegen/sync_generic.h" + header "impl/codegen/sync_posix.h" + header "grpc_security.h" + header "byte_buffer.h" + header "byte_buffer_reader.h" + header "compression.h" + header "grpc.h" + header "grpc_posix.h" + header "grpc_security_constants.h" + header "load_reporting.h" + header "slice.h" + header "slice_buffer.h" + header "status.h" + header "support/workaround_list.h" + header "census.h" export * module * { export * } diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index 6077f8098dd..22814849172 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -31,11 +31,19 @@ return [f for f in out if not f.startswith("third_party/nanopb/")] def grpc_public_headers(libs): + excluded_files = ["include/grpc/support/atm_gcc_sync.h", + "include/grpc/support/atm_windows.h", + "include/grpc/support/sync_windows.h", + "include/grpc/support/tls_gcc.h", + "include/grpc/support/tls_msvc.h", + "include/grpc/impl/codegen/atm_gcc_sync.h", + 
"include/grpc/impl/codegen/atm_windows.h", + "include/grpc/impl/codegen/sync_windows.h"] out = [] for lib in libs: if lib.name in ("grpc", "gpr"): out += lib.get('public_headers', []) - return out + return [f for f in out if not f in excluded_files] def grpc_private_headers(libs): out = [] diff --git a/templates/include/grpc/module.modulemap.template b/templates/include/grpc/module.modulemap.template new file mode 100644 index 00000000000..8edc4b56acd --- /dev/null +++ b/templates/include/grpc/module.modulemap.template @@ -0,0 +1,32 @@ +%YAML 1.2 +--- | + <%! + def grpc_public_headers_no_dir(libs): + excluded_files = ["include/grpc/support/atm_gcc_sync.h", + "include/grpc/support/atm_windows.h", + "include/grpc/support/sync_windows.h", + "include/grpc/support/tls_gcc.h", + "include/grpc/support/tls_msvc.h", + "include/grpc/impl/codegen/atm_gcc_sync.h", + "include/grpc/impl/codegen/atm_windows.h", + "include/grpc/impl/codegen/sync_windows.h"] + out = [] + for lib in libs: + if lib.name in ("grpc", "gpr"): + out += lib.get('public_headers', []) + out = [f for f in out if f not in excluded_files] + out = [hdr.split('/', 2)[2] for hdr in out] + return out + + def header_lines(files): + return ('\n ').join('header "%s"' % f for f in files) + %> + framework module grpc { + umbrella header "grpc.h" + + ${header_lines(grpc_public_headers_no_dir(libs))} + + export * + module * { export * } + } + From 9887379158b5418d983e79bc1bc8b4ba8c72bd54 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Mon, 2 Oct 2017 17:28:27 -0700 Subject: [PATCH 102/109] Update version to 1.8.0-dev, update g word to 'generous' --- BUILD | 4 ++-- CMakeLists.txt | 2 +- Makefile | 4 ++-- build.yaml | 4 ++-- doc/g_stands_for.md | 1 + gRPC-Core.podspec | 2 +- gRPC-ProtoRPC.podspec | 2 +- gRPC-RxLibrary.podspec | 2 +- gRPC.podspec | 2 +- package.json | 2 +- package.xml | 4 ++-- src/core/lib/surface/version.c | 2 +- src/cpp/common/version_cc.cc | 2 +- src/csharp/Grpc.Core/Version.csproj.include | 2 +- 
src/csharp/Grpc.Core/VersionInfo.cs | 4 ++-- src/csharp/build_packages_dotnetcli.bat | 2 +- src/csharp/build_packages_dotnetcli.sh | 4 ++-- src/node/health_check/package.json | 4 ++-- src/node/tools/package.json | 2 +- src/objective-c/!ProtoCompiler-gRPCPlugin.podspec | 2 +- src/objective-c/GRPCClient/private/version.h | 2 +- src/php/composer.json | 2 +- src/php/ext/grpc/version.h | 2 +- src/python/grpcio/grpc/_grpcio_metadata.py | 2 +- src/python/grpcio/grpc_version.py | 2 +- src/python/grpcio_health_checking/grpc_version.py | 2 +- src/python/grpcio_reflection/grpc_version.py | 2 +- src/python/grpcio_testing/grpc_version.py | 2 +- src/python/grpcio_tests/grpc_version.py | 2 +- src/ruby/lib/grpc/version.rb | 2 +- src/ruby/tools/version.rb | 2 +- tools/distrib/python/grpcio_tools/grpc_version.py | 2 +- tools/doxygen/Doxyfile.c++ | 2 +- tools/doxygen/Doxyfile.c++.internal | 2 +- 34 files changed, 41 insertions(+), 40 deletions(-) diff --git a/BUILD b/BUILD index dfcded21976..79e3aeb8491 100644 --- a/BUILD +++ b/BUILD @@ -34,11 +34,11 @@ load( ) # This should be updated along with build.yaml -g_stands_for = "gambit" +g_stands_for = "generous" core_version = "4.0.0-dev" -version = "1.7.0-dev" +version = "1.8.0-dev" GPR_PUBLIC_HDRS = [ "include/grpc/support/alloc.h", diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e448c92b66..e92e19465bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -24,7 +24,7 @@ cmake_minimum_required(VERSION 2.8) set(PACKAGE_NAME "grpc") -set(PACKAGE_VERSION "1.7.0-dev") +set(PACKAGE_VERSION "1.8.0-dev") set(PACKAGE_STRING "${PACKAGE_NAME} ${PACKAGE_VERSION}") set(PACKAGE_TARNAME "${PACKAGE_NAME}-${PACKAGE_VERSION}") set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/") diff --git a/Makefile b/Makefile index 517ddfd90e0..22520ac9a15 100644 --- a/Makefile +++ b/Makefile @@ -411,8 +411,8 @@ Q = @ endif CORE_VERSION = 5.0.0-dev -CPP_VERSION = 1.7.0-dev -CSHARP_VERSION = 1.7.0-dev +CPP_VERSION = 1.8.0-dev +CSHARP_VERSION = 1.8.0-dev 
CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES)) CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS) diff --git a/build.yaml b/build.yaml index 9b5aacc4c8f..7563d4bfc06 100644 --- a/build.yaml +++ b/build.yaml @@ -13,8 +13,8 @@ settings: '#09': Per-language overrides are possible with (eg) ruby_version tag here '#10': See the expand_version.py for all the quirks here core_version: 5.0.0-dev - g_stands_for: gambit - version: 1.7.0-dev + g_stands_for: generous + version: 1.8.0-dev filegroups: - name: census public_headers: diff --git a/doc/g_stands_for.md b/doc/g_stands_for.md index 9a161d67a30..4e2ca33276c 100644 --- a/doc/g_stands_for.md +++ b/doc/g_stands_for.md @@ -11,3 +11,4 @@ future), and the corresponding version numbers that used them: - 1.4 'g' stands for 'gregarious' - 1.6 'g' stands for 'garcia' - 1.7 'g' stands for 'gambit' +- 1.8 'g' stands for 'generous' diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index d34c271dbaf..7b0c72ea1a1 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -22,7 +22,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.7.0-dev' + version = '1.8.0-dev' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'https://grpc.io' diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec index cb40330dab5..db1e8db06c0 100644 --- a/gRPC-ProtoRPC.podspec +++ b/gRPC-ProtoRPC.podspec @@ -21,7 +21,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-ProtoRPC' - version = '1.7.0-dev' + version = '1.8.0-dev' s.version = version s.summary = 'RPC library for Protocol Buffers, based on gRPC' s.homepage = 'https://grpc.io' diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec index 52bd8ed0ae1..36897790395 100644 --- a/gRPC-RxLibrary.podspec +++ b/gRPC-RxLibrary.podspec @@ -21,7 +21,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-RxLibrary' - version = '1.7.0-dev' + version = '1.8.0-dev' s.version = version s.summary = 'Reactive Extensions library for iOS/OSX.' 
s.homepage = 'https://grpc.io' diff --git a/gRPC.podspec b/gRPC.podspec index 79315e46f31..6bec50f0de3 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -20,7 +20,7 @@ Pod::Spec.new do |s| s.name = 'gRPC' - version = '1.7.0-dev' + version = '1.8.0-dev' s.version = version s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'https://grpc.io' diff --git a/package.json b/package.json index 2e31275bf02..a0f0ff1710d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "grpc", - "version": "1.7.0-dev", + "version": "1.8.0-dev", "author": "Google Inc.", "description": "gRPC Library for Node", "homepage": "https://grpc.io/", diff --git a/package.xml b/package.xml index 0eea122b24e..dacb2d3d9a6 100644 --- a/package.xml +++ b/package.xml @@ -13,8 +13,8 @@ 2017-08-24 - 1.7.0dev - 1.7.0dev + 1.8.0dev + 1.8.0dev beta diff --git a/src/core/lib/surface/version.c b/src/core/lib/surface/version.c index fd6ea4daa9f..6cb8e7e1a0d 100644 --- a/src/core/lib/surface/version.c +++ b/src/core/lib/surface/version.c @@ -23,4 +23,4 @@ const char *grpc_version_string(void) { return "5.0.0-dev"; } -const char *grpc_g_stands_for(void) { return "gambit"; } +const char *grpc_g_stands_for(void) { return "generous"; } diff --git a/src/cpp/common/version_cc.cc b/src/cpp/common/version_cc.cc index 2e9a51316d4..8049cbe0c90 100644 --- a/src/cpp/common/version_cc.cc +++ b/src/cpp/common/version_cc.cc @@ -22,5 +22,5 @@ #include namespace grpc { -grpc::string Version() { return "1.7.0-dev"; } +grpc::string Version() { return "1.8.0-dev"; } } diff --git a/src/csharp/Grpc.Core/Version.csproj.include b/src/csharp/Grpc.Core/Version.csproj.include index 124ecab14cd..b9ceaf82543 100755 --- a/src/csharp/Grpc.Core/Version.csproj.include +++ b/src/csharp/Grpc.Core/Version.csproj.include @@ -1,7 +1,7 @@ - 1.7.0-dev + 1.8.0-dev 3.3.0 diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs index 588cc845165..dab938821fa 100644 --- 
a/src/csharp/Grpc.Core/VersionInfo.cs +++ b/src/csharp/Grpc.Core/VersionInfo.cs @@ -33,11 +33,11 @@ namespace Grpc.Core /// /// Current AssemblyFileVersion of gRPC C# assemblies /// - public const string CurrentAssemblyFileVersion = "1.7.0.0"; + public const string CurrentAssemblyFileVersion = "1.8.0.0"; /// /// Current version of gRPC C# /// - public const string CurrentVersion = "1.7.0-dev"; + public const string CurrentVersion = "1.8.0-dev"; } } diff --git a/src/csharp/build_packages_dotnetcli.bat b/src/csharp/build_packages_dotnetcli.bat index c419d870492..ff013d56800 100755 --- a/src/csharp/build_packages_dotnetcli.bat +++ b/src/csharp/build_packages_dotnetcli.bat @@ -13,7 +13,7 @@ @rem limitations under the License. @rem Current package versions -set VERSION=1.7.0-dev +set VERSION=1.8.0-dev @rem Adjust the location of nuget.exe set NUGET=C:\nuget\nuget.exe diff --git a/src/csharp/build_packages_dotnetcli.sh b/src/csharp/build_packages_dotnetcli.sh index 124dfbb257e..44a4791146b 100755 --- a/src/csharp/build_packages_dotnetcli.sh +++ b/src/csharp/build_packages_dotnetcli.sh @@ -39,7 +39,7 @@ dotnet pack --configuration Release Grpc.Auth --output ../../../artifacts dotnet pack --configuration Release Grpc.HealthCheck --output ../../../artifacts dotnet pack --configuration Release Grpc.Reflection --output ../../../artifacts -nuget pack Grpc.nuspec -Version "1.7.0-dev" -OutputDirectory ../../artifacts -nuget pack Grpc.Tools.nuspec -Version "1.7.0-dev" -OutputDirectory ../../artifacts +nuget pack Grpc.nuspec -Version "1.8.0-dev" -OutputDirectory ../../artifacts +nuget pack Grpc.Tools.nuspec -Version "1.8.0-dev" -OutputDirectory ../../artifacts (cd ../../artifacts && zip csharp_nugets_dotnetcli.zip *.nupkg) diff --git a/src/node/health_check/package.json b/src/node/health_check/package.json index 3c7d3707ee0..6f09c8f9f09 100644 --- a/src/node/health_check/package.json +++ b/src/node/health_check/package.json @@ -1,6 +1,6 @@ { "name": "grpc-health-check", - 
"version": "1.7.0-dev", + "version": "1.8.0-dev", "author": "Google Inc.", "description": "Health check service for use with gRPC", "repository": { @@ -15,7 +15,7 @@ } ], "dependencies": { - "grpc": "^1.7.0-dev", + "grpc": "^1.8.0-dev", "lodash": "^3.9.3", "google-protobuf": "^3.0.0" }, diff --git a/src/node/tools/package.json b/src/node/tools/package.json index d9b1fb86c91..f88fc65cdf4 100644 --- a/src/node/tools/package.json +++ b/src/node/tools/package.json @@ -1,6 +1,6 @@ { "name": "grpc-tools", - "version": "1.7.0-dev", + "version": "1.8.0-dev", "author": "Google Inc.", "description": "Tools for developing with gRPC on Node.js", "homepage": "https://grpc.io/", diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec index 7d073c9a848..9065ab9f73e 100644 --- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -42,7 +42,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler-gRPCPlugin' - v = '1.7.0-dev' + v = '1.8.0-dev' s.version = v s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.' s.description = <<-DESC diff --git a/src/objective-c/GRPCClient/private/version.h b/src/objective-c/GRPCClient/private/version.h index 843954e84a0..db589d12de6 100644 --- a/src/objective-c/GRPCClient/private/version.h +++ b/src/objective-c/GRPCClient/private/version.h @@ -23,4 +23,4 @@ // `tools/buildgen/generate_projects.sh`. 
-#define GRPC_OBJC_VERSION_STRING @"1.7.0-dev" +#define GRPC_OBJC_VERSION_STRING @"1.8.0-dev" diff --git a/src/php/composer.json b/src/php/composer.json index 3606a18f341..09471d23fee 100644 --- a/src/php/composer.json +++ b/src/php/composer.json @@ -2,7 +2,7 @@ "name": "grpc/grpc-dev", "description": "gRPC library for PHP - for Developement use only", "license": "Apache-2.0", - "version": "1.7.0", + "version": "1.8.0", "require": { "php": ">=5.5.0", "google/protobuf": "^v3.3.0" diff --git a/src/php/ext/grpc/version.h b/src/php/ext/grpc/version.h index 07d8eee7fe2..93dd563cffb 100644 --- a/src/php/ext/grpc/version.h +++ b/src/php/ext/grpc/version.h @@ -20,6 +20,6 @@ #ifndef VERSION_H #define VERSION_H -#define PHP_GRPC_VERSION "1.7.0dev" +#define PHP_GRPC_VERSION "1.8.0dev" #endif /* VERSION_H */ diff --git a/src/python/grpcio/grpc/_grpcio_metadata.py b/src/python/grpcio/grpc/_grpcio_metadata.py index a4eb358c4ee..0887ac17224 100644 --- a/src/python/grpcio/grpc/_grpcio_metadata.py +++ b/src/python/grpcio/grpc/_grpcio_metadata.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!! -__version__ = """1.7.0.dev0""" +__version__ = """1.8.0.dev0""" diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py index 3194a893d7e..61c41573756 100644 --- a/src/python/grpcio/grpc_version.py +++ b/src/python/grpcio/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!! -VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/src/python/grpcio_health_checking/grpc_version.py b/src/python/grpcio_health_checking/grpc_version.py index ef68bad17a0..889297f0209 100644 --- a/src/python/grpcio_health_checking/grpc_version.py +++ b/src/python/grpcio_health_checking/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!! 
-VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py index 55ab959cc57..192f4cc2174 100644 --- a/src/python/grpcio_reflection/grpc_version.py +++ b/src/python/grpcio_reflection/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!! -VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py index 592d08efc36..83470c28253 100644 --- a/src/python/grpcio_testing/grpc_version.py +++ b/src/python/grpcio_testing/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!! -VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py index 9e54dc9f75b..7065edd3bfc 100644 --- a/src/python/grpcio_tests/grpc_version.py +++ b/src/python/grpcio_tests/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!! -VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb index 228c01a92c4..3001579ce77 100644 --- a/src/ruby/lib/grpc/version.rb +++ b/src/ruby/lib/grpc/version.rb @@ -14,5 +14,5 @@ # GRPC contains the General RPC module. 
module GRPC - VERSION = '1.7.0.dev' + VERSION = '1.8.0.dev' end diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb index ea0c4ae56cc..c584a7cf593 100644 --- a/src/ruby/tools/version.rb +++ b/src/ruby/tools/version.rb @@ -14,6 +14,6 @@ module GRPC module Tools - VERSION = '1.7.0.dev' + VERSION = '1.8.0.dev' end end diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py index a4178a58c1f..db92b10c603 100644 --- a/tools/distrib/python/grpcio_tools/grpc_version.py +++ b/tools/distrib/python/grpcio_tools/grpc_version.py @@ -14,4 +14,4 @@ # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!! -VERSION='1.7.0.dev0' +VERSION='1.8.0.dev0' diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++ index d81b7b4d115..eb27eed075f 100644 --- a/tools/doxygen/Doxyfile.c++ +++ b/tools/doxygen/Doxyfile.c++ @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. -PROJECT_NUMBER = 1.7.0-dev +PROJECT_NUMBER = 1.8.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index eacb40c2123..584dd0af57b 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -40,7 +40,7 @@ PROJECT_NAME = "GRPC C++" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 1.7.0-dev +PROJECT_NUMBER = 1.8.0-dev # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a From 0bb3f2983fcffd4b044cbb3d67cf4b0d2ab177b3 Mon Sep 17 00:00:00 2001 From: murgatroid99 Date: Mon, 2 Oct 2017 17:39:58 -0700 Subject: [PATCH 103/109] Update core version in BUILD file --- BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/BUILD b/BUILD index 79e3aeb8491..1063b74f068 100644 --- a/BUILD +++ b/BUILD @@ -36,7 +36,7 @@ load( # This should be updated along with build.yaml g_stands_for = "generous" -core_version = "4.0.0-dev" +core_version = "5.0.0-dev" version = "1.8.0-dev" From a36b429ae89c509400b893c617347ceb94471d4d Mon Sep 17 00:00:00 2001 From: Florian Nagel Date: Tue, 3 Oct 2017 15:21:14 +0200 Subject: [PATCH 104/109] Fix deprecation warning Server#addProtoService and use Server#addService instead --- examples/node/dynamic_codegen/greeter_server.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/node/dynamic_codegen/greeter_server.js b/examples/node/dynamic_codegen/greeter_server.js index f9cb1b13d66..180f96c28b8 100644 --- a/examples/node/dynamic_codegen/greeter_server.js +++ b/examples/node/dynamic_codegen/greeter_server.js @@ -34,7 +34,7 @@ function sayHello(call, callback) { */ function main() { var server = new grpc.Server(); - server.addProtoService(hello_proto.Greeter.service, {sayHello: sayHello}); + server.addService(hello_proto.Greeter.service, {sayHello: sayHello}); server.bind('0.0.0.0:50051', grpc.ServerCredentials.createInsecure()); server.start(); } From f693013b13c548cae2e5cba3d5ec235faac58eb9 Mon Sep 17 00:00:00 2001 From: Chris Bacon Date: Tue, 3 Oct 2017 15:15:54 +0100 Subject: [PATCH 105/109] De-register cancellation token Fixes #12800 --- src/csharp/Grpc.Core/Internal/AsyncCall.cs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/src/csharp/Grpc.Core/Internal/AsyncCall.cs b/src/csharp/Grpc.Core/Internal/AsyncCall.cs index 17109de587b..09fb722c816 100644 --- a/src/csharp/Grpc.Core/Internal/AsyncCall.cs +++ b/src/csharp/Grpc.Core/Internal/AsyncCall.cs @@ -34,6 +34,9 @@ namespace Grpc.Core.Internal readonly CallInvocationDetails details; readonly INativeCall injectedNativeCall; // for testing + // Dispose of to de-register cancellation token registration + IDisposable cancellationTokenRegistration; + // Completion of a pending unary response if not null. TaskCompletionSource unaryResponseTcs; @@ -320,6 +323,7 @@ namespace Grpc.Core.Internal protected override void OnAfterReleaseResources() { details.Channel.RemoveCallReference(this); + cancellationTokenRegistration?.Dispose(); } protected override bool IsClient @@ -405,7 +409,7 @@ namespace Grpc.Core.Internal var token = details.Options.CancellationToken; if (token.CanBeCanceled) { - token.Register(() => this.Cancel()); + cancellationTokenRegistration = token.Register(() => this.Cancel()); } } From 92a3805cd37ab41315ded5b5142089db62887b89 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 4 Oct 2017 10:49:20 -0700 Subject: [PATCH 106/109] Submodule bloaty mcbloatface --- .gitmodules | 3 +++ third_party/bloaty | 1 + tools/run_tests/sanity/check_submodules.sh | 1 + 3 files changed, 5 insertions(+) create mode 160000 third_party/bloaty diff --git a/.gitmodules b/.gitmodules index 144fd080ac9..8af00521288 100644 --- a/.gitmodules +++ b/.gitmodules @@ -24,3 +24,6 @@ path = third_party/cares/cares url = https://github.com/c-ares/c-ares.git branch = cares-1_12_0 +[submodule "third_party/bloaty"] + path = third_party/bloaty + url = https://github.com/google/bloaty.git diff --git a/third_party/bloaty b/third_party/bloaty new file mode 160000 index 00000000000..73594cde8c9 --- /dev/null +++ b/third_party/bloaty @@ -0,0 +1 @@ +Subproject commit 73594cde8c9a52a102c4341c244c833aa61b9c06 diff --git a/tools/run_tests/sanity/check_submodules.sh 
b/tools/run_tests/sanity/check_submodules.sh index 7c934b1ba78..97324f0a3ad 100755 --- a/tools/run_tests/sanity/check_submodules.sh +++ b/tools/run_tests/sanity/check_submodules.sh @@ -34,6 +34,7 @@ cat << EOF | awk '{ print $1 }' | sort > $want_submodules 80a37e0782d2d702d52234b62dd4b9ec74fd2c95 third_party/protobuf (v3.4.0) cacf7f1d4e3d44d871b605da3b647f07d718623f third_party/zlib (v1.2.11) 3be1924221e1326df520f8498d704a5c4c8d0cce third_party/cares/cares (cares-1_13_0) + 73594cde8c9a52a102c4341c244c833aa61b9c06 third_party/bloaty EOF diff -u $submodules $want_submodules From e3dbf766f8fc7736ae6a122028e84fcaabb98f93 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 4 Oct 2017 14:01:28 -0700 Subject: [PATCH 107/109] Initial implementation of bloat_diff --- tools/profiling/bloat/bloat_diff.py | 98 +++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100755 tools/profiling/bloat/bloat_diff.py diff --git a/tools/profiling/bloat/bloat_diff.py b/tools/profiling/bloat/bloat_diff.py new file mode 100755 index 00000000000..b9212f1cb9c --- /dev/null +++ b/tools/profiling/bloat/bloat_diff.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python2.7 +# +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import glob +import multiprocessing +import os +import shutil +import subprocess +import sys + +sys.path.append( + os.path.join( + os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils')) +import comment_on_pr + +argp = argparse.ArgumentParser( + description='Perform diff on microbenchmarks') + +argp.add_argument( + '-d', + '--diff_base', + type=str, + help='Commit or branch to compare the current one to') + +argp.add_argument( + '-j', + '--jobs', + type=int, + default=multiprocessing.cpu_count()) + +args = argp.parse_args() + +LIBS = [ + 'libgrpc.so', + 'libgrpc++.so', +] + +def build(where): + subprocess.check_call('make -j%d' % args.jobs, + shell=True, cwd='.') + shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True) + os.rename('libs', 'bloat_diff_%s' % where) + +build('new') + +if args.diff_base: + old = 'old' + where_am_i = subprocess.check_output( + ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip() + subprocess.check_call(['git', 'checkout', args.diff_base]) + subprocess.check_call(['git', 'submodule', 'update']) + try: + try: + build('old') + except subprocess.CalledProcessError, e: + subprocess.check_call(['make', 'clean']) + build('old') + finally: + subprocess.check_call(['git', 'checkout', where_am_i]) + subprocess.check_call(['git', 'submodule', 'update']) + +subprocess.check_call('make -j%d' % args.jobs, + shell=True, cwd='third_party/bloaty') + +text = '' +for lib in LIBS: + text += '****************************************************************\n\n' + text += lib + '\n\n' + old_version = glob.glob('bloat_diff_old/opt/%s' % lib) + new_version = glob.glob('bloat_diff_new/opt/%s' % lib) + assert len(new_version) == 1 + if old_version: + assert len(old_version) == 1 + text += subprocess.check_output('third_party/bloaty/bloaty %s -- %s' % + (new_version[0], old_version[0]), + shell=True) + else: + text += subprocess.check_output('third_party/bloaty/bloaty %s' % + (new_version[0]), + shell=True) + text += 
'\n\n' + +print text +comment_on_pr.comment_on_pr('```\n%s\n```' % text) From f734e732e3aa94c2db0550e36d007f0114338560 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 4 Oct 2017 14:09:28 -0700 Subject: [PATCH 108/109] Better output --- tools/profiling/bloat/bloat_diff.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/profiling/bloat/bloat_diff.py b/tools/profiling/bloat/bloat_diff.py index b9212f1cb9c..9b40685e909 100755 --- a/tools/profiling/bloat/bloat_diff.py +++ b/tools/profiling/bloat/bloat_diff.py @@ -83,14 +83,15 @@ for lib in LIBS: old_version = glob.glob('bloat_diff_old/opt/%s' % lib) new_version = glob.glob('bloat_diff_new/opt/%s' % lib) assert len(new_version) == 1 + cmd = 'third_party/bloaty/bloaty -d compileunits,symbols' if old_version: assert len(old_version) == 1 - text += subprocess.check_output('third_party/bloaty/bloaty %s -- %s' % - (new_version[0], old_version[0]), + text += subprocess.check_output('%s %s -- %s' % + (cmd, new_version[0], old_version[0]), shell=True) else: - text += subprocess.check_output('third_party/bloaty/bloaty %s' % - (new_version[0]), + text += subprocess.check_output('%s %s' % + (cmd, new_version[0]), shell=True) text += '\n\n' From 9125923c077366cbc9bce9dee765dd5beba79544 Mon Sep 17 00:00:00 2001 From: Craig Tiller Date: Wed, 4 Oct 2017 14:20:59 -0700 Subject: [PATCH 109/109] Configure tool to run --- tools/internal_ci/linux/grpc_microbenchmark_diff.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/internal_ci/linux/grpc_microbenchmark_diff.sh b/tools/internal_ci/linux/grpc_microbenchmark_diff.sh index 58ffcf336b8..45add1b02da 100755 --- a/tools/internal_ci/linux/grpc_microbenchmark_diff.sh +++ b/tools/internal_ci/linux/grpc_microbenchmark_diff.sh @@ -25,6 +25,8 @@ cd $(dirname $0)/../../.. 
source tools/internal_ci/helper_scripts/prepare_build_linux_perf_rc tools/run_tests/start_port_server.py +tools/jenkins/run_c_cpp_test.sh tools/profiling/bloat/bloat_diff.py \ + -d origin/$ghprbTargetBranch || FAILED="true" tools/jenkins/run_c_cpp_test.sh tools/profiling/microbenchmarks/bm_diff/bm_main.py \ -d origin/$ghprbTargetBranch \ -b $BENCHMARKS_TO_RUN || FAILED="true"