diff --git a/BUILD b/BUILD index 281375fc1db..7b55e663cb9 100644 --- a/BUILD +++ b/BUILD @@ -574,6 +574,8 @@ grpc_cc_library( "src/core/lib/compression/compression.c", "src/core/lib/compression/message_compress.c", "src/core/lib/compression/stream_compression.c", + "src/core/lib/debug/stats.c", + "src/core/lib/debug/stats_data.c", "src/core/lib/http/format_request.c", "src/core/lib/http/httpcli.c", "src/core/lib/http/parser.c", @@ -690,8 +692,6 @@ grpc_cc_library( "src/core/lib/transport/timeout_encoding.c", "src/core/lib/transport/transport.c", "src/core/lib/transport/transport_op_string.c", - "src/core/lib/debug/stats.c", - "src/core/lib/debug/stats_data.c", ], hdrs = [ "src/core/lib/channel/channel_args.h", @@ -705,6 +705,8 @@ grpc_cc_library( "src/core/lib/compression/algorithm_metadata.h", "src/core/lib/compression/message_compress.h", "src/core/lib/compression/stream_compression.h", + "src/core/lib/debug/stats.h", + "src/core/lib/debug/stats_data.h", "src/core/lib/http/format_request.h", "src/core/lib/http/httpcli.h", "src/core/lib/http/parser.h", @@ -807,8 +809,6 @@ grpc_cc_library( "src/core/lib/transport/timeout_encoding.h", "src/core/lib/transport/transport.h", "src/core/lib/transport/transport_impl.h", - "src/core/lib/debug/stats.h", - "src/core/lib/debug/stats_data.h", ], external_deps = [ "zlib", diff --git a/config.m4 b/config.m4 index d52e37ca289..656a37e9b48 100644 --- a/config.m4 +++ b/config.m4 @@ -12,7 +12,7 @@ if test "$PHP_GRPC" != "no"; then LIBS="-lpthread $LIBS" CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11" - CXXFLAGS="-std=c++11" + CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti" GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD" PHP_REQUIRE_CXX() PHP_ADD_LIBRARY(pthread) diff --git a/doc/environment_variables.md b/doc/environment_variables.md index b79cd973633..f90f1d5b10e 100644 --- a/doc/environment_variables.md +++ b/doc/environment_variables.md @@ -50,6 +50,7 @@ some configuration as environment variables that can be set. - channel_stack_builder - traces information about channel stacks being built - executor - traces grpc's internal thread pool ('the executor') - http - traces state in the http2 transport engine + - http2_stream_state - traces all http2 stream state mutations. - http1 - traces HTTP/1.x operations performed by gRPC - inproc - traces the in-process transport - flowctl - traces http2 flow control diff --git a/grpc.gemspec b/grpc.gemspec index 2d0f9fd4504..96323aed106 100644 --- a/grpc.gemspec +++ b/grpc.gemspec @@ -29,6 +29,7 @@ Gem::Specification.new do |s| s.add_dependency 'google-protobuf', '~> 3.1' s.add_dependency 'googleauth', '~> 0.5.1' + s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0' s.add_development_dependency 'bundler', '~> 1.9' s.add_development_dependency 'facter', '~> 2.4' diff --git a/include/grpc/compression.h b/include/grpc/compression.h index 3a8de4b7b8a..13a8dd66ad9 100644 --- a/include/grpc/compression.h +++ b/include/grpc/compression.h @@ -44,13 +44,13 @@ int grpc_stream_compression_algorithm_parse( * algorithm. Note that \a name is statically allocated and must *not* be freed. * Returns 1 upon success, 0 otherwise. */ GRPCAPI int grpc_compression_algorithm_name( - grpc_compression_algorithm algorithm, char **name); + grpc_compression_algorithm algorithm, const char **name); /** Updates \a name with the encoding name corresponding to a valid \a * algorithm. Note that \a name is statically allocated and must *not* be freed. * Returns 1 upon success, 0 otherwise. 
*/ GRPCAPI int grpc_stream_compression_algorithm_name( - grpc_stream_compression_algorithm algorithm, char **name); + grpc_stream_compression_algorithm algorithm, const char **name); /** Returns the compression algorithm corresponding to \a level for the * compression algorithms encoded in the \a accepted_encodings bitset. diff --git a/package.xml b/package.xml index b7c0e679e5d..0c201afdaea 100644 --- a/package.xml +++ b/package.xml @@ -10,7 +10,7 @@ grpc-packages@google.com yes - 2017-05-22 + 2017-08-24 1.7.0dev @@ -25,6 +25,9 @@ - Channel are now by default persistent #11878 - Some bug fixes from 1.4 branch #12109, #12123 - Fixed hang bug when fork() was used #11814 +- License changed to Apache 2.0 +- Added support for php_namespace option in codegen plugin #11886 +- Updated gRPC C Core library version 1.6 diff --git a/setup.py b/setup.py index d3ea83488cc..12882413ce3 100644 --- a/setup.py +++ b/setup.py @@ -70,7 +70,7 @@ CLASSIFIERS = [ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'License :: OSI Approved :: Apache Software License', -], +] # Environment variable to determine whether or not the Cython extension should # *use* Cython or use the generated C files. Note that this requires the C files diff --git a/src/core/ext/census/base_resources.c b/src/core/ext/census/base_resources.c index 2114bf04cd0..1f2bb39fe0c 100644 --- a/src/core/ext/census/base_resources.c +++ b/src/core/ext/census/base_resources.c @@ -37,20 +37,20 @@ void define_base_resources() { google_census_Resource_BasicUnit numerator = google_census_Resource_BasicUnit_SECS; - resource r = {"client_rpc_latency", // name - "Client RPC latency in seconds", // description - 0, // prefix - 1, // n_numerators - &numerator, // numerators - 0, // n_denominators - NULL}; // denominators + resource r = {(char *)"client_rpc_latency", // name + (char *)"Client RPC latency in seconds", // description + 0, // prefix + 1, // n_numerators + &numerator, // numerators + 0, // n_denominators + NULL}; // denominators define_resource(&r); - r = (resource){"server_rpc_latency", // name - "Server RPC latency in seconds", // description - 0, // prefix - 1, // n_numerators - &numerator, // numerators - 0, // n_denominators - NULL}; // denominators + r = (resource){(char *)"server_rpc_latency", // name + (char *)"Server RPC latency in seconds", // description + 0, // prefix + 1, // n_numerators + &numerator, // numerators + 0, // n_denominators + NULL}; // denominators define_resource(&r); } diff --git a/src/core/ext/filters/client_channel/client_channel.c b/src/core/ext/filters/client_channel/client_channel.c index 129d0f368b7..016199b1f4f 100644 --- a/src/core/ext/filters/client_channel/client_channel.c +++ b/src/core/ext/filters/client_channel/client_channel.c @@ -375,7 +375,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, } // Extract the following fields from the resolver result, if non-NULL. bool lb_policy_updated = false; - char *lb_policy_name = NULL; + char *lb_policy_name_dup = NULL; bool lb_policy_name_changed = false; grpc_lb_policy *new_lb_policy = NULL; char *service_config_json = NULL; @@ -383,6 +383,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, grpc_slice_hash_table *method_params_table = NULL; if (chand->resolver_result != NULL) { // Find LB policy name. 
+ const char *lb_policy_name = NULL; const grpc_arg *channel_arg = grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME); if (channel_arg != NULL) { @@ -473,7 +474,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, // Before we clean up, save a copy of lb_policy_name, since it might // be pointing to data inside chand->resolver_result. // The copy will be saved in chand->lb_policy_name below. - lb_policy_name = gpr_strdup(lb_policy_name); + lb_policy_name_dup = gpr_strdup(lb_policy_name); grpc_channel_args_destroy(exec_ctx, chand->resolver_result); chand->resolver_result = NULL; } @@ -481,8 +482,8 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, gpr_log(GPR_DEBUG, "chand=%p: resolver result: lb_policy_name=\"%s\"%s, " "service_config=\"%s\"", - chand, lb_policy_name, lb_policy_name_changed ? " (changed)" : "", - service_config_json); + chand, lb_policy_name_dup, + lb_policy_name_changed ? " (changed)" : "", service_config_json); } // Now swap out fields in chand. Note that the new values may still // be NULL if (e.g.) the resolver failed to return results or the @@ -490,9 +491,9 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx, // // First, swap out the data used by cc_get_channel_info(). gpr_mu_lock(&chand->info_mu); - if (lb_policy_name != NULL) { + if (lb_policy_name_dup != NULL) { gpr_free(chand->info_lb_policy_name); - chand->info_lb_policy_name = lb_policy_name; + chand->info_lb_policy_name = lb_policy_name_dup; } if (service_config_json != NULL) { gpr_free(chand->info_service_config_json); diff --git a/src/core/ext/filters/client_channel/client_channel_factory.c b/src/core/ext/filters/client_channel/client_channel_factory.c index e8aa4cda290..57eac8f8757 100644 --- a/src/core/ext/filters/client_channel/client_channel_factory.c +++ b/src/core/ext/filters/client_channel/client_channel_factory.c @@ -63,6 +63,6 @@ static const grpc_arg_pointer_vtable factory_arg_vtable = { grpc_arg grpc_client_channel_factory_create_channel_arg( grpc_client_channel_factory* factory) { - return grpc_channel_arg_pointer_create(GRPC_ARG_CLIENT_CHANNEL_FACTORY, + return grpc_channel_arg_pointer_create((char*)GRPC_ARG_CLIENT_CHANNEL_FACTORY, factory, &factory_arg_vtable); } diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.c b/src/core/ext/filters/client_channel/client_channel_plugin.c index c32e83d0125..1f71c5a7f92 100644 --- a/src/core/ext/filters/client_channel/client_channel_plugin.c +++ b/src/core/ext/filters/client_channel/client_channel_plugin.c @@ -54,8 +54,8 @@ static bool set_default_host_if_unset(grpc_exec_ctx *exec_ctx, char *default_authority = grpc_get_default_authority( exec_ctx, grpc_channel_stack_builder_get_target(builder)); if (default_authority != NULL) { - grpc_arg arg = grpc_channel_arg_string_create(GRPC_ARG_DEFAULT_AUTHORITY, - default_authority); + grpc_arg arg = grpc_channel_arg_string_create( + (char *)GRPC_ARG_DEFAULT_AUTHORITY, default_authority); grpc_channel_args *new_args = grpc_channel_args_copy_and_add(args, &arg, 1); grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder, new_args); diff --git a/src/core/ext/filters/client_channel/http_proxy.c b/src/core/ext/filters/client_channel/http_proxy.c index ef3512ed833..0c3b009e440 100644 --- a/src/core/ext/filters/client_channel/http_proxy.c +++ b/src/core/ext/filters/client_channel/http_proxy.c @@ -157,7 +157,7 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, } grpc_arg args_to_add[2]; 
args_to_add[0] = grpc_channel_arg_string_create( - GRPC_ARG_HTTP_CONNECT_SERVER, + (char*)GRPC_ARG_HTTP_CONNECT_SERVER, uri->path[0] == '/' ? uri->path + 1 : uri->path); if (user_cred != NULL) { /* Use base64 encoding for user credentials as stated in RFC 7617 */ @@ -166,8 +166,8 @@ static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx, char* header; gpr_asprintf(&header, "Proxy-Authorization:Basic %s", encoded_user_cred); gpr_free(encoded_user_cred); - args_to_add[1] = - grpc_channel_arg_string_create(GRPC_ARG_HTTP_CONNECT_HEADERS, header); + args_to_add[1] = grpc_channel_arg_string_create( + (char*)GRPC_ARG_HTTP_CONNECT_HEADERS, header); *new_args = grpc_channel_args_copy_and_add(args, args_to_add, 2); gpr_free(header); } else { diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c index a776a07d99a..85ef7894ea6 100644 --- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c +++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.c @@ -1560,6 +1560,9 @@ static void lb_on_response_received_locked(grpc_exec_ctx *exec_ctx, void *arg, exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), &glb_policy->lb_on_response_received); /* loop */ GPR_ASSERT(GRPC_CALL_OK == call_error); + } else { + GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, + "lb_on_response_received_locked_shutdown"); } } else { /* empty payload: call cancelled. */ /* dispose of the "lb_on_response_received_locked" weak ref taken in @@ -1830,8 +1833,8 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx, // Make sure that GRPC_ARG_LB_POLICY_NAME is set in channel args, // since we use this to trigger the client_load_reporting filter. - grpc_arg new_arg = - grpc_channel_arg_string_create(GRPC_ARG_LB_POLICY_NAME, "grpclb"); + grpc_arg new_arg = grpc_channel_arg_string_create( + (char *)GRPC_ARG_LB_POLICY_NAME, (char *)"grpclb"); static const char *args_to_remove[] = {GRPC_ARG_LB_POLICY_NAME}; glb_policy->args = grpc_channel_args_copy_and_add_and_remove( args->args, args_to_remove, GPR_ARRAY_SIZE(args_to_remove), &new_arg, 1); diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c index 8ac1a46abdf..a3a62e9f3ce 100644 --- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c +++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.c @@ -589,7 +589,7 @@ static void rr_connectivity_changed_locked(grpc_exec_ctx *exec_ctx, void *arg, // Dispose of outdated subchannel lists. 
if (sd->subchannel_list != p->subchannel_list && sd->subchannel_list != p->latest_pending_subchannel_list) { - char *reason = NULL; + const char *reason = NULL; if (sd->subchannel_list->shutting_down) { reason = "sl_outdated_straggler"; rr_subchannel_list_unref(exec_ctx, sd->subchannel_list, reason); diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.c b/src/core/ext/filters/client_channel/lb_policy_factory.c index acf5929746b..4d1405454c9 100644 --- a/src/core/ext/filters/client_channel/lb_policy_factory.c +++ b/src/core/ext/filters/client_channel/lb_policy_factory.c @@ -141,7 +141,7 @@ static const grpc_arg_pointer_vtable lb_addresses_arg_vtable = { grpc_arg grpc_lb_addresses_create_channel_arg( const grpc_lb_addresses* addresses) { return grpc_channel_arg_pointer_create( - GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable); + (char*)GRPC_ARG_LB_ADDRESSES, (void*)addresses, &lb_addresses_arg_vtable); } grpc_lb_addresses* grpc_lb_addresses_find_channel_arg( diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c index 371c59b8cf3..9bb229ad958 100644 --- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c +++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.c @@ -249,7 +249,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, service_config_string); args_to_remove[num_args_to_remove++] = GRPC_ARG_SERVICE_CONFIG; new_args[num_args_to_add++] = grpc_channel_arg_string_create( - GRPC_ARG_SERVICE_CONFIG, service_config_string); + (char *)GRPC_ARG_SERVICE_CONFIG, service_config_string); service_config = grpc_service_config_create(service_config_string); if (service_config != NULL) { const char *lb_policy_name = @@ -257,7 +257,7 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg, if (lb_policy_name != NULL) { args_to_remove[num_args_to_remove++] = GRPC_ARG_LB_POLICY_NAME; new_args[num_args_to_add++] = grpc_channel_arg_string_create( - GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name); + (char *)GRPC_ARG_LB_POLICY_NAME, (char *)lb_policy_name); } } } diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c index c4676e0299c..69ea440ae6a 100644 --- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c +++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.c @@ -210,7 +210,7 @@ grpc_arg grpc_fake_resolver_response_generator_arg( grpc_fake_resolver_response_generator* generator) { grpc_arg arg; arg.type = GRPC_ARG_POINTER; - arg.key = GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR; + arg.key = (char*)GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR; arg.value.pointer.p = generator; arg.value.pointer.vtable = &response_generator_arg_vtable; return arg; diff --git a/src/core/ext/filters/client_channel/subchannel.c b/src/core/ext/filters/client_channel/subchannel.c index bc9c3cc782f..40a51c72d67 100644 --- a/src/core/ext/filters/client_channel/subchannel.c +++ b/src/core/ext/filters/client_channel/subchannel.c @@ -811,6 +811,6 @@ const char *grpc_get_subchannel_address_uri_arg(const grpc_channel_args *args) { grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address *addr) { return grpc_channel_arg_string_create( - GRPC_ARG_SUBCHANNEL_ADDRESS, + (char *)GRPC_ARG_SUBCHANNEL_ADDRESS, addr->len > 0 ? 
grpc_sockaddr_to_uri(addr) : gpr_strdup("")); } diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.c b/src/core/ext/filters/http/message_compress/message_compress_filter.c index 0145dffab01..f785e1355d3 100644 --- a/src/core/ext/filters/http/message_compress/message_compress_filter.c +++ b/src/core/ext/filters/http/message_compress/message_compress_filter.c @@ -244,7 +244,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, &calld->slices, &tmp); if (did_compress) { if (GRPC_TRACER_ON(grpc_compression_trace)) { - char *algo_name; + const char *algo_name; const size_t before_size = calld->slices.length; const size_t after_size = tmp.length; const float savings_ratio = 1.0f - (float)after_size / (float)before_size; @@ -258,7 +258,7 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx, send_flags |= GRPC_WRITE_INTERNAL_COMPRESS; } else { if (GRPC_TRACER_ON(grpc_compression_trace)) { - char *algo_name; + const char *algo_name; GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm, &algo_name)); gpr_log(GPR_DEBUG, diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c index f56afd27d2c..2486ead4272 100644 --- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c +++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.c @@ -55,7 +55,8 @@ static bool maybe_add_server_load_reporting_filter( } grpc_arg grpc_load_reporting_enable_arg() { - return grpc_channel_arg_integer_create(GRPC_ARG_ENABLE_LOAD_REPORTING, 1); + return grpc_channel_arg_integer_create((char *)GRPC_ARG_ENABLE_LOAD_REPORTING, + 1); } /* Plugin registration */ diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.c b/src/core/ext/transport/chttp2/client/insecure/channel_create.c index cccb347bf10..6410a6043db 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create.c +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.c @@ -55,7 +55,7 @@ static grpc_channel *client_channel_factory_create_channel( } // Add channel arg containing the server URI. 
grpc_arg arg = grpc_channel_arg_string_create( - GRPC_ARG_SERVER_URI, + (char *)GRPC_ARG_SERVER_URI, grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target)); const char *to_remove[] = {GRPC_ARG_SERVER_URI}; grpc_channel_args *new_args = diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c index 0346d50b6ce..dd88136f7b5 100644 --- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c +++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.c @@ -42,7 +42,7 @@ grpc_channel *grpc_insecure_channel_create_from_fd( (target, fd, args)); grpc_arg default_authority_arg = grpc_channel_arg_string_create( - GRPC_ARG_DEFAULT_AUTHORITY, "test.authority"); + (char *)GRPC_ARG_DEFAULT_AUTHORITY, (char *)"test.authority"); grpc_channel_args *final_args = grpc_channel_args_copy_and_add(args, &default_authority_arg, 1); diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c index 78551df9c3c..6d09953830c 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.c +++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.c @@ -23,6 +23,7 @@ void grpc_chttp2_plugin_init(void) { grpc_register_tracer(&grpc_http_trace); grpc_register_tracer(&grpc_flowctl_trace); + grpc_register_tracer(&grpc_trace_http2_stream_state); #ifndef NDEBUG grpc_register_tracer(&grpc_trace_chttp2_refcount); #endif diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h index 0c4e2a91c03..55fb1a8343e 100644 --- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h +++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h @@ -25,6 +25,7 @@ extern grpc_tracer_flag grpc_http_trace; extern grpc_tracer_flag grpc_flowctl_trace; +extern grpc_tracer_flag grpc_trace_http2_stream_state; #ifndef NDEBUG extern grpc_tracer_flag grpc_trace_chttp2_refcount; diff --git a/src/core/ext/transport/chttp2/transport/stream_lists.c b/src/core/ext/transport/chttp2/transport/stream_lists.c index 7cc85dea9c8..47cd22d1773 100644 --- a/src/core/ext/transport/chttp2/transport/stream_lists.c +++ b/src/core/ext/transport/chttp2/transport/stream_lists.c @@ -20,6 +20,27 @@ #include +static char *stream_list_id_string(grpc_chttp2_stream_list_id id) { + switch (id) { + case GRPC_CHTTP2_LIST_WRITABLE: + return "writable"; + case GRPC_CHTTP2_LIST_WRITING: + return "writing"; + case GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT: + return "stalled_by_transport"; + case GRPC_CHTTP2_LIST_STALLED_BY_STREAM: + return "stalled_by_stream"; + case GRPC_CHTTP2_LIST_WAITING_FOR_CONCURRENCY: + return "waiting_for_concurrency"; + case STREAM_LIST_COUNT: + GPR_UNREACHABLE_CODE(return "unknown"); + } + GPR_UNREACHABLE_CODE(return "unknown"); +} + +grpc_tracer_flag grpc_trace_http2_stream_state = + GRPC_TRACER_INITIALIZER(false, "http2_stream_state"); + /* core list management */ static bool stream_list_empty(grpc_chttp2_transport *t, @@ -44,6 +65,10 @@ static bool stream_list_pop(grpc_chttp2_transport *t, s->included[id] = 0; } *stream = s; + if (s && GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { + gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id, + t->is_client ? 
"cli" : "svr", stream_list_id_string(id)); + } return s != 0; } @@ -62,6 +87,10 @@ static void stream_list_remove(grpc_chttp2_transport *t, grpc_chttp2_stream *s, } else { t->lists[id].tail = s->links[id].prev; } + if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { + gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id, + t->is_client ? "cli" : "svr", stream_list_id_string(id)); + } } static bool stream_list_maybe_remove(grpc_chttp2_transport *t, @@ -90,6 +119,10 @@ static void stream_list_add_tail(grpc_chttp2_transport *t, } t->lists[id].tail = s; s->included[id] = 1; + if (GRPC_TRACER_ON(grpc_trace_http2_stream_state)) { + gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id, + t->is_client ? "cli" : "svr", stream_list_id_string(id)); + } } static bool stream_list_add(grpc_chttp2_transport *t, grpc_chttp2_stream *s, @@ -150,17 +183,12 @@ void grpc_chttp2_list_remove_waiting_for_concurrency(grpc_chttp2_transport *t, void grpc_chttp2_list_add_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream *s) { - GRPC_FLOW_CONTROL_IF_TRACING( - gpr_log(GPR_DEBUG, "stream %u stalled by transport", s->id)); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); } bool grpc_chttp2_list_pop_stalled_by_transport(grpc_chttp2_transport *t, grpc_chttp2_stream **s) { - bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); - GRPC_FLOW_CONTROL_IF_TRACING(if (ret) gpr_log( - GPR_DEBUG, "stream %u un-stalled by transport", (*s)->id)); - return ret; + return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_TRANSPORT); } void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t, @@ -170,23 +198,15 @@ void grpc_chttp2_list_remove_stalled_by_transport(grpc_chttp2_transport *t, void grpc_chttp2_list_add_stalled_by_stream(grpc_chttp2_transport *t, grpc_chttp2_stream *s) { - GRPC_FLOW_CONTROL_IF_TRACING( - gpr_log(GPR_DEBUG, "stream %u stalled by stream", s->id)); stream_list_add(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } bool grpc_chttp2_list_pop_stalled_by_stream(grpc_chttp2_transport *t, grpc_chttp2_stream **s) { - bool ret = stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); - GRPC_FLOW_CONTROL_IF_TRACING( - if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", (*s)->id)); - return ret; + return stream_list_pop(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } bool grpc_chttp2_list_remove_stalled_by_stream(grpc_chttp2_transport *t, grpc_chttp2_stream *s) { - bool ret = stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); - GRPC_FLOW_CONTROL_IF_TRACING( - if (ret) gpr_log(GPR_DEBUG, "stream %u un-stalled by stream", s->id)); - return ret; + return stream_list_maybe_remove(t, s, GRPC_CHTTP2_LIST_STALLED_BY_STREAM); } diff --git a/src/core/ext/transport/inproc/inproc_transport.c b/src/core/ext/transport/inproc/inproc_transport.c index cd3e76a0b5b..31739d07dd6 100644 --- a/src/core/ext/transport/inproc/inproc_transport.c +++ b/src/core/ext/transport/inproc/inproc_transport.c @@ -1263,8 +1263,8 @@ grpc_channel *grpc_inproc_channel_create(grpc_server *server, grpc_arg default_authority_arg; default_authority_arg.type = GRPC_ARG_STRING; - default_authority_arg.key = GRPC_ARG_DEFAULT_AUTHORITY; - default_authority_arg.value.string = "inproc.authority"; + default_authority_arg.key = (char *)GRPC_ARG_DEFAULT_AUTHORITY; + default_authority_arg.value.string = (char *)"inproc.authority"; grpc_channel_args *client_args = grpc_channel_args_copy_and_add(args, &default_authority_arg, 1); diff --git a/src/core/lib/channel/channel_args.c 
b/src/core/lib/channel/channel_args.c index 9215707f1c3..30248b3c609 100644 --- a/src/core/lib/channel/channel_args.c +++ b/src/core/lib/channel/channel_args.c @@ -243,7 +243,7 @@ grpc_channel_args *grpc_channel_args_set_compression_algorithm( GPR_ASSERT(algorithm < GRPC_COMPRESS_ALGORITHMS_COUNT); grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; + tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; tmp.value.integer = algorithm; return grpc_channel_args_copy_and_add(a, &tmp, 1); } @@ -253,7 +253,7 @@ grpc_channel_args *grpc_channel_args_set_stream_compression_algorithm( GPR_ASSERT(algorithm < GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT); grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; + tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM; tmp.value.integer = algorithm; return grpc_channel_args_copy_and_add(a, &tmp, 1); } @@ -308,7 +308,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( if (grpc_channel_args_get_compression_algorithm(*a) == algorithm && state == 0) { - char *algo_name = NULL; + const char *algo_name = NULL; GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algo_name) != 0); gpr_log(GPR_ERROR, "Tried to disable default compression algorithm '%s'. The " @@ -324,7 +324,7 @@ grpc_channel_args *grpc_channel_args_compression_algorithm_set_state( /* create a new arg */ grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; + tmp.key = (char *)GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; /* all enabled by default */ tmp.value.integer = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; if (state != 0) { @@ -349,7 +349,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( if (grpc_channel_args_get_stream_compression_algorithm(*a) == algorithm && state == 0) { - char *algo_name = NULL; + const char *algo_name = NULL; GPR_ASSERT(grpc_stream_compression_algorithm_name(algorithm, &algo_name) != 0); gpr_log(GPR_ERROR, @@ -366,7 +366,7 @@ grpc_channel_args *grpc_channel_args_stream_compression_algorithm_set_state( /* create a new arg */ grpc_arg tmp; tmp.type = GRPC_ARG_INTEGER; - tmp.key = GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; + tmp.key = (char *)GRPC_STREAM_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET; /* all enabled by default */ tmp.value.integer = (1u << GRPC_STREAM_COMPRESS_ALGORITHMS_COUNT) - 1; if (state != 0) { diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h index ae1cac31f74..f0de80f0c04 100644 --- a/src/core/lib/channel/channel_stack.h +++ b/src/core/lib/channel/channel_stack.h @@ -281,7 +281,7 @@ grpc_channel_stack *grpc_channel_stack_from_top_element( /* Given the top element of a call stack, get the call stack itself */ grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem); -void grpc_call_log_op(char *file, int line, gpr_log_severity severity, +void grpc_call_log_op(const char *file, int line, gpr_log_severity severity, grpc_call_element *elem, grpc_transport_stream_op_batch *op); diff --git a/src/core/lib/compression/compression.c b/src/core/lib/compression/compression.c index ec84c018117..1cfac231295 100644 --- a/src/core/lib/compression/compression.c +++ b/src/core/lib/compression/compression.c @@ -60,7 +60,7 @@ int grpc_stream_compression_algorithm_parse( } int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm, - char **name) { + const 
char **name) { GRPC_API_TRACE("grpc_compression_algorithm_parse(algorithm=%d, name=%p)", 2, ((int)algorithm, name)); switch (algorithm) { @@ -80,7 +80,7 @@ int grpc_compression_algorithm_name(grpc_compression_algorithm algorithm, } int grpc_stream_compression_algorithm_name( - grpc_stream_compression_algorithm algorithm, char **name) { + grpc_stream_compression_algorithm algorithm, const char **name) { GRPC_API_TRACE( "grpc_stream_compression_algorithm_parse(algorithm=%d, name=%p)", 2, ((int)algorithm, name)); diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h index dd9e6a30fe2..64f2e3fc331 100644 --- a/src/core/lib/debug/trace.h +++ b/src/core/lib/debug/trace.h @@ -35,7 +35,7 @@ typedef struct { #else bool value; #endif - char *name; + const char *name; } grpc_tracer_flag; #ifdef GRPC_THREADSAFE_TRACER diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c index 97c28865258..c553fa3981e 100644 --- a/src/core/lib/http/httpcli_security_connector.c +++ b/src/core/lib/http/httpcli_security_connector.c @@ -43,7 +43,8 @@ static void httpcli_ssl_destroy(grpc_exec_ctx *exec_ctx, grpc_httpcli_ssl_channel_security_connector *c = (grpc_httpcli_ssl_channel_security_connector *)sc; if (c->handshaker_factory != NULL) { - tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory); + tsi_ssl_client_handshaker_factory_unref(c->handshaker_factory); + c->handshaker_factory = NULL; } if (c->secure_peer_name != NULL) gpr_free(c->secure_peer_name); gpr_free(sc); diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c index 7236e23cf70..00edefc6ae4 100644 --- a/src/core/lib/iomgr/closure.c +++ b/src/core/lib/iomgr/closure.c @@ -167,7 +167,14 @@ void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c, GPR_TIMER_BEGIN("grpc_closure_sched", 0); if (c != NULL) { #ifndef NDEBUG - GPR_ASSERT(!c->scheduled); + if (c->scheduled) { + gpr_log(GPR_ERROR, + "Closure already scheduled. (closure: %p, created: [%s:%d], " + "previously scheduled at: [%s: %d] run?: %s", + c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated, c->run ? "true" : "false"); + abort(); + } c->scheduled = true; c->file_initiated = file; c->line_initiated = line; @@ -191,7 +198,14 @@ void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) { while (c != NULL) { grpc_closure *next = c->next_data.next; #ifndef NDEBUG - GPR_ASSERT(!c->scheduled); + if (c->scheduled) { + gpr_log(GPR_ERROR, + "Closure already scheduled. (closure: %p, created: [%s:%d], " + "previously scheduled at: [%s: %d] run?: %s", + c, c->file_created, c->line_created, c->file_initiated, + c->line_initiated, c->run ? 
"true" : "false"); + abort(); + } c->scheduled = true; c->file_initiated = file; c->line_initiated = line; diff --git a/src/core/lib/iomgr/error.c b/src/core/lib/iomgr/error.c index d5e00a8f643..aa05501537b 100644 --- a/src/core/lib/iomgr/error.c +++ b/src/core/lib/iomgr/error.c @@ -641,7 +641,7 @@ static char *key_time(grpc_error_times which) { static char *fmt_time(gpr_timespec tm) { char *out; - char *pfx = "!!"; + const char *pfx = "!!"; switch (tm.clock_type) { case GPR_CLOCK_MONOTONIC: pfx = "@monotonic:"; diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c index 082e3b7947b..60cfeebd47d 100644 --- a/src/core/lib/iomgr/resolve_address_posix.c +++ b/src/core/lib/iomgr/resolve_address_posix.c @@ -85,7 +85,7 @@ static grpc_error *blocking_resolve_address_impl( if (s != 0) { /* Retry if well-known service name is recognized */ - char *svc[][2] = {{"http", "80"}, {"https", "443"}}; + const char *svc[][2] = {{"http", "80"}, {"https", "443"}}; for (i = 0; i < GPR_ARRAY_SIZE(svc); i++) { if (strcmp(port, svc[i][0]) == 0) { GRPC_SCHEDULING_START_BLOCKING_REGION; diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c index a7568b995f5..2a9e939d40a 100644 --- a/src/core/lib/security/transport/security_connector.c +++ b/src/core/lib/security/transport/security_connector.c @@ -455,14 +455,14 @@ grpc_server_security_connector *grpc_fake_server_security_connector_create( typedef struct { grpc_channel_security_connector base; - tsi_ssl_client_handshaker_factory *handshaker_factory; + tsi_ssl_client_handshaker_factory *client_handshaker_factory; char *target_name; char *overridden_target_name; } grpc_ssl_channel_security_connector; typedef struct { grpc_server_security_connector base; - tsi_ssl_server_handshaker_factory *handshaker_factory; + tsi_ssl_server_handshaker_factory *server_handshaker_factory; } grpc_ssl_server_security_connector; static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, @@ -470,9 +470,8 @@ static void ssl_channel_destroy(grpc_exec_ctx *exec_ctx, grpc_ssl_channel_security_connector *c = (grpc_ssl_channel_security_connector *)sc; grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds); - if (c->handshaker_factory != NULL) { - tsi_ssl_client_handshaker_factory_destroy(c->handshaker_factory); - } + tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory); + c->client_handshaker_factory = NULL; if (c->target_name != NULL) gpr_free(c->target_name); if (c->overridden_target_name != NULL) gpr_free(c->overridden_target_name); gpr_free(sc); @@ -482,9 +481,8 @@ static void ssl_server_destroy(grpc_exec_ctx *exec_ctx, grpc_security_connector *sc) { grpc_ssl_server_security_connector *c = (grpc_ssl_server_security_connector *)sc; - if (c->handshaker_factory != NULL) { - tsi_ssl_server_handshaker_factory_destroy(c->handshaker_factory); - } + tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory); + c->server_handshaker_factory = NULL; gpr_free(sc); } @@ -496,7 +494,7 @@ static void ssl_channel_add_handshakers(grpc_exec_ctx *exec_ctx, // Instantiate TSI handshaker. tsi_handshaker *tsi_hs = NULL; tsi_result result = tsi_ssl_client_handshaker_factory_create_handshaker( - c->handshaker_factory, + c->client_handshaker_factory, c->overridden_target_name != NULL ? c->overridden_target_name : c->target_name, &tsi_hs); @@ -521,7 +519,7 @@ static void ssl_server_add_handshakers(grpc_exec_ctx *exec_ctx, // Instantiate TSI handshaker. 
tsi_handshaker *tsi_hs = NULL; tsi_result result = tsi_ssl_server_handshaker_factory_create_handshaker( - c->handshaker_factory, &tsi_hs); + c->server_handshaker_factory, &tsi_hs); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.", tsi_result_to_string(result)); @@ -852,7 +850,7 @@ grpc_security_status grpc_ssl_channel_security_connector_create( result = tsi_create_ssl_client_handshaker_factory( has_key_cert_pair ? &config->pem_key_cert_pair : NULL, pem_root_certs, ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols, - &c->handshaker_factory); + &c->client_handshaker_factory); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", tsi_result_to_string(result)); @@ -897,7 +895,7 @@ grpc_security_status grpc_ssl_server_security_connector_create( config->pem_root_certs, get_tsi_client_certificate_request_type( config->client_certificate_request), ssl_cipher_suites(), alpn_protocol_strings, (uint16_t)num_alpn_protocols, - &c->handshaker_factory); + &c->server_handshaker_factory); if (result != TSI_OK) { gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.", tsi_result_to_string(result)); diff --git a/src/core/lib/support/log_linux.c b/src/core/lib/support/log_linux.c index 61d23464276..77550186932 100644 --- a/src/core/lib/support/log_linux.c +++ b/src/core/lib/support/log_linux.c @@ -57,7 +57,7 @@ void gpr_log(const char *file, int line, gpr_log_severity severity, } void gpr_default_log(gpr_log_func_args *args) { - char *final_slash; + const char *final_slash; char *prefix; const char *display_file; char time_buffer[64]; diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c index 523e43445be..6b172df82f6 100644 --- a/src/core/lib/support/string.c +++ b/src/core/lib/support/string.c @@ -276,7 +276,7 @@ static void add_string_to_split(const char *beg, const char *end, char ***strs, void gpr_string_split(const char *input, const char *sep, char ***strs, size_t *nstrs) { - char *next; + const char *next; *strs = NULL; *nstrs = 0; size_t capstrs = 0; diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c index 135257c1ac0..dc98532663f 100644 --- a/src/core/lib/surface/call.c +++ b/src/core/lib/surface/call.c @@ -1512,7 +1512,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, } else if (grpc_compression_options_is_stream_compression_algorithm_enabled( &compression_options, algo) == 0) { /* check if algorithm is supported by current channel config */ - char *algo_name = NULL; + const char *algo_name = NULL; grpc_stream_compression_algorithm_name(algo, &algo_name); gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.", algo_name); @@ -1526,7 +1526,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, if (!GPR_BITGET(call->stream_encodings_accepted_by_peer, call->incoming_stream_compression_algorithm)) { if (GRPC_TRACER_ON(grpc_compression_trace)) { - char *algo_name = NULL; + const char *algo_name = NULL; grpc_stream_compression_algorithm_name( call->incoming_stream_compression_algorithm, &algo_name); gpr_log( @@ -1553,7 +1553,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, } else if (grpc_compression_options_is_algorithm_enabled( &compression_options, algo) == 0) { /* check if algorithm is supported by current channel config */ - char *algo_name = NULL; + const char *algo_name = NULL; grpc_compression_algorithm_name(algo, &algo_name); gpr_asprintf(&error_msg, "Compression algorithm '%s' 
is disabled.", algo_name); @@ -1569,7 +1569,7 @@ static void validate_filtered_metadata(grpc_exec_ctx *exec_ctx, if (!GPR_BITGET(call->encodings_accepted_by_peer, call->incoming_compression_algorithm)) { if (GRPC_TRACER_ON(grpc_compression_trace)) { - char *algo_name = NULL; + const char *algo_name = NULL; grpc_compression_algorithm_name(call->incoming_compression_algorithm, &algo_name); gpr_log(GPR_ERROR, diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c index 13fb6cccf85..559bcc9792c 100644 --- a/src/core/lib/surface/completion_queue.c +++ b/src/core/lib/surface/completion_queue.c @@ -566,13 +566,13 @@ static void cq_check_tag(grpc_completion_queue *cq, void *tag, bool lock_cq) {} * true if the increment was successful; false if the counter is zero */ static bool atm_inc_if_nonzero(gpr_atm *counter) { while (true) { - gpr_atm count = gpr_atm_no_barrier_load(counter); + gpr_atm count = gpr_atm_acq_load(counter); /* If zero, we are done. If not, we must to a CAS (instead of an atomic * increment) to maintain the contract: do not increment the counter if it * is zero. */ if (count == 0) { return false; - } else if (gpr_atm_no_barrier_cas(counter, count, count + 1)) { + } else if (gpr_atm_full_cas(counter, count, count + 1)) { break; } } @@ -644,8 +644,12 @@ static void cq_end_op_for_next(grpc_exec_ctx *exec_ctx, /* Add the completion to the queue */ bool is_first = cq_event_queue_push(&cqd->queue, storage); gpr_atm_no_barrier_fetch_add(&cqd->things_queued_ever, 1); - bool will_definitely_shutdown = - gpr_atm_no_barrier_load(&cqd->pending_events) == 1; + + /* Since we do not hold the cq lock here, it is important to do an 'acquire' + load here (instead of a 'no_barrier' load) to match with the release store + (done via gpr_atm_full_fetch_add(pending_events, -1)) in cq_shutdown_next + */ + bool will_definitely_shutdown = gpr_atm_acq_load(&cqd->pending_events) == 1; if (!will_definitely_shutdown) { /* Only kick if this is the first item queued */ @@ -889,7 +893,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, } } - if (gpr_atm_no_barrier_load(&cqd->pending_events) == 0) { + if (gpr_atm_acq_load(&cqd->pending_events) == 0) { /* Before returning, check if the queue has any items left over (since gpr_mpscq_pop() can sometimes return NULL even if the queue is not empty. 
If so, keep retrying but do not return GRPC_QUEUE_SHUTDOWN */ @@ -935,7 +939,7 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline, } if (cq_event_queue_num_items(&cqd->queue) > 0 && - gpr_atm_no_barrier_load(&cqd->pending_events) > 0) { + gpr_atm_acq_load(&cqd->pending_events) > 0) { gpr_mu_lock(cq->mu); cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), NULL); gpr_mu_unlock(cq->mu); @@ -986,6 +990,9 @@ static void cq_shutdown_next(grpc_exec_ctx *exec_ctx, return; } cqd->shutdown_called = true; + /* Doing a full_fetch_add (i.e. acq/release) here to match with + * cq_begin_op_for_next and cq_end_op_for_next functions which read/write + * on this counter without necessarily holding a lock on cq */ if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) { cq_finish_shutdown_next(exec_ctx, cq); } } diff --git a/src/core/lib/transport/transport_op_string.c b/src/core/lib/transport/transport_op_string.c index 409a6c41039..858664715c6 100644 --- a/src/core/lib/transport/transport_op_string.c +++ b/src/core/lib/transport/transport_op_string.c @@ -197,7 +197,7 @@ char *grpc_transport_op_string(grpc_transport_op *op) { return out; } -void grpc_call_log_op(char *file, int line, gpr_log_severity severity, +void grpc_call_log_op(const char *file, int line, gpr_log_severity severity, grpc_call_element *elem, grpc_transport_stream_op_batch *op) { char *str = grpc_transport_stream_op_batch_string(op); diff --git a/src/core/tsi/ssl_transport_security.c b/src/core/tsi/ssl_transport_security.c index 1fd65928f9c..7ebf9dd96f0 100644 --- a/src/core/tsi/ssl_transport_security.c +++ b/src/core/tsi/ssl_transport_security.c @@ -67,7 +67,13 @@ /* --- Structure definitions. ---*/ +struct tsi_ssl_handshaker_factory { + const tsi_ssl_handshaker_factory_vtable *vtable; + gpr_refcount refcount; +}; + struct tsi_ssl_client_handshaker_factory { + tsi_ssl_handshaker_factory base; SSL_CTX *ssl_context; unsigned char *alpn_protocol_list; size_t alpn_protocol_list_length; @@ -77,6 +83,7 @@ struct tsi_ssl_server_handshaker_factory { /* Several contexts to support SNI. The tsi_peer array contains the subject names of the server certificates associated with the contexts at the same index. */ + tsi_ssl_handshaker_factory base; SSL_CTX **ssl_contexts; tsi_peer *ssl_context_x509_subject_names; size_t ssl_context_count; @@ -90,6 +97,7 @@ typedef struct { BIO *into_ssl; BIO *from_ssl; tsi_result result; + tsi_ssl_handshaker_factory *factory_ref; } tsi_ssl_handshaker; typedef struct { @@ -846,6 +854,47 @@ static const tsi_frame_protector_vtable frame_protector_vtable = { ssl_protector_destroy, }; +/* --- tsi_ssl_handshaker_factory methods implementation. --- */ + +static void tsi_ssl_handshaker_factory_destroy( + tsi_ssl_handshaker_factory *self) { + if (self == NULL) return; + + if (self->vtable != NULL && self->vtable->destroy != NULL) { + self->vtable->destroy(self); + } + /* Note, we don't free(self) here because this object is always directly + * embedded in another object. If tsi_ssl_handshaker_factory_init allocates + * any memory, it should be free'd here. 
*/ +} + +static tsi_ssl_handshaker_factory *tsi_ssl_handshaker_factory_ref( + tsi_ssl_handshaker_factory *self) { + if (self == NULL) return NULL; + gpr_refn(&self->refcount, 1); + return self; +} + +static void tsi_ssl_handshaker_factory_unref(tsi_ssl_handshaker_factory *self) { + if (self == NULL) return; + + if (gpr_unref(&self->refcount)) { + tsi_ssl_handshaker_factory_destroy(self); + } +} + +static tsi_ssl_handshaker_factory_vtable handshaker_factory_vtable = {NULL}; + +/* Initializes a tsi_ssl_handshaker_factory object. Caller is responsible for + * allocating memory for the factory. */ +static void tsi_ssl_handshaker_factory_init( + tsi_ssl_handshaker_factory *factory) { + GPR_ASSERT(factory != NULL); + + factory->vtable = &handshaker_factory_vtable; + gpr_ref_init(&factory->refcount, 1); +} + /* --- tsi_handshaker methods implementation. ---*/ static tsi_result ssl_handshaker_get_bytes_to_send_to_peer(tsi_handshaker *self, @@ -1013,6 +1062,7 @@ static tsi_result ssl_handshaker_create_frame_protector( static void ssl_handshaker_destroy(tsi_handshaker *self) { tsi_ssl_handshaker *impl = (tsi_ssl_handshaker *)self; SSL_free(impl->ssl); /* The BIO objects are owned by ssl */ + tsi_ssl_handshaker_factory_unref(impl->factory_ref); gpr_free(impl); } @@ -1030,6 +1080,7 @@ static const tsi_handshaker_vtable handshaker_vtable = { static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client, const char *server_name_indication, + tsi_ssl_handshaker_factory *factory, tsi_handshaker **handshaker) { SSL *ssl = SSL_new(ctx); BIO *into_ssl = NULL; @@ -1085,6 +1136,8 @@ static tsi_result create_tsi_ssl_handshaker(SSL_CTX *ctx, int is_client, impl->from_ssl = from_ssl; impl->result = TSI_HANDSHAKE_IN_PROGRESS; impl->base.vtable = &handshaker_vtable; + impl->factory_ref = tsi_ssl_handshaker_factory_ref(factory); + *handshaker = &impl->base; return TSI_OK; } @@ -1121,11 +1174,20 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker( tsi_ssl_client_handshaker_factory *self, const char *server_name_indication, tsi_handshaker **handshaker) { return create_tsi_ssl_handshaker(self->ssl_context, 1, server_name_indication, - handshaker); + &self->base, handshaker); } -void tsi_ssl_client_handshaker_factory_destroy( +void tsi_ssl_client_handshaker_factory_unref( tsi_ssl_client_handshaker_factory *self) { + if (self == NULL) return; + tsi_ssl_handshaker_factory_unref(&self->base); +} + +static void tsi_ssl_client_handshaker_factory_destroy( + tsi_ssl_handshaker_factory *factory) { + if (factory == NULL) return; + tsi_ssl_client_handshaker_factory *self = + (tsi_ssl_client_handshaker_factory *)factory; if (self->ssl_context != NULL) SSL_CTX_free(self->ssl_context); if (self->alpn_protocol_list != NULL) gpr_free(self->alpn_protocol_list); gpr_free(self); @@ -1150,11 +1212,21 @@ tsi_result tsi_ssl_server_handshaker_factory_create_handshaker( if (self->ssl_context_count == 0) return TSI_INVALID_ARGUMENT; /* Create the handshaker with the first context. We will switch if needed because of SNI in ssl_server_handshaker_factory_servername_callback. 
*/ - return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, handshaker); + return create_tsi_ssl_handshaker(self->ssl_contexts[0], 0, NULL, &self->base, + handshaker); } -void tsi_ssl_server_handshaker_factory_destroy( +void tsi_ssl_server_handshaker_factory_unref( tsi_ssl_server_handshaker_factory *self) { + if (self == NULL) return; + tsi_ssl_handshaker_factory_unref(&self->base); +} + +static void tsi_ssl_server_handshaker_factory_destroy( + tsi_ssl_handshaker_factory *factory) { + if (factory == NULL) return; + tsi_ssl_server_handshaker_factory *self = + (tsi_ssl_server_handshaker_factory *)factory; size_t i; for (i = 0; i < self->ssl_context_count; i++) { if (self->ssl_contexts[i] != NULL) { @@ -1263,6 +1335,9 @@ static int server_handshaker_factory_npn_advertised_callback( /* --- tsi_ssl_handshaker_factory constructors. --- */ +static tsi_ssl_handshaker_factory_vtable client_handshaker_factory_vtable = { + tsi_ssl_client_handshaker_factory_destroy}; + tsi_result tsi_create_ssl_client_handshaker_factory( const tsi_ssl_pem_key_cert_pair *pem_key_cert_pair, const char *pem_root_certs, const char *cipher_suites, @@ -1285,6 +1360,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory( } impl = gpr_zalloc(sizeof(*impl)); + tsi_ssl_handshaker_factory_init(&impl->base); + impl->base.vtable = &client_handshaker_factory_vtable; + impl->ssl_context = ssl_context; do { @@ -1322,7 +1400,7 @@ tsi_result tsi_create_ssl_client_handshaker_factory( } } while (0); if (result != TSI_OK) { - tsi_ssl_client_handshaker_factory_destroy(impl); + tsi_ssl_handshaker_factory_unref(&impl->base); return result; } SSL_CTX_set_verify(ssl_context, SSL_VERIFY_PEER, NULL); @@ -1332,6 +1410,9 @@ tsi_result tsi_create_ssl_client_handshaker_factory( return TSI_OK; } +static tsi_ssl_handshaker_factory_vtable server_handshaker_factory_vtable = { + tsi_ssl_server_handshaker_factory_destroy}; + tsi_result tsi_create_ssl_server_handshaker_factory( const tsi_ssl_pem_key_cert_pair *pem_key_cert_pairs, size_t num_key_cert_pairs, const char *pem_client_root_certs, @@ -1364,12 +1445,15 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( } impl = gpr_zalloc(sizeof(*impl)); + tsi_ssl_handshaker_factory_init(&impl->base); + impl->base.vtable = &server_handshaker_factory_vtable; + impl->ssl_contexts = gpr_zalloc(num_key_cert_pairs * sizeof(SSL_CTX *)); impl->ssl_context_x509_subject_names = gpr_zalloc(num_key_cert_pairs * sizeof(tsi_peer)); if (impl->ssl_contexts == NULL || impl->ssl_context_x509_subject_names == NULL) { - tsi_ssl_server_handshaker_factory_destroy(impl); + tsi_ssl_handshaker_factory_unref(&impl->base); return TSI_OUT_OF_RESOURCES; } impl->ssl_context_count = num_key_cert_pairs; @@ -1379,7 +1463,7 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( &impl->alpn_protocol_list, &impl->alpn_protocol_list_length); if (result != TSI_OK) { - tsi_ssl_server_handshaker_factory_destroy(impl); + tsi_ssl_handshaker_factory_unref(&impl->base); return result; } } @@ -1451,10 +1535,11 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( } while (0); if (result != TSI_OK) { - tsi_ssl_server_handshaker_factory_destroy(impl); + tsi_ssl_handshaker_factory_unref(&impl->base); return result; } } + *factory = impl; return TSI_OK; } @@ -1501,3 +1586,15 @@ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name) { return 0; /* Not found. */ } + +/* --- Testing support. 
--- */ +const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable( + tsi_ssl_handshaker_factory *factory, + tsi_ssl_handshaker_factory_vtable *new_vtable) { + GPR_ASSERT(factory != NULL); + GPR_ASSERT(factory->vtable != NULL); + + const tsi_ssl_handshaker_factory_vtable *orig_vtable = factory->vtable; + factory->vtable = new_vtable; + return orig_vtable; +} diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h index 177599930bc..3abfdf5ed87 100644 --- a/src/core/tsi/ssl_transport_security.h +++ b/src/core/tsi/ssl_transport_security.h @@ -96,10 +96,10 @@ tsi_result tsi_ssl_client_handshaker_factory_create_handshaker( tsi_ssl_client_handshaker_factory *self, const char *server_name_indication, tsi_handshaker **handshaker); -/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory - while handshakers created with this factory are still in use. */ -void tsi_ssl_client_handshaker_factory_destroy( - tsi_ssl_client_handshaker_factory *self); +/* Decrements reference count of the handshaker factory. Handshaker factory will + * be destroyed once no references exist. */ +void tsi_ssl_client_handshaker_factory_unref( + tsi_ssl_client_handshaker_factory *factory); /* --- tsi_ssl_server_handshaker_factory object --- @@ -158,9 +158,9 @@ tsi_result tsi_create_ssl_server_handshaker_factory_ex( tsi_result tsi_ssl_server_handshaker_factory_create_handshaker( tsi_ssl_server_handshaker_factory *self, tsi_handshaker **handshaker); -/* Destroys the handshaker factory. WARNING: it is unsafe to destroy a factory - while handshakers created with this factory are still in use. */ -void tsi_ssl_server_handshaker_factory_destroy( +/* Decrements reference count of the handshaker factory. Handshaker factory will + * be destroyed once no references exist. */ +void tsi_ssl_server_handshaker_factory_unref( tsi_ssl_server_handshaker_factory *self); /* Util that checks that an ssl peer matches a specific name. @@ -170,6 +170,29 @@ void tsi_ssl_server_handshaker_factory_destroy( - handle public suffix wildchar more strictly (e.g. *.co.uk) */ int tsi_ssl_peer_matches_name(const tsi_peer *peer, const char *name); +/* --- Testing support. --- + + These functions and typedefs are not intended to be used outside of testing. + */ + +/* Base type of client and server handshaker factories. */ +typedef struct tsi_ssl_handshaker_factory tsi_ssl_handshaker_factory; + +/* Function pointer to handshaker_factory destructor. */ +typedef void (*tsi_ssl_handshaker_factory_destructor)( + tsi_ssl_handshaker_factory *factory); + +/* Virtual table for tsi_ssl_handshaker_factory. */ +typedef struct { + tsi_ssl_handshaker_factory_destructor destroy; +} tsi_ssl_handshaker_factory_vtable; + +/* Replaces the vtable of handshaker_factory with new_vtable, returns the + previous vtable. 
*/ +const tsi_ssl_handshaker_factory_vtable *tsi_ssl_handshaker_factory_swap_vtable( + tsi_ssl_handshaker_factory *factory, + tsi_ssl_handshaker_factory_vtable *new_vtable); + #ifdef __cplusplus } #endif diff --git a/src/cpp/client/client_context.cc b/src/cpp/client/client_context.cc index 3af8bdc11ac..40e95f3c05d 100644 --- a/src/cpp/client/client_context.cc +++ b/src/cpp/client/client_context.cc @@ -96,7 +96,7 @@ void ClientContext::set_call(grpc_call* call, void ClientContext::set_compression_algorithm( grpc_compression_algorithm algorithm) { - char* algorithm_name = nullptr; + const char* algorithm_name = nullptr; if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) { gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.", algorithm); diff --git a/src/cpp/server/server_context.cc b/src/cpp/server/server_context.cc index 4913682f1d1..d7876a000b4 100644 --- a/src/cpp/server/server_context.cc +++ b/src/cpp/server/server_context.cc @@ -190,7 +190,7 @@ bool ServerContext::IsCancelled() const { void ServerContext::set_compression_algorithm( grpc_compression_algorithm algorithm) { - char* algorithm_name = NULL; + const char* algorithm_name = NULL; if (!grpc_compression_algorithm_name(algorithm, &algorithm_name)) { gpr_log(GPR_ERROR, "Name for compression algorithm '%d' unknown.", algorithm); diff --git a/src/csharp/Grpc.Auth/Grpc.Auth.csproj b/src/csharp/Grpc.Auth/Grpc.Auth.csproj index abf326459c1..bbcbd95be5f 100755 --- a/src/csharp/Grpc.Auth/Grpc.Auth.csproj +++ b/src/csharp/Grpc.Auth/Grpc.Auth.csproj @@ -15,7 +15,6 @@ gRPC RPC Protocol HTTP/2 Auth OAuth2 https://github.com/grpc/grpc https://github.com/grpc/grpc/blob/master/LICENSE - 1.6.0 true true true diff --git a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj index 9ad6fd0c616..4d6767fa985 100755 --- a/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj +++ b/src/csharp/Grpc.Core.Testing/Grpc.Core.Testing.csproj @@ -15,7 +15,6 @@ gRPC test testing https://github.com/grpc/grpc https://github.com/grpc/grpc/blob/master/LICENSE - 1.6.0 true true true diff --git a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj index 6df68fda589..18993a93e00 100755 --- a/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj +++ b/src/csharp/Grpc.Core.Tests/Grpc.Core.Tests.csproj @@ -8,8 +8,6 @@ Grpc.Core.Tests Exe Grpc.Core.Tests - $(PackageTargetFallback);portable-net45 - 1.0.4 true @@ -21,7 +19,6 @@ - diff --git a/src/csharp/Grpc.Core/Grpc.Core.csproj b/src/csharp/Grpc.Core/Grpc.Core.csproj index dde800aaddb..d9950b2f201 100755 --- a/src/csharp/Grpc.Core/Grpc.Core.csproj +++ b/src/csharp/Grpc.Core/Grpc.Core.csproj @@ -14,7 +14,6 @@ gRPC RPC Protocol HTTP/2 https://github.com/grpc/grpc https://github.com/grpc/grpc/blob/master/LICENSE - 1.6.0 true true true @@ -65,7 +64,7 @@ - + diff --git a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj index 74deed65840..db4e3ef4e3f 100755 --- a/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj +++ b/src/csharp/Grpc.Examples.MathClient/Grpc.Examples.MathClient.csproj @@ -8,7 +8,6 @@ Grpc.Examples.MathClient Exe Grpc.Examples.MathClient - 1.0.4 true diff --git a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj index 1abf2614985..b12b418d015 100755 --- 
a/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj +++ b/src/csharp/Grpc.Examples.MathServer/Grpc.Examples.MathServer.csproj @@ -8,7 +8,6 @@ Grpc.Examples.MathServer Exe Grpc.Examples.MathServer - 1.0.4 true diff --git a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj index d2a13ed6e14..3ccc9adfaf7 100755 --- a/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj +++ b/src/csharp/Grpc.Examples.Tests/Grpc.Examples.Tests.csproj @@ -8,8 +8,6 @@ Grpc.Examples.Tests Exe Grpc.Examples.Tests - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.Examples/Grpc.Examples.csproj b/src/csharp/Grpc.Examples/Grpc.Examples.csproj index 491d313f178..baa3b4ce6c1 100755 --- a/src/csharp/Grpc.Examples/Grpc.Examples.csproj +++ b/src/csharp/Grpc.Examples/Grpc.Examples.csproj @@ -7,7 +7,6 @@ net45;netcoreapp1.0 Grpc.Examples Grpc.Examples - 1.0.4 true diff --git a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj index 2ccf46b9b9c..9da0539dcb9 100755 --- a/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj +++ b/src/csharp/Grpc.HealthCheck.Tests/Grpc.HealthCheck.Tests.csproj @@ -8,8 +8,6 @@ Grpc.HealthCheck.Tests Exe Grpc.HealthCheck.Tests - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj index 3eb90434f3f..681719d124a 100755 --- a/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj +++ b/src/csharp/Grpc.HealthCheck/Grpc.HealthCheck.csproj @@ -14,7 +14,6 @@ gRPC health check https://github.com/grpc/grpc https://github.com/grpc/grpc/blob/master/LICENSE - 1.6.0 true true true diff --git a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj index c67beea7cd3..35713156ea2 100755 --- a/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Client/Grpc.IntegrationTesting.Client.csproj @@ -8,8 +8,6 @@ Grpc.IntegrationTesting.Client Exe Grpc.IntegrationTesting.Client - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj index e452257b1b1..3ecefe3bc4f 100755 --- a/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj +++ b/src/csharp/Grpc.IntegrationTesting.QpsWorker/Grpc.IntegrationTesting.QpsWorker.csproj @@ -9,8 +9,6 @@ Exe Grpc.IntegrationTesting.QpsWorker true - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj index a1fb316fdb1..1092b2c21ef 100755 --- a/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj +++ b/src/csharp/Grpc.IntegrationTesting.Server/Grpc.IntegrationTesting.Server.csproj @@ -8,8 +8,6 @@ Grpc.IntegrationTesting.Server Exe Grpc.IntegrationTesting.Server - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj 
b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj index f64bea3d2be..22272547f61 100755 --- a/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj +++ b/src/csharp/Grpc.IntegrationTesting.StressClient/Grpc.IntegrationTesting.StressClient.csproj @@ -8,8 +8,6 @@ Grpc.IntegrationTesting.StressClient Exe Grpc.IntegrationTesting.StressClient - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj index f5077fe0f74..c02c9844e3d 100755 --- a/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj +++ b/src/csharp/Grpc.IntegrationTesting/Grpc.IntegrationTesting.csproj @@ -8,8 +8,6 @@ Grpc.IntegrationTesting Exe Grpc.IntegrationTesting - $(PackageTargetFallback);portable-net45 - 1.0.4 true @@ -31,10 +29,6 @@ - - - - diff --git a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj index 17797e1e1e8..108357e4eb4 100644 --- a/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj +++ b/src/csharp/Grpc.Microbenchmarks/Grpc.Microbenchmarks.csproj @@ -8,8 +8,6 @@ Grpc.Microbenchmarks Exe Grpc.Microbenchmarks - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj index cf756c68ad3..d3686971245 100755 --- a/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj +++ b/src/csharp/Grpc.Reflection.Tests/Grpc.Reflection.Tests.csproj @@ -8,8 +8,6 @@ Grpc.Reflection.Tests Exe Grpc.Reflection.Tests - $(PackageTargetFallback);portable-net45 - 1.0.4 true diff --git a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj index b77fd69aeeb..704eea5c17a 100755 --- a/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj +++ b/src/csharp/Grpc.Reflection/Grpc.Reflection.csproj @@ -14,7 +14,6 @@ gRPC reflection https://github.com/grpc/grpc https://github.com/grpc/grpc/blob/master/LICENSE - 1.6.0 true true true diff --git a/src/csharp/doc/.gitignore b/src/csharp/doc/.gitignore new file mode 100644 index 00000000000..09ee235efcd --- /dev/null +++ b/src/csharp/doc/.gitignore @@ -0,0 +1,2 @@ +html +obj diff --git a/src/csharp/doc/README.md b/src/csharp/doc/README.md index 585500b5cab..46cce013a18 100644 --- a/src/csharp/doc/README.md +++ b/src/csharp/doc/README.md @@ -1,2 +1,9 @@ +DocFX-generated C# API Reference +-------------------------------- -SandCastle project files to generate HTML reference documentation. \ No newline at end of file +Install docfx based on instructions here: https://github.com/dotnet/docfx + +``` +# generate docfx documentation into ./html directory +$ docfx +``` diff --git a/src/csharp/doc/docfx.json b/src/csharp/doc/docfx.json new file mode 100644 index 00000000000..7219d0e7a69 --- /dev/null +++ b/src/csharp/doc/docfx.json @@ -0,0 +1,37 @@ +{ + "metadata": [ + { + "src": [ + { + "files": ["Grpc.Core/Grpc.Core.csproj", + "Grpc.Auth/Grpc.Auth.csproj", + "Grpc.Core.Testing/Grpc.Core.Testing.csproj", + "Grpc.HealthCheck/Grpc.HealthCheck.csproj", + "Grpc.Reflection/Grpc.HealthCheck.csproj"], + "exclude": [ "**/bin/**", "**/obj/**" ], + "cwd": ".." 
+ } + ], + "properties": { "TargetFramework": "net45" }, + "dest": "obj/api" + } + ], + "build": { + "content": [ + { + "files": [ "**/*.yml" ], + "cwd": "obj/api", + "dest": "api" + }, + { + "files": [ "toc.yml"], + } + ], + "globalMetadata": { + "_appTitle": "gRPC C#", + "_enableSearch": true, + "_disableContribution": true + }, + "dest": "html" + } +} diff --git a/src/csharp/doc/grpc_csharp_public.shfbproj b/src/csharp/doc/grpc_csharp_public.shfbproj deleted file mode 100644 index fab953da353..00000000000 --- a/src/csharp/doc/grpc_csharp_public.shfbproj +++ /dev/null @@ -1,83 +0,0 @@ - - - - - Debug - AnyCPU - 2.0 - {77e3da09-fc92-486f-a90a-99ca788e8b59} - 2015.6.5.0 - - Documentation - Documentation - Documentation - - .NET Framework 4.5 - ..\..\..\doc\ref\csharp\html - en-US - - - - - - - OnlyWarningsAndErrors - Website - False - True - False - True - 1.0.0.0 - 2 - False - Standard - Blank - True - VS2013 - False - MemberName - gRPC C# - AboveNamespaces - Documentation - - Provides OAuth2 based authentication for gRPC. <c>Grpc.Auth</c> currently consists of a set of very lightweight wrappers and uses C# <a href="https://www.nuget.org/packages/Google.Apis.Auth/">Google.Apis.Auth</a> library. - Main namespace for gRPC C# functionality. Contains concepts representing both client side and server side gRPC logic. - -<seealso cref="Grpc.Core.Channel"/> -<seealso cref="Grpc.Core.Server"/> - Provides functionality to redirect gRPC logs to application-specified destination. - Various utilities for gRPC C#. - - Summary, Parameter, AutoDocumentCtors, Namespace, TypeParameter, AutoDocumentDispose - - - - - - - - - - - - - - - - - - - - - - - - - - - OnBuildSuccess - - \ No newline at end of file diff --git a/src/csharp/doc/toc.yml b/src/csharp/doc/toc.yml new file mode 100644 index 00000000000..c3a1e415ab1 --- /dev/null +++ b/src/csharp/doc/toc.yml @@ -0,0 +1,3 @@ +- name: API Documentation + href: obj/api/ + homepage: obj/api/Grpc.Core.yml diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi index 28c30e5d357..237f4307990 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi @@ -41,9 +41,8 @@ cdef class CompletionQueue: cdef object user_tag = None cdef Call operation_call = None cdef CallDetails request_call_details = None - cdef Metadata request_metadata = None + cdef object request_metadata = None cdef Operations batch_operations = None - cdef Operation batch_operation = None if event.type == GRPC_QUEUE_TIMEOUT: return Event( event.type, False, None, None, None, None, False, None) @@ -63,14 +62,8 @@ cdef class CompletionQueue: operation_call = tag.operation_call request_call_details = tag.request_call_details if tag.request_metadata is not None: - request_metadata = tag.request_metadata - request_metadata._claim_slice_ownership() + request_metadata = tuple(tag.request_metadata) batch_operations = tag.batch_operations - if tag.batch_operations is not None: - for op in batch_operations.operations: - batch_operation = op - if batch_operation._received_metadata is not None: - batch_operation._received_metadata._claim_slice_ownership() if tag.is_new_request: # Stuff in the tag not explicitly handled by us needs to live through # the life of the call diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi index 98d7a9820df..57816f1cab0 100644 
--- a/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/credentials.pyx.pxi @@ -76,7 +76,7 @@ cdef class CredentialsMetadataPlugin: """ Args: plugin_callback (callable): Callback accepting a service URL (str/bytes) - and callback object (accepting a Metadata, + and callback object (accepting a MetadataArray, grpc_status_code, and a str/bytes error message). This argument when called should be non-blocking and eventually call the callback object with the appropriate status code/details and metadata (if @@ -129,8 +129,7 @@ cdef void plugin_get_metadata( def python_callback( Metadata metadata, grpc_status_code status, bytes error_details): - cb(user_data, metadata.c_metadata_array.metadata, - metadata.c_metadata_array.count, status, error_details) + cb(user_data, metadata.c_metadata, metadata.c_count, status, error_details) called_flag[0] = True cdef CredentialsMetadataPlugin self = state cdef AuthMetadataContext cy_context = AuthMetadataContext() @@ -139,8 +138,8 @@ cdef void plugin_get_metadata( self.plugin_callback(cy_context, python_callback) except Exception as error: if not called_flag[0]: - cb(user_data, Metadata([]).c_metadata_array.metadata, - 0, StatusCode.unknown, traceback.format_exc().encode()) + cb(user_data, NULL, 0, StatusCode.unknown, + traceback.format_exc().encode()) cdef void plugin_destroy_c_plugin_state(void *state) with gil: cpython.Py_DECREF(state) diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi index 5950bfa0e6a..28cf1144510 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi @@ -59,6 +59,7 @@ cdef extern from "grpc/grpc.h": grpc_slice grpc_slice_malloc(size_t length) nogil grpc_slice grpc_slice_from_copied_string(const char *source) nogil grpc_slice grpc_slice_from_copied_buffer(const char *source, size_t len) nogil + grpc_slice grpc_slice_copy(grpc_slice s) nogil # Declare functions for function-like macros (because Cython)... 
void *grpc_slice_start_ptr "GRPC_SLICE_START_PTR" (grpc_slice s) nogil diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi index 8ace6aeb525..9c40ebf0c28 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi @@ -37,7 +37,7 @@ cdef class OperationTag: cdef Server shutting_down_server cdef Call operation_call cdef CallDetails request_call_details - cdef Metadata request_metadata + cdef MetadataArray request_metadata cdef Operations batch_operations cdef bint is_new_request @@ -51,7 +51,7 @@ cdef class Event: # For Server.request_call cdef readonly bint is_new_request cdef readonly CallDetails request_call_details - cdef readonly Metadata request_metadata + cdef readonly object request_metadata # For server calls cdef readonly Call operation_call @@ -92,15 +92,20 @@ cdef class Metadatum: cdef class Metadata: + cdef grpc_metadata *c_metadata + cdef readonly size_t c_count + + +cdef class MetadataArray: + cdef grpc_metadata_array c_metadata_array - cdef void _claim_slice_ownership(self) cdef class Operation: cdef grpc_op c_op cdef ByteBuffer _received_message - cdef Metadata _received_metadata + cdef MetadataArray _received_metadata cdef grpc_status_code _received_status_code cdef grpc_slice _status_details cdef int _received_cancelled diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi index 1b2ddd2469c..0a2a6eee051 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi @@ -238,7 +238,7 @@ cdef class Event: def __cinit__(self, grpc_completion_type type, bint success, object tag, Call operation_call, CallDetails request_call_details, - Metadata request_metadata, + object request_metadata, bint is_new_request, Operations batch_operations): self.type = type @@ -437,48 +437,79 @@ cdef class Metadatum: cdef class _MetadataIterator: cdef size_t i - cdef Metadata metadata + cdef size_t _length + cdef object _metadatum_indexable - def __cinit__(self, Metadata metadata not None): + def __cinit__(self, length, metadatum_indexable): + self._length = length + self._metadatum_indexable = metadatum_indexable self.i = 0 - self.metadata = metadata def __iter__(self): return self def __next__(self): - if self.i < len(self.metadata): - result = self.metadata[self.i] + if self.i < self._length: + result = self._metadatum_indexable[self.i] self.i = self.i + 1 return result else: raise StopIteration +# TODO(https://github.com/grpc/grpc/issues/7950): Eliminate this; just use an +# ordinary sequence of pairs of bytestrings all the way down to the +# grpc_call_start_batch call. 
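The rewritten `Metadata` class below copies every key and value into an owned slice stored in a `gpr_malloc`'d `grpc_metadata` array and releases them in `__dealloc__`. A hedged C sketch of that same ownership pattern, using only core APIs (the helper names are illustrative, not part of this change):

```c
#include <grpc/grpc.h>
#include <grpc/slice.h>
#include <grpc/support/alloc.h>

/* Build an array of metadata entries whose key/value slices are owned by the
   array itself, mirroring what Metadata.__cinit__ does with grpc_slice_copy. */
static grpc_metadata *make_owned_metadata(size_t count, const char **keys,
                                          const char **values) {
  grpc_metadata *md =
      (grpc_metadata *)gpr_malloc(count * sizeof(grpc_metadata));
  for (size_t i = 0; i < count; i++) {
    md[i].key = grpc_slice_from_copied_string(keys[i]);
    md[i].value = grpc_slice_from_copied_string(values[i]);
    md[i].flags = 0;
  }
  return md;
}

/* Mirror of Metadata.__dealloc__: unref every slice, then free the array. */
static void destroy_owned_metadata(grpc_metadata *md, size_t count) {
  for (size_t i = 0; i < count; i++) {
    grpc_slice_unref(md[i].key);
    grpc_slice_unref(md[i].value);
  }
  gpr_free(md);
}
```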
cdef class Metadata: + """Metadata being passed from application to core.""" def __cinit__(self, metadata_iterable): + metadata_sequence = tuple(metadata_iterable) + cdef size_t count = len(metadata_sequence) with nogil: grpc_init() - grpc_metadata_array_init(&self.c_metadata_array) - metadata = list(metadata_iterable) - for metadatum in metadata: - if not isinstance(metadatum, Metadatum): - raise TypeError("expected list of Metadatum") - self.c_metadata_array.count = len(metadata) - self.c_metadata_array.capacity = len(metadata) + self.c_metadata = gpr_malloc( + count * sizeof(grpc_metadata)) + self.c_count = count + for index, metadatum in enumerate(metadata_sequence): + self.c_metadata[index].key = grpc_slice_copy( + (metadatum).c_metadata.key) + self.c_metadata[index].value = grpc_slice_copy( + (metadatum).c_metadata.value) + + def __dealloc__(self): + with nogil: + for index in range(self.c_count): + grpc_slice_unref(self.c_metadata[index].key) + grpc_slice_unref(self.c_metadata[index].value) + gpr_free(self.c_metadata) + grpc_shutdown() + + def __len__(self): + return self.c_count + + def __getitem__(self, size_t index): + if index < self.c_count: + key = _slice_bytes(self.c_metadata[index].key) + value = _slice_bytes(self.c_metadata[index].value) + return Metadatum(key, value) + else: + raise IndexError() + + def __iter__(self): + return _MetadataIterator(self.c_count, self) + + +cdef class MetadataArray: + """Metadata being passed from core to application.""" + + def __cinit__(self): with nogil: - self.c_metadata_array.metadata = gpr_malloc( - self.c_metadata_array.count*sizeof(grpc_metadata) - ) - for i in range(self.c_metadata_array.count): - (metadata[i])._copy_metadatum(&self.c_metadata_array.metadata[i]) + grpc_init() + grpc_metadata_array_init(&self.c_metadata_array) def __dealloc__(self): with nogil: - # this frees the allocated memory for the grpc_metadata_array (although - # it'd be nice if that were documented somewhere...) 
- # TODO(atash): document this in the C core grpc_metadata_array_destroy(&self.c_metadata_array) grpc_shutdown() @@ -493,21 +524,7 @@ cdef class Metadata: return Metadatum(key=key, value=value) def __iter__(self): - return _MetadataIterator(self) - - cdef void _claim_slice_ownership(self): - cdef grpc_metadata_array new_c_metadata_array - grpc_metadata_array_init(&new_c_metadata_array) - new_c_metadata_array.metadata = gpr_malloc( - self.c_metadata_array.count*sizeof(grpc_metadata)) - new_c_metadata_array.count = self.c_metadata_array.count - for i in range(self.c_metadata_array.count): - new_c_metadata_array.metadata[i].key = _copy_slice( - self.c_metadata_array.metadata[i].key) - new_c_metadata_array.metadata[i].value = _copy_slice( - self.c_metadata_array.metadata[i].value) - grpc_metadata_array_destroy(&self.c_metadata_array) - self.c_metadata_array = new_c_metadata_array + return _MetadataIterator(self.c_metadata_array.count, self) cdef class Operation: @@ -547,14 +564,13 @@ cdef class Operation: if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT): raise TypeError("self must be an operation receiving metadata") - return self._received_metadata - - @property - def received_metadata_or_none(self): - if (self.c_op.type != GRPC_OP_RECV_INITIAL_METADATA and - self.c_op.type != GRPC_OP_RECV_STATUS_ON_CLIENT): - return None - return self._received_metadata + # TODO(https://github.com/grpc/grpc/issues/7950): Drop the "all Cython + # objects must be legitimate for use from Python at any time" policy in + # place today, shift the policy toward "Operation objects are only usable + # while their calls are active", and move this making-a-copy-because-this- + # data-needs-to-live-much-longer-than-the-call-from-which-it-arose to the + # lowest Python layer. 
+ return tuple(self._received_metadata) @property def received_status_code(self): @@ -601,9 +617,8 @@ def operation_send_initial_metadata(Metadata metadata, int flags): cdef Operation op = Operation() op.c_op.type = GRPC_OP_SEND_INITIAL_METADATA op.c_op.flags = flags - op.c_op.data.send_initial_metadata.count = metadata.c_metadata_array.count - op.c_op.data.send_initial_metadata.metadata = ( - metadata.c_metadata_array.metadata) + op.c_op.data.send_initial_metadata.count = metadata.c_count + op.c_op.data.send_initial_metadata.metadata = metadata.c_metadata op.references.append(metadata) op.is_valid = True return op @@ -631,9 +646,8 @@ def operation_send_status_from_server( op.c_op.type = GRPC_OP_SEND_STATUS_FROM_SERVER op.c_op.flags = flags op.c_op.data.send_status_from_server.trailing_metadata_count = ( - metadata.c_metadata_array.count) - op.c_op.data.send_status_from_server.trailing_metadata = ( - metadata.c_metadata_array.metadata) + metadata.c_count) + op.c_op.data.send_status_from_server.trailing_metadata = metadata.c_metadata op.c_op.data.send_status_from_server.status = code grpc_slice_unref(op._status_details) op._status_details = _slice_from_bytes(details) @@ -646,7 +660,7 @@ def operation_receive_initial_metadata(int flags): cdef Operation op = Operation() op.c_op.type = GRPC_OP_RECV_INITIAL_METADATA op.c_op.flags = flags - op._received_metadata = Metadata([]) + op._received_metadata = MetadataArray() op.c_op.data.receive_initial_metadata.receive_initial_metadata = ( &op._received_metadata.c_metadata_array) op.is_valid = True @@ -669,7 +683,7 @@ def operation_receive_status_on_client(int flags): cdef Operation op = Operation() op.c_op.type = GRPC_OP_RECV_STATUS_ON_CLIENT op.c_op.flags = flags - op._received_metadata = Metadata([]) + op._received_metadata = MetadataArray() op.c_op.data.receive_status_on_client.trailing_metadata = ( &op._received_metadata.c_metadata_array) op.c_op.data.receive_status_on_client.status = ( diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi index dd276fd57b5..b8db27469f9 100644 --- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi +++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi @@ -44,7 +44,7 @@ cdef class Server: cdef OperationTag operation_tag = OperationTag(tag) operation_tag.operation_call = Call() operation_tag.request_call_details = CallDetails() - operation_tag.request_metadata = Metadata([]) + operation_tag.request_metadata = MetadataArray() operation_tag.references.extend([self, call_queue, server_queue]) operation_tag.is_new_request = True operation_tag.batch_operations = Operations([]) diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py index 0299b4cca95..1f5e9c5130b 100644 --- a/src/python/grpcio_health_checking/setup.py +++ b/src/python/grpcio_health_checking/setup.py @@ -34,7 +34,7 @@ CLASSIFIERS = [ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'License :: OSI Approved :: Apache Software License', -], +] PACKAGE_DIRECTORIES = { '': '.', diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py index bed2311b598..9360550afbc 100644 --- a/src/python/grpcio_reflection/setup.py +++ b/src/python/grpcio_reflection/setup.py @@ -35,7 +35,7 @@ CLASSIFIERS = [ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'License :: OSI Approved :: Apache Software License', -], +] PACKAGE_DIRECTORIES = { '': '.', diff 
--git a/src/python/grpcio_testing/grpc_testing/__init__.py b/src/python/grpcio_testing/grpc_testing/__init__.py index 917e11808e0..994274500c7 100644 --- a/src/python/grpcio_testing/grpc_testing/__init__.py +++ b/src/python/grpcio_testing/grpc_testing/__init__.py @@ -213,7 +213,7 @@ class StreamStreamChannelRpc(six.with_metaclass(abc.ABCMeta)): raise NotImplementedError() -class Channel(six.with_metaclass(abc.ABCMeta), grpc.Channel): +class Channel(six.with_metaclass(abc.ABCMeta, grpc.Channel)): """A grpc.Channel double with which to test a system that invokes RPCs.""" @abc.abstractmethod diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py index 41a75d46f68..592d08efc36 100644 --- a/src/python/grpcio_testing/grpc_version.py +++ b/src/python/grpcio_testing/grpc_version.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!! +# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!! -VERSION = '1.5.0.dev0' +VERSION='1.7.0.dev0' diff --git a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py index 83f21ecbbb7..424b153ff8d 100644 --- a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py +++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py @@ -12,19 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -import argparse import contextlib -import distutils.spawn -import errno -import itertools +import importlib import os -import pkg_resources +from os import path +import pkgutil import shutil -import subprocess import sys import tempfile import threading -import time import unittest from six import moves @@ -33,12 +29,22 @@ from grpc.beta import implementations from grpc.beta import interfaces from grpc.framework.foundation import future from grpc.framework.interfaces.face import face +from grpc_tools import protoc from tests.unit.framework.common import test_constants -import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2 -import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2 -import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2 -import tests.protoc_plugin.protos.service.test_service_pb2 as service_pb2 +_RELATIVE_PROTO_PATH = 'relative_proto_path' +_RELATIVE_PYTHON_OUT = 'relative_python_out' + +_PROTO_FILES_PATH_COMPONENTS = ( + ('beta_grpc_plugin_test', 'payload', 'test_payload.proto',), + ('beta_grpc_plugin_test', 'requests', 'r', 'test_requests.proto',), + ('beta_grpc_plugin_test', 'responses', 'test_responses.proto',), + ('beta_grpc_plugin_test', 'service', 'test_service.proto',),) + +_PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2' +_REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2' +_RESPONSES_PB2 = 'beta_grpc_plugin_test.responses.test_responses_pb2' +_SERVICE_PB2 = 'beta_grpc_plugin_test.service.test_service_pb2' # Identifiers of entities we expect to find in the generated module. 
SERVICER_IDENTIFIER = 'BetaTestServiceServicer' @@ -47,12 +53,50 @@ SERVER_FACTORY_IDENTIFIER = 'beta_create_TestService_server' STUB_FACTORY_IDENTIFIER = 'beta_create_TestService_stub' +@contextlib.contextmanager +def _system_path(path_insertion): + old_system_path = sys.path[:] + sys.path = sys.path[0:1] + path_insertion + sys.path[1:] + yield + sys.path = old_system_path + + +def _create_directory_tree(root, path_components_sequence): + created = set() + for path_components in path_components_sequence: + thus_far = '' + for path_component in path_components: + relative_path = path.join(thus_far, path_component) + if relative_path not in created: + os.makedirs(path.join(root, relative_path)) + created.add(relative_path) + thus_far = path.join(thus_far, path_component) + + +def _massage_proto_content(raw_proto_content): + imports_substituted = raw_proto_content.replace( + b'import "tests/protoc_plugin/protos/', + b'import "beta_grpc_plugin_test/') + package_statement_substituted = imports_substituted.replace( + b'package grpc_protoc_plugin;', b'package beta_grpc_protoc_plugin;') + return package_statement_substituted + + +def _packagify(directory): + for subdirectory, _, _ in os.walk(directory): + init_file_name = path.join(subdirectory, '__init__.py') + with open(init_file_name, 'wb') as init_file: + init_file.write(b'') + + class _ServicerMethods(object): - def __init__(self): + def __init__(self, payload_pb2, responses_pb2): self._condition = threading.Condition() self._paused = False self._fail = False + self._payload_pb2 = payload_pb2 + self._responses_pb2 = responses_pb2 @contextlib.contextmanager def pause(self): # pylint: disable=invalid-name @@ -79,22 +123,22 @@ class _ServicerMethods(object): self._condition.wait() def UnaryCall(self, request, unused_rpc_context): - response = response_pb2.SimpleResponse() - response.payload.payload_type = payload_pb2.COMPRESSABLE + response = self._responses_pb2.SimpleResponse() + response.payload.payload_type = self._payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * request.response_size self._control() return response def StreamingOutputCall(self, request, unused_rpc_context): for parameter in request.response_parameters: - response = response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = payload_pb2.COMPRESSABLE + response = self._responses_pb2.StreamingOutputCallResponse() + response.payload.payload_type = self._payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() yield response def StreamingInputCall(self, request_iter, unused_rpc_context): - response = response_pb2.StreamingInputCallResponse() + response = self._responses_pb2.StreamingInputCallResponse() aggregated_payload_size = 0 for request in request_iter: aggregated_payload_size += len(request.payload.payload_compressable) @@ -105,8 +149,8 @@ class _ServicerMethods(object): def FullDuplexCall(self, request_iter, unused_rpc_context): for request in request_iter: for parameter in request.response_parameters: - response = response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = payload_pb2.COMPRESSABLE + response = self._responses_pb2.StreamingOutputCallResponse() + response.payload.payload_type = self._payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() yield response @@ -115,8 +159,8 @@ class _ServicerMethods(object): responses = [] for request in request_iter: for parameter in request.response_parameters: - response = 
response_pb2.StreamingOutputCallResponse() - response.payload.payload_type = payload_pb2.COMPRESSABLE + response = self._responses_pb2.StreamingOutputCallResponse() + response.payload.payload_type = self._payload_pb2.COMPRESSABLE response.payload.payload_compressable = 'a' * parameter.size self._control() responses.append(response) @@ -125,7 +169,7 @@ class _ServicerMethods(object): @contextlib.contextmanager -def _CreateService(): +def _CreateService(payload_pb2, responses_pb2, service_pb2): """Provides a servicer backend and a stub. The servicer is just the implementation of the actual servicer passed to the @@ -136,7 +180,7 @@ def _CreateService(): the service bound to the stub and and stub is the stub on which to invoke RPCs. """ - servicer_methods = _ServicerMethods() + servicer_methods = _ServicerMethods(payload_pb2, responses_pb2) class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)): @@ -161,12 +205,12 @@ def _CreateService(): server.start() channel = implementations.insecure_channel('localhost', port) stub = getattr(service_pb2, STUB_FACTORY_IDENTIFIER)(channel) - yield (servicer_methods, stub) + yield servicer_methods, stub, server.stop(0) @contextlib.contextmanager -def _CreateIncompleteService(): +def _CreateIncompleteService(service_pb2): """Provides a servicer backend that fails to implement methods and its stub. The servicer is just the implementation of the actual servicer passed to the @@ -192,16 +236,16 @@ def _CreateIncompleteService(): server.stop(0) -def _streaming_input_request_iterator(): +def _streaming_input_request_iterator(payload_pb2, requests_pb2): for _ in range(3): - request = request_pb2.StreamingInputCallRequest() + request = requests_pb2.StreamingInputCallRequest() request.payload.payload_type = payload_pb2.COMPRESSABLE request.payload.payload_compressable = 'a' yield request -def _streaming_output_request(): - request = request_pb2.StreamingOutputCallRequest() +def _streaming_output_request(requests_pb2): + request = requests_pb2.StreamingOutputCallRequest() sizes = [1, 2, 3] request.response_parameters.add(size=sizes[0], interval_us=0) request.response_parameters.add(size=sizes[1], interval_us=0) @@ -209,11 +253,11 @@ def _streaming_output_request(): return request -def _full_duplex_request_iterator(): - request = request_pb2.StreamingOutputCallRequest() +def _full_duplex_request_iterator(requests_pb2): + request = requests_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request - request = request_pb2.StreamingOutputCallRequest() + request = requests_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=2, interval_us=0) request.response_parameters.add(size=3, interval_us=0) yield request @@ -227,22 +271,78 @@ class PythonPluginTest(unittest.TestCase): methods and does not exist for response-streaming methods. 
""" + def setUp(self): + self._directory = tempfile.mkdtemp(dir='.') + self._proto_path = path.join(self._directory, _RELATIVE_PROTO_PATH) + self._python_out = path.join(self._directory, _RELATIVE_PYTHON_OUT) + + os.makedirs(self._proto_path) + os.makedirs(self._python_out) + + directories_path_components = { + proto_file_path_components[:-1] + for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS + } + _create_directory_tree(self._proto_path, directories_path_components) + self._proto_file_names = set() + for proto_file_path_components in _PROTO_FILES_PATH_COMPONENTS: + raw_proto_content = pkgutil.get_data( + 'tests.protoc_plugin.protos', + path.join(*proto_file_path_components[1:])) + massaged_proto_content = _massage_proto_content(raw_proto_content) + proto_file_name = path.join(self._proto_path, + *proto_file_path_components) + with open(proto_file_name, 'wb') as proto_file: + proto_file.write(massaged_proto_content) + self._proto_file_names.add(proto_file_name) + + def tearDown(self): + shutil.rmtree(self._directory) + + def _protoc(self): + args = [ + '', + '--proto_path={}'.format(self._proto_path), + '--python_out={}'.format(self._python_out), + '--grpc_python_out=grpc_1_0:{}'.format(self._python_out), + ] + list(self._proto_file_names) + protoc_exit_code = protoc.main(args) + self.assertEqual(0, protoc_exit_code) + + _packagify(self._python_out) + + with _system_path([ + self._python_out, + ]): + self._payload_pb2 = importlib.import_module(_PAYLOAD_PB2) + self._requests_pb2 = importlib.import_module(_REQUESTS_PB2) + self._responses_pb2 = importlib.import_module(_RESPONSES_PB2) + self._service_pb2 = importlib.import_module(_SERVICE_PB2) + def testImportAttributes(self): + self._protoc() + # check that we can access the generated module and its members. 
- self.assertIsNotNone(getattr(service_pb2, SERVICER_IDENTIFIER, None)) - self.assertIsNotNone(getattr(service_pb2, STUB_IDENTIFIER, None)) self.assertIsNotNone( - getattr(service_pb2, SERVER_FACTORY_IDENTIFIER, None)) + getattr(self._service_pb2, SERVICER_IDENTIFIER, None)) + self.assertIsNotNone(getattr(self._service_pb2, STUB_IDENTIFIER, None)) self.assertIsNotNone( - getattr(service_pb2, STUB_FACTORY_IDENTIFIER, None)) + getattr(self._service_pb2, SERVER_FACTORY_IDENTIFIER, None)) + self.assertIsNotNone( + getattr(self._service_pb2, STUB_FACTORY_IDENTIFIER, None)) def testUpDown(self): - with _CreateService(): - request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2): + self._requests_pb2.SimpleRequest(response_size=13) def testIncompleteServicer(self): - with _CreateIncompleteService() as (_, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateIncompleteService(self._service_pb2) as (_, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) try: stub.UnaryCall(request, test_constants.LONG_TIMEOUT) except face.AbortionError as error: @@ -250,15 +350,21 @@ class PythonPluginTest(unittest.TestCase): error.code) def testUnaryCall(self): - with _CreateService() as (methods, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) response = stub.UnaryCall(request, test_constants.LONG_TIMEOUT) expected_response = methods.UnaryCall(request, 'not a real context!') self.assertEqual(expected_response, response) def testUnaryCallFuture(self): - with _CreateService() as (methods, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) # Check that the call does not block waiting for the server to respond. 
with methods.pause(): response_future = stub.UnaryCall.future( @@ -268,8 +374,11 @@ class PythonPluginTest(unittest.TestCase): self.assertEqual(expected_response, response) def testUnaryCallFutureExpired(self): - with _CreateService() as (methods, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) with methods.pause(): response_future = stub.UnaryCall.future( request, test_constants.SHORT_TIMEOUT) @@ -277,24 +386,33 @@ class PythonPluginTest(unittest.TestCase): response_future.result() def testUnaryCallFutureCancelled(self): - with _CreateService() as (methods, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) with methods.pause(): response_future = stub.UnaryCall.future(request, 1) response_future.cancel() self.assertTrue(response_future.cancelled()) def testUnaryCallFutureFailed(self): - with _CreateService() as (methods, stub): - request = request_pb2.SimpleRequest(response_size=13) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = self._requests_pb2.SimpleRequest(response_size=13) with methods.fail(): response_future = stub.UnaryCall.future( request, test_constants.LONG_TIMEOUT) self.assertIsNotNone(response_future.exception()) def testStreamingOutputCall(self): - with _CreateService() as (methods, stub): - request = _streaming_output_request() + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = _streaming_output_request(self._requests_pb2) responses = stub.StreamingOutputCall(request, test_constants.LONG_TIMEOUT) expected_responses = methods.StreamingOutputCall( @@ -304,8 +422,11 @@ class PythonPluginTest(unittest.TestCase): self.assertEqual(expected_response, response) def testStreamingOutputCallExpired(self): - with _CreateService() as (methods, stub): - request = _streaming_output_request() + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = _streaming_output_request(self._requests_pb2) with methods.pause(): responses = stub.StreamingOutputCall( request, test_constants.SHORT_TIMEOUT) @@ -313,8 +434,11 @@ class PythonPluginTest(unittest.TestCase): list(responses) def testStreamingOutputCallCancelled(self): - with _CreateService() as (methods, stub): - request = _streaming_output_request() + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = _streaming_output_request(self._requests_pb2) responses = stub.StreamingOutputCall(request, test_constants.LONG_TIMEOUT) next(responses) @@ -323,8 +447,11 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testStreamingOutputCallFailed(self): - with _CreateService() as (methods, stub): - request = _streaming_output_request() + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request = _streaming_output_request(self._requests_pb2) with methods.fail(): responses = stub.StreamingOutputCall(request, 1) self.assertIsNotNone(responses) @@ -332,30 +459,46 
@@ class PythonPluginTest(unittest.TestCase): next(responses) def testStreamingInputCall(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): response = stub.StreamingInputCall( - _streaming_input_request_iterator(), + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), test_constants.LONG_TIMEOUT) expected_response = methods.StreamingInputCall( - _streaming_input_request_iterator(), 'not a real RpcContext!') + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), + 'not a real RpcContext!') self.assertEqual(expected_response, response) def testStreamingInputCallFuture(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator(), + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), test_constants.LONG_TIMEOUT) response = response_future.result() expected_response = methods.StreamingInputCall( - _streaming_input_request_iterator(), 'not a real RpcContext!') + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), + 'not a real RpcContext!') self.assertEqual(expected_response, response) def testStreamingInputCallFutureExpired(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator(), + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), test_constants.SHORT_TIMEOUT) with self.assertRaises(face.ExpirationError): response_future.result() @@ -363,10 +506,14 @@ class PythonPluginTest(unittest.TestCase): face.ExpirationError) def testStreamingInputCallFutureCancelled(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.pause(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator(), + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), test_constants.LONG_TIMEOUT) response_future.cancel() self.assertTrue(response_future.cancelled()) @@ -374,26 +521,38 @@ class PythonPluginTest(unittest.TestCase): response_future.result() def testStreamingInputCallFutureFailed(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.fail(): response_future = stub.StreamingInputCall.future( - _streaming_input_request_iterator(), + _streaming_input_request_iterator(self._payload_pb2, + self._requests_pb2), test_constants.LONG_TIMEOUT) self.assertIsNotNone(response_future.exception()) def testFullDuplexCall(self): - with _CreateService() as (methods, stub): - responses = stub.FullDuplexCall(_full_duplex_request_iterator(), - test_constants.LONG_TIMEOUT) + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + responses = stub.FullDuplexCall( + _full_duplex_request_iterator(self._requests_pb2), + test_constants.LONG_TIMEOUT) expected_responses = methods.FullDuplexCall( 
- _full_duplex_request_iterator(), 'not a real RpcContext!') + _full_duplex_request_iterator(self._requests_pb2), + 'not a real RpcContext!') for expected_response, response in moves.zip_longest( expected_responses, responses): self.assertEqual(expected_response, response) def testFullDuplexCallExpired(self): - request_iterator = _full_duplex_request_iterator() - with _CreateService() as (methods, stub): + self._protoc() + + request_iterator = _full_duplex_request_iterator(self._requests_pb2) + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.pause(): responses = stub.FullDuplexCall(request_iterator, test_constants.SHORT_TIMEOUT) @@ -401,8 +560,11 @@ class PythonPluginTest(unittest.TestCase): list(responses) def testFullDuplexCallCancelled(self): - with _CreateService() as (methods, stub): - request_iterator = _full_duplex_request_iterator() + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): + request_iterator = _full_duplex_request_iterator(self._requests_pb2) responses = stub.FullDuplexCall(request_iterator, test_constants.LONG_TIMEOUT) next(responses) @@ -411,8 +573,11 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testFullDuplexCallFailed(self): - request_iterator = _full_duplex_request_iterator() - with _CreateService() as (methods, stub): + self._protoc() + + request_iterator = _full_duplex_request_iterator(self._requests_pb2) + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with methods.fail(): responses = stub.FullDuplexCall(request_iterator, test_constants.LONG_TIMEOUT) @@ -421,13 +586,16 @@ class PythonPluginTest(unittest.TestCase): next(responses) def testHalfDuplexCall(self): - with _CreateService() as (methods, stub): + self._protoc() + + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): def half_duplex_request_iterator(): - request = request_pb2.StreamingOutputCallRequest() + request = self._requests_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request - request = request_pb2.StreamingOutputCallRequest() + request = self._requests_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=2, interval_us=0) request.response_parameters.add(size=3, interval_us=0) yield request @@ -441,6 +609,8 @@ class PythonPluginTest(unittest.TestCase): self.assertEqual(expected_response, response) def testHalfDuplexCallWedged(self): + self._protoc() + condition = threading.Condition() wait_cell = [False] @@ -455,14 +625,15 @@ class PythonPluginTest(unittest.TestCase): condition.notify_all() def half_duplex_request_iterator(): - request = request_pb2.StreamingOutputCallRequest() + request = self._requests_pb2.StreamingOutputCallRequest() request.response_parameters.add(size=1, interval_us=0) yield request with condition: while wait_cell[0]: condition.wait() - with _CreateService() as (methods, stub): + with _CreateService(self._payload_pb2, self._responses_pb2, + self._service_pb2) as (methods, stub): with wait(): responses = stub.HalfDuplexCall(half_duplex_request_iterator(), test_constants.SHORT_TIMEOUT) diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h index c5c848ae447..55a7eed1a47 100644 --- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h +++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h @@ -164,10 +164,10 @@ extern 
census_record_values_type census_record_values_import; typedef int(*grpc_compression_algorithm_parse_type)(grpc_slice value, grpc_compression_algorithm *algorithm); extern grpc_compression_algorithm_parse_type grpc_compression_algorithm_parse_import; #define grpc_compression_algorithm_parse grpc_compression_algorithm_parse_import -typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, char **name); +typedef int(*grpc_compression_algorithm_name_type)(grpc_compression_algorithm algorithm, const char **name); extern grpc_compression_algorithm_name_type grpc_compression_algorithm_name_import; #define grpc_compression_algorithm_name grpc_compression_algorithm_name_import -typedef int(*grpc_stream_compression_algorithm_name_type)(grpc_stream_compression_algorithm algorithm, char **name); +typedef int(*grpc_stream_compression_algorithm_name_type)(grpc_stream_compression_algorithm algorithm, const char **name); extern grpc_stream_compression_algorithm_name_type grpc_stream_compression_algorithm_name_import; #define grpc_stream_compression_algorithm_name grpc_stream_compression_algorithm_name_import typedef grpc_compression_algorithm(*grpc_compression_algorithm_for_level_type)(grpc_compression_level level, uint32_t accepted_encodings); diff --git a/src/ruby/lib/grpc/google_rpc_status_utils.rb b/src/ruby/lib/grpc/google_rpc_status_utils.rb new file mode 100644 index 00000000000..fdadd6b76e4 --- /dev/null +++ b/src/ruby/lib/grpc/google_rpc_status_utils.rb @@ -0,0 +1,28 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +require_relative './grpc' +require 'google/rpc/status_pb' + +# GRPC contains the General RPC module. +module GRPC + # GoogleRpcStatusUtils provides utilities to convert between a + # GRPC::Core::Status and a deserialized Google::Rpc::Status proto + class GoogleRpcStatusUtils + def self.extract_google_rpc_status(status) + fail ArgumentError, 'bad type' unless status.is_a? Struct::Status + Google::Rpc::Status.decode(status.metadata['grpc-status-details-bin']) + end + end +end diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb new file mode 100644 index 00000000000..fe221c30ddd --- /dev/null +++ b/src/ruby/spec/google_rpc_status_utils_spec.rb @@ -0,0 +1,223 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
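The Ruby utility added below reads a serialized google.rpc.Status out of the reserved `grpc-status-details-bin` trailing metadata entry. For reference, a hedged C sketch of what locating that trailer looks like at the core metadata level; the helper name and its borrowed-slice contract are assumptions for illustration, not an API this patch adds.

```c
#include <grpc/grpc.h>
#include <grpc/slice.h>

/* Hypothetical helper: find the serialized google.rpc.Status that a peer
   attached as the reserved "grpc-status-details-bin" trailing metadata entry.
   Returns 1 and writes a borrowed (not owned) slice on success, 0 otherwise. */
static int find_rpc_status_details(const grpc_metadata_array *trailing_md,
                                   grpc_slice *details_out) {
  for (size_t i = 0; i < trailing_md->count; i++) {
    if (grpc_slice_str_cmp(trailing_md->metadata[i].key,
                           "grpc-status-details-bin") == 0) {
      *details_out = trailing_md->metadata[i].value; /* borrowed reference */
      return 1;
    }
  }
  return 0;
}
```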
+ +require 'grpc' +require_relative '../lib/grpc/google_rpc_status_utils' +require_relative '../pb/src/proto/grpc/testing/messages_pb' +require 'google/protobuf/well_known_types' + +include GRPC::Core + +describe 'conversion from a status struct to a google protobuf status' do + it 'fails if the input is not a status struct' do + begin + GRPC::GoogleRpcStatusUtils.extract_google_rpc_status('string') + rescue => e + exception = e + end + expect(exception.is_a?(ArgumentError)).to be true + expect(exception.message.include?('bad type')).to be true + end + + it 'fails with some error if the header key is missing' do + status = Struct::Status.new(1, 'details', key: 'val') + expect(status.metadata.nil?).to be false + expect do + GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + end.to raise_error(StandardError) + end + + it 'fails with some error if the header key fails to deserialize' do + status = Struct::Status.new(1, 'details', + 'grpc-status-details-bin' => 'string_val') + expect do + GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + end.to raise_error(StandardError) + end + + it 'silently ignores erroneous mismatch between messages in '\ + 'status struct and protobuf status' do + proto = Google::Rpc::Status.new(code: 1, message: 'proto message') + encoded_proto = Google::Rpc::Status.encode(proto) + status = Struct::Status.new(1, 'struct message', + 'grpc-status-details-bin' => encoded_proto) + rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + expect(rpc_status).to eq(proto) + end + + it 'silently ignores erroneous mismatch between codes in status struct '\ + 'and protobuf status' do + proto = Google::Rpc::Status.new(code: 1, message: 'matching message') + encoded_proto = Google::Rpc::Status.encode(proto) + status = Struct::Status.new(2, 'matching message', + 'grpc-status-details-bin' => encoded_proto) + rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + expect(rpc_status).to eq(proto) + end + + it 'can successfully convert a status struct into a google protobuf status '\ + 'when there are no rpcstatus details' do + proto = Google::Rpc::Status.new(code: 1, message: 'matching message') + encoded_proto = Google::Rpc::Status.encode(proto) + status = Struct::Status.new(1, 'matching message', + 'grpc-status-details-bin' => encoded_proto) + out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + expect(out.code).to eq(1) + expect(out.message).to eq('matching message') + expect(out.details).to eq([]) + end + + it 'can successfully convert a status struct into a google protobuf '\ + 'status when there are multiple rpcstatus details' do + simple_request_any = Google::Protobuf::Any.new + simple_request = Grpc::Testing::SimpleRequest.new( + payload: Grpc::Testing::Payload.new(body: 'request')) + simple_request_any.pack(simple_request) + simple_response_any = Google::Protobuf::Any.new + simple_response = Grpc::Testing::SimpleResponse.new( + payload: Grpc::Testing::Payload.new(body: 'response')) + simple_response_any.pack(simple_response) + payload_any = Google::Protobuf::Any.new + payload = Grpc::Testing::Payload.new(body: 'payload') + payload_any.pack(payload) + proto = Google::Rpc::Status.new(code: 1, + message: 'matching message', + details: [ + simple_request_any, + simple_response_any, + payload_any + ]) + encoded_proto = Google::Rpc::Status.encode(proto) + status = Struct::Status.new(1, 'matching message', + 'grpc-status-details-bin' =>
encoded_proto) + out = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status(status) + expect(out.code).to eq(1) + expect(out.message).to eq('matching message') + expect(out.details[0].unpack( + Grpc::Testing::SimpleRequest)).to eq(simple_request) + expect(out.details[1].unpack( + Grpc::Testing::SimpleResponse)).to eq(simple_response) + expect(out.details[2].unpack( + Grpc::Testing::Payload)).to eq(payload) + end +end + +# Test message +class EchoMsg + def self.marshal(_o) + '' + end + + def self.unmarshal(_o) + EchoMsg.new + end +end + +# A test service that fills in the "reserved" grpc-status-details-bin trailer, +# for client-side testing of GoogleRpcStatus protobuf extraction from trailers. +class GoogleRpcStatusTestService + include GRPC::GenericService + rpc :an_rpc, EchoMsg, EchoMsg + + def initialize(encoded_rpc_status) + @encoded_rpc_status = encoded_rpc_status + end + + def an_rpc(_, _) + # TODO: create a server-side utility API for sending a google rpc status. + # Applications are not expected to set the grpc-status-details-bin + # ("grpc"-fixed and reserved for library use) manually. + # Doing so here is only for testing of the client-side api for extracting + # a google rpc status, which is useful + # when interacting with a server that does fill in this trailer. + fail GRPC::Unknown.new('test message', + 'grpc-status-details-bin' => @encoded_rpc_status) + end +end + +GoogleRpcStatusTestStub = GoogleRpcStatusTestService.rpc_stub_class + +describe 'receiving a google rpc status from a remote endpoint' do + def start_server(encoded_rpc_status) + @srv = GRPC::RpcServer.new(pool_size: 1) + @server_port = @srv.add_http2_port('localhost:0', + :this_port_is_insecure) + @srv.handle(GoogleRpcStatusTestService.new(encoded_rpc_status)) + @server_thd = Thread.new { @srv.run } + @srv.wait_till_running + end + + def stop_server + expect(@srv.stopped?).to be(false) + @srv.stop + @server_thd.join + expect(@srv.stopped?).to be(true) + end + + before(:each) do + simple_request_any = Google::Protobuf::Any.new + simple_request = Grpc::Testing::SimpleRequest.new( + payload: Grpc::Testing::Payload.new(body: 'request')) + simple_request_any.pack(simple_request) + simple_response_any = Google::Protobuf::Any.new + simple_response = Grpc::Testing::SimpleResponse.new( + payload: Grpc::Testing::Payload.new(body: 'response')) + simple_response_any.pack(simple_response) + payload_any = Google::Protobuf::Any.new + payload = Grpc::Testing::Payload.new(body: 'payload') + payload_any.pack(payload) + @expected_proto = Google::Rpc::Status.new( + code: StatusCodes::UNKNOWN, + message: 'test message', + details: [simple_request_any, simple_response_any, payload_any]) + start_server(Google::Rpc::Status.encode(@expected_proto)) + end + + after(:each) do + stop_server + end + + it 'should be able to extract a google rpc status from the '\ + 'status struct taken from a BadStatus exception' do + stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}", + :this_channel_is_insecure) + begin + stub.an_rpc(EchoMsg.new) + rescue GRPC::BadStatus => e + rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + e.to_status) + end + expect(rpc_status).to eq(@expected_proto) + end + + it 'should be able to extract a google rpc status from the '\ + 'status struct taken from the op view of a call' do + stub = GoogleRpcStatusTestStub.new("localhost:#{@server_port}", + :this_channel_is_insecure) + op = stub.an_rpc(EchoMsg.new, return_op: true) + begin + op.execute + rescue GRPC::BadStatus => e +
status_from_exception = e.to_status + end + rpc_status = GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + op.status) + expect(rpc_status).to eq(@expected_proto) + # "to_status" on the bad status should give the same result + # as "status" on the "op view". + expect(GRPC::GoogleRpcStatusUtils.extract_google_rpc_status( + status_from_exception)).to eq(rpc_status) + end +end diff --git a/templates/config.m4.template b/templates/config.m4.template index f91893c2bd3..cd93fbd0fbe 100644 --- a/templates/config.m4.template +++ b/templates/config.m4.template @@ -14,7 +14,7 @@ LIBS="-lpthread $LIBS" CFLAGS="-Wall -Werror -Wno-parentheses-equality -Wno-unused-value -std=c11" - CXXFLAGS="-std=c++11" + CXXFLAGS="-std=c++11 -fno-exceptions -fno-rtti" GRPC_SHARED_LIBADD="-lpthread $GRPC_SHARED_LIBADD" PHP_REQUIRE_CXX() PHP_ADD_LIBRARY(pthread) diff --git a/templates/grpc.gemspec.template b/templates/grpc.gemspec.template index e62e5b27215..215d5f9df9a 100644 --- a/templates/grpc.gemspec.template +++ b/templates/grpc.gemspec.template @@ -31,6 +31,7 @@ s.add_dependency 'google-protobuf', '~> 3.1' s.add_dependency 'googleauth', '~> 0.5.1' + s.add_dependency 'googleapis-common-protos-types', '~> 1.0.0' s.add_development_dependency 'bundler', '~> 1.9' s.add_development_dependency 'facter', '~> 2.4' diff --git a/templates/package.xml.template b/templates/package.xml.template index 15da704a478..f10f75b8c06 100644 --- a/templates/package.xml.template +++ b/templates/package.xml.template @@ -12,7 +12,7 @@ grpc-packages@google.com yes - 2017-05-22 + 2017-08-24 ${settings.php_version.php()} @@ -27,6 +27,9 @@ - Channel are now by default persistent #11878 - Some bug fixes from 1.4 branch #12109, #12123 - Fixed hang bug when fork() was used #11814 + - License changed to Apache 2.0 + - Added support for php_namespace option in codegen plugin #11886 + - Updated gRPC C Core library version 1.6 diff --git a/templates/src/python/grpcio_testing/grpc_version.py.template b/templates/src/python/grpcio_testing/grpc_version.py.template new file mode 100644 index 00000000000..74db811d60e --- /dev/null +++ b/templates/src/python/grpcio_testing/grpc_version.py.template @@ -0,0 +1,19 @@ +%YAML 1.2 +--- | + # Copyright 2017 gRPC authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!! 
+ + VERSION='${settings.python_version.pep440()}' diff --git a/test/core/compression/algorithm_test.c b/test/core/compression/algorithm_test.c index e37c8c30100..a11e6e90ace 100644 --- a/test/core/compression/algorithm_test.c +++ b/test/core/compression/algorithm_test.c @@ -35,7 +35,7 @@ static void test_algorithm_mesh(void) { gpr_log(GPR_DEBUG, "test_algorithm_mesh"); for (i = 0; i < GRPC_COMPRESS_ALGORITHMS_COUNT; i++) { - char *name; + const char *name; grpc_compression_algorithm parsed; grpc_slice mdstr; grpc_mdelem mdelem; diff --git a/test/core/compression/compression_test.c b/test/core/compression/compression_test.c index 4b6026ec775..326a8003003 100644 --- a/test/core/compression/compression_test.c +++ b/test/core/compression/compression_test.c @@ -57,7 +57,7 @@ static void test_compression_algorithm_parse(void) { static void test_compression_algorithm_name(void) { int success; - char *name; + const char *name; size_t i; const char *valid_names[] = {"identity", "gzip", "deflate"}; const grpc_compression_algorithm valid_algorithms[] = { diff --git a/test/core/compression/message_compress_test.c b/test/core/compression/message_compress_test.c index fd692fe5a51..f7f4893deef 100644 --- a/test/core/compression/message_compress_test.c +++ b/test/core/compression/message_compress_test.c @@ -49,7 +49,7 @@ static void assert_passthrough(grpc_slice value, grpc_slice_buffer output; grpc_slice final; int was_compressed; - char *algorithm_name; + const char *algorithm_name; GPR_ASSERT(grpc_compression_algorithm_name(algorithm, &algorithm_name) != 0); gpr_log( diff --git a/test/core/end2end/tests/compressed_payload.c b/test/core/end2end/tests/compressed_payload.c index 429717a7be0..639af15454b 100644 --- a/test/core/end2end/tests/compressed_payload.c +++ b/test/core/end2end/tests/compressed_payload.c @@ -228,7 +228,7 @@ static void request_for_disabled_algorithm( /* with a certain error */ GPR_ASSERT(status == expected_error); - char *algo_name = NULL; + const char *algo_name = NULL; GPR_ASSERT(grpc_compression_algorithm_name(algorithm_to_disable, &algo_name)); char *expected_details = NULL; gpr_asprintf(&expected_details, "Compression algorithm '%s' is disabled.", diff --git a/test/core/end2end/tests/stream_compression_compressed_payload.c b/test/core/end2end/tests/stream_compression_compressed_payload.c index 11e7999a1c3..5a69091a57f 100644 --- a/test/core/end2end/tests/stream_compression_compressed_payload.c +++ b/test/core/end2end/tests/stream_compression_compressed_payload.c @@ -228,7 +228,7 @@ static void request_for_disabled_algorithm( /* with a certain error */ GPR_ASSERT(status == expected_error); - char *algo_name = NULL; + const char *algo_name = NULL; GPR_ASSERT( grpc_stream_compression_algorithm_name(algorithm_to_disable, &algo_name)); char *expected_details = NULL; diff --git a/test/core/tsi/ssl_transport_security_test.c b/test/core/tsi/ssl_transport_security_test.c index 364dfa1b73f..2399b054b1b 100644 --- a/test/core/tsi/ssl_transport_security_test.c +++ b/test/core/tsi/ssl_transport_security_test.c @@ -23,7 +23,9 @@ #include "src/core/lib/iomgr/load_file.h" #include "src/core/lib/security/transport/security_connector.h" #include "src/core/tsi/ssl_transport_security.h" +#include "src/core/tsi/transport_security.h" #include "src/core/tsi/transport_security_adapter.h" +#include "src/core/tsi/transport_security_interface.h" #include "test/core/tsi/transport_security_test_lib.h" #include "test/core/util/test_config.h" @@ -312,10 +314,10 @@ static void 
ssl_test_destruct(tsi_test_fixture *fixture) { key_cert_lib->bad_client_pem_key_cert_pair); gpr_free(key_cert_lib->root_cert); gpr_free(key_cert_lib); - /* Destroy others. */ - tsi_ssl_server_handshaker_factory_destroy( + /* Unreference others. */ + tsi_ssl_server_handshaker_factory_unref( ssl_fixture->server_handshaker_factory); - tsi_ssl_client_handshaker_factory_destroy( + tsi_ssl_client_handshaker_factory_unref( ssl_fixture->client_handshaker_factory); } @@ -536,6 +538,118 @@ void ssl_tsi_test_do_round_trip_odd_buffer_size() { } } +static const tsi_ssl_handshaker_factory_vtable *original_vtable; +static bool handshaker_factory_destructor_called; + +static void ssl_tsi_test_handshaker_factory_destructor( + tsi_ssl_handshaker_factory *factory) { + GPR_ASSERT(factory != NULL); + handshaker_factory_destructor_called = true; + if (original_vtable != NULL && original_vtable->destroy != NULL) { + original_vtable->destroy(factory); + } +} + +static tsi_ssl_handshaker_factory_vtable test_handshaker_factory_vtable = { + ssl_tsi_test_handshaker_factory_destructor}; + +void test_tsi_ssl_client_handshaker_factory_refcounting() { + int i; + const char *cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "client.pem"); + + tsi_ssl_client_handshaker_factory *client_handshaker_factory; + GPR_ASSERT(tsi_create_ssl_client_handshaker_factory( + NULL, cert_chain, NULL, NULL, 0, &client_handshaker_factory) == + TSI_OK); + + handshaker_factory_destructor_called = false; + original_vtable = tsi_ssl_handshaker_factory_swap_vtable( + (tsi_ssl_handshaker_factory *)client_handshaker_factory, + &test_handshaker_factory_vtable); + + tsi_handshaker *handshaker[3]; + + for (i = 0; i < 3; ++i) { + GPR_ASSERT(tsi_ssl_client_handshaker_factory_create_handshaker( + client_handshaker_factory, "google.com", &handshaker[i]) == + TSI_OK); + } + + tsi_handshaker_destroy(handshaker[1]); + GPR_ASSERT(!handshaker_factory_destructor_called); + + tsi_handshaker_destroy(handshaker[0]); + GPR_ASSERT(!handshaker_factory_destructor_called); + + tsi_ssl_client_handshaker_factory_unref(client_handshaker_factory); + GPR_ASSERT(!handshaker_factory_destructor_called); + + tsi_handshaker_destroy(handshaker[2]); + GPR_ASSERT(handshaker_factory_destructor_called); + + gpr_free((void *)cert_chain); +} + +void test_tsi_ssl_server_handshaker_factory_refcounting() { + int i; + tsi_ssl_server_handshaker_factory *server_handshaker_factory; + tsi_handshaker *handshaker[3]; + const char *cert_chain = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.pem"); + tsi_ssl_pem_key_cert_pair cert_pair; + + cert_pair.cert_chain = cert_chain; + cert_pair.private_key = + load_file(SSL_TSI_TEST_CREDENTIALS_DIR, "server0.key"); + + GPR_ASSERT(tsi_create_ssl_server_handshaker_factory( + &cert_pair, 1, cert_chain, 0, NULL, NULL, 0, + &server_handshaker_factory) == TSI_OK); + + handshaker_factory_destructor_called = false; + original_vtable = tsi_ssl_handshaker_factory_swap_vtable( + (tsi_ssl_handshaker_factory *)server_handshaker_factory, + &test_handshaker_factory_vtable); + + for (i = 0; i < 3; ++i) { + GPR_ASSERT(tsi_ssl_server_handshaker_factory_create_handshaker( + server_handshaker_factory, &handshaker[i]) == TSI_OK); + } + + tsi_handshaker_destroy(handshaker[1]); + GPR_ASSERT(!handshaker_factory_destructor_called); + + tsi_handshaker_destroy(handshaker[0]); + GPR_ASSERT(!handshaker_factory_destructor_called); + + tsi_ssl_server_handshaker_factory_unref(server_handshaker_factory); + GPR_ASSERT(!handshaker_factory_destructor_called); + + 
tsi_handshaker_destroy(handshaker[2]); + GPR_ASSERT(handshaker_factory_destructor_called); + + ssl_test_pem_key_cert_pair_destroy(cert_pair); +} + +/* Attempting to create a handshaker factory with invalid parameters should fail + * but not crash. */ +void test_tsi_ssl_client_handshaker_factory_bad_params() { + const char *cert_chain = "This is not a valid PEM file."; + + tsi_ssl_client_handshaker_factory *client_handshaker_factory; + GPR_ASSERT(tsi_create_ssl_client_handshaker_factory( + NULL, cert_chain, NULL, NULL, 0, &client_handshaker_factory) == + TSI_INVALID_ARGUMENT); + tsi_ssl_client_handshaker_factory_unref(client_handshaker_factory); +} + +void ssl_tsi_test_handshaker_factory_internals() { + test_tsi_ssl_client_handshaker_factory_refcounting(); + test_tsi_ssl_server_handshaker_factory_refcounting(); + test_tsi_ssl_client_handshaker_factory_bad_params(); +} + int main(int argc, char **argv) { grpc_test_init(argc, argv); grpc_init(); @@ -553,6 +667,7 @@ int main(int argc, char **argv) { ssl_tsi_test_do_handshake_alpn_client_server_ok(); ssl_tsi_test_do_round_trip_for_all_configs(); ssl_tsi_test_do_round_trip_odd_buffer_size(); + ssl_tsi_test_handshaker_factory_internals(); grpc_shutdown(); return 0; } diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py index 5c0329bff0c..e0e9226211d 100644 --- a/tools/distrib/python/grpcio_tools/setup.py +++ b/tools/distrib/python/grpcio_tools/setup.py @@ -47,7 +47,7 @@ CLASSIFIERS = [ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'License :: OSI Approved :: Apache Software License', -], +] PY3 = sys.version_info.major == 3 diff --git a/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile b/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile index d13eecaa551..02ec4c278a7 100644 --- a/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile +++ b/tools/dockerfile/distribtest/csharp_jessie_x64/Dockerfile @@ -14,18 +14,15 @@ FROM debian:jessie -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF +RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list -RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list RUN apt-get update && apt-get install -y \ mono-devel \ ca-certificates-mono \ - nuget + nuget \ + && apt-get clean -# make sure we have nuget 2.12+ (in case there's an older cached docker image) -RUN apt-get update && apt-get install -y nuget - -RUN apt-get update && apt-get install -y unzip +RUN apt-get update && apt-get install -y unzip && apt-get clean diff --git a/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile b/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile index 71845b590be..758f3145727 100644 --- a/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile +++ b/tools/dockerfile/distribtest/csharp_jessie_x86/Dockerfile @@ 
-14,18 +14,15 @@ FROM 32bit/debian:jessie -RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF +RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list -RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list RUN apt-get update && apt-get install -y \ mono-devel \ ca-certificates-mono \ - nuget + nuget \ + && apt-get clean -# make sure we have nuget 2.12+ (in case there's an older cached docker image) -RUN apt-get update && apt-get install -y nuget - -RUN apt-get update && apt-get install -y unzip +RUN apt-get update && apt-get install -y unzip && apt-get clean diff --git a/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile b/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile index 6604caa42c2..0f40f18e385 100644 --- a/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile +++ b/tools/dockerfile/distribtest/csharp_ubuntu1604_x64/Dockerfile @@ -17,11 +17,16 @@ FROM ubuntu:16.04 RUN apt-get update && apt-get install -y \ mono-devel \ ca-certificates-mono \ - nuget + nuget \ + && apt-get clean # make sure we have nuget 2.12+ (in case there's an older cached docker image) RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF RUN echo "deb http://download.mono-project.com/repo/debian wheezy main" | tee /etc/apt/sources.list.d/mono-xamarin.list -RUN apt-get update && apt-get install -y nuget +RUN apt-get update && apt-get install -y nuget && apt-get clean -RUN apt-get update && apt-get install -y unzip +# Prevent "Error: SendFailure (Error writing headers)" when fetching nuget packages +# See https://github.com/tianon/docker-brew-ubuntu-core/issues/86 +RUN apt-get update && apt-get install -y tzdata && apt-get clean + +RUN apt-get update && apt-get install -y unzip && apt-get clean diff --git a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile b/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile deleted file mode 100644 index 4ccfbc43c30..00000000000 --- a/tools/dockerfile/test/csharp_coreclr_x64/Dockerfile +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2015 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM debian:jessie - -# Install Git and basic packages. 
-RUN apt-get update && apt-get install -y \ - autoconf \ - autotools-dev \ - build-essential \ - bzip2 \ - ccache \ - curl \ - gcc \ - gcc-multilib \ - git \ - golang \ - gyp \ - lcov \ - libc6 \ - libc6-dbg \ - libc6-dev \ - libgtest-dev \ - libtool \ - make \ - perl \ - strace \ - python-dev \ - python-setuptools \ - python-yaml \ - telnet \ - unzip \ - wget \ - zip && apt-get clean - -#================ -# Build profiling -RUN apt-get update && apt-get install -y time && apt-get clean - -#==================== -# Python dependencies - -# Install dependencies - -RUN apt-get update && apt-get install -y \ - python-all-dev \ - python3-all-dev \ - python-pip - -# Install Python packages from PyPI -RUN pip install pip --upgrade -RUN pip install virtualenv -RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.2.0 six==1.10.0 - -#================ -# C# dependencies - -# Update to a newer version of mono -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF -RUN echo "deb http://download.mono-project.com/repo/debian jessie main" | tee /etc/apt/sources.list.d/mono-official.list -RUN echo "deb http://download.mono-project.com/repo/debian wheezy-apache24-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list -RUN echo "deb http://download.mono-project.com/repo/debian wheezy-libjpeg62-compat main" | tee -a /etc/apt/sources.list.d/mono-xamarin.list - -# Install dependencies -RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y \ - mono-devel \ - ca-certificates-mono \ - nuget \ - && apt-get clean - -RUN nuget update -self - -# Install dotnet SDK based on https://www.microsoft.com/net/core#debian -RUN apt-get update && apt-get install -y curl libunwind8 gettext -# dotnet-dev-1.0.0-preview2-003131 -RUN curl -sSL -o dotnet100.tar.gz https://go.microsoft.com/fwlink/?LinkID=827530 -RUN mkdir -p /opt/dotnet && tar zxf dotnet100.tar.gz -C /opt/dotnet -# dotnet-dev-1.0.1 -RUN curl -sSL -o dotnet101.tar.gz https://go.microsoft.com/fwlink/?LinkID=843453 -RUN mkdir -p /opt/dotnet && tar zxf dotnet101.tar.gz -C /opt/dotnet -RUN ln -s /opt/dotnet/dotnet /usr/local/bin - -# Trigger the population of the local package cache -ENV NUGET_XMLDOC_MODE skip -RUN mkdir warmup \ - && cd warmup \ - && dotnet new \ - && cd .. \ - && rm -rf warmup - -# Prepare ccache -RUN ln -s /usr/bin/ccache /usr/local/bin/gcc -RUN ln -s /usr/bin/ccache /usr/local/bin/g++ -RUN ln -s /usr/bin/ccache /usr/local/bin/cc -RUN ln -s /usr/bin/ccache /usr/local/bin/c++ -RUN ln -s /usr/bin/ccache /usr/local/bin/clang -RUN ln -s /usr/bin/ccache /usr/local/bin/clang++ - - -RUN mkdir /var/local/jenkins - -# Define the default command. -CMD ["bash"] diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg new file mode 100644 index 00000000000..577cb28ae5f --- /dev/null +++ b/tools/internal_ci/linux/pull_request/grpc_basictests_c_dbg.cfg @@ -0,0 +1,30 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh" +timeout_mins: 240 +action { + define_artifacts { + regex: "**/*sponge_log.xml" + regex: "github/grpc/reports/**" + } +} + +env_vars { + key: "RUN_TESTS_FLAGS" + value: "-f basictests linux c dbg --inner_jobs 16 -j 1 --internal_ci --max_time=3600" +} diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg new file mode 100644 index 00000000000..9e0b724b2e1 --- /dev/null +++ b/tools/internal_ci/linux/pull_request/grpc_basictests_c_opt.cfg @@ -0,0 +1,30 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh" +timeout_mins: 240 +action { + define_artifacts { + regex: "**/*sponge_log.xml" + regex: "github/grpc/reports/**" + } +} + +env_vars { + key: "RUN_TESTS_FLAGS" + value: "-f basictests linux c opt --inner_jobs 16 -j 1 --internal_ci --max_time=3600" +} diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg new file mode 100644 index 00000000000..0fda74cf44e --- /dev/null +++ b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_dbg.cfg @@ -0,0 +1,30 @@ +# Copyright 2017 gRPC authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Config file for the internal CI (in protobuf text format) + +# Location of the continuous shell script in repository. +build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh" +timeout_mins: 240 +action { + define_artifacts { + regex: "**/*sponge_log.xml" + regex: "github/grpc/reports/**" + } +} + +env_vars { + key: "RUN_TESTS_FLAGS" + value: "-f basictests linux c++ dbg --inner_jobs 16 -j 1 --internal_ci --max_time=3600" +} diff --git a/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg new file mode 100644 index 00000000000..199a8905d94 --- /dev/null +++ b/tools/internal_ci/linux/pull_request/grpc_basictests_cpp_opt.cfg @@ -0,0 +1,30 @@ +# Copyright 2017 gRPC authors. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_run_tests_matrix.sh"
+timeout_mins: 240
+action {
+  define_artifacts {
+    regex: "**/*sponge_log.xml"
+    regex: "github/grpc/reports/**"
+  }
+}
+
+env_vars {
+  key: "RUN_TESTS_FLAGS"
+  value: "-f basictests linux c++ opt --inner_jobs 16 -j 1 --internal_ci --max_time=3600"
+}
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index 12263282ae0..2cc0dfceabf 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -158,6 +158,7 @@ class PythonArtifact:
     return create_jobspec(self.name,
                           ['tools/run_tests/artifacts/build_artifact_python.sh'],
                           environ=environ,
+                          timeout_seconds=60*60,
                           use_workspace=True)

   def __str__(self):
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index fb1be383cd9..797ed51c7f4 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -105,7 +105,9 @@ class CSharpDistribTest(object):
                             use_workspace=True)
     elif self.platform == 'windows':
       if self.arch == 'x64':
-        environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
+        # Use double leading / as the first occurrence gets removed by msys bash
+        # when invoking the .bat file (side-effect of posix path conversion)
+        environ={'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
                  'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
       else:
         environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index 0da13864f0c..671d0f7b45e 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -78,7 +78,7 @@ class CSharpPackage:
     if self.linux:
       return create_docker_jobspec(
           self.name,
-          'tools/dockerfile/test/csharp_coreclr_x64',
+          'tools/dockerfile/test/csharp_jessie_x64',
           'src/csharp/build_packages_dotnetcli.sh')
     else:
       return create_jobspec(self.name,
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index 4ad981237b7..f99143fdd78 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -86,7 +86,6 @@ _WHITELIST_DICT = {
   '^test/distrib/php/': [_PHP_TEST_SUITE],
   '^test/distrib/python/': [_PYTHON_TEST_SUITE],
   '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
-  '^tools/internal_ci/': [],
   '^vsprojects/': [_WINDOWS_TEST_SUITE],
   'binding\.gyp$': [_NODE_TEST_SUITE],
   'composer\.json$': [_PHP_TEST_SUITE],
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 591a0bebbde..176c1097ee9 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -533,6 +533,7 @@ class PhpLanguage(object):
     self.config = config
     self.args = args
     _check_compiler(self.args.compiler, ['default'])
+    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

   def test_specs(self):
     return [self.config.job_spec(['src/php/bin/run_tests.sh'],
@@ -545,7 +546,7 @@ class PhpLanguage(object):
     return ['static_c', 'shared_c']

   def make_options(self):
-    return []
+    return self._make_options

   def build_steps(self):
     return [['tools/run_tests/helper_scripts/build_php.sh']]
@@ -569,6 +570,7 @@ class Php7Language(object):
     self.config = config
     self.args = args
     _check_compiler(self.args.compiler, ['default'])
+    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

   def test_specs(self):
     return [self.config.job_spec(['src/php/bin/run_tests.sh'],
@@ -581,7 +583,7 @@ class Php7Language(object):
     return ['static_c', 'shared_c']

   def make_options(self):
-    return []
+    return self._make_options

   def build_steps(self):
     return [['tools/run_tests/helper_scripts/build_php.sh']]