diff --git a/BUILD b/BUILD index 7cc44ea3bed..203f4929b77 100644 --- a/BUILD +++ b/BUILD @@ -1360,6 +1360,7 @@ cc_library( "src/cpp/common/channel_filter.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/client/insecure_credentials.cc", "src/cpp/client/secure_credentials.cc", "src/cpp/common/auth_property_iterator.cc", @@ -1389,6 +1390,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", @@ -1514,6 +1516,7 @@ cc_library( "src/cpp/common/channel_filter.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/client/cronet_credentials.cc", "src/cpp/client/insecure_credentials.cc", "src/cpp/common/insecure_create_auth_context.cc", @@ -1539,6 +1542,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", @@ -1739,6 +1743,7 @@ cc_library( "src/cpp/common/channel_filter.h", "src/cpp/server/dynamic_thread_pool.h", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/client/insecure_credentials.cc", "src/cpp/common/insecure_create_auth_context.cc", "src/cpp/server/insecure_server_credentials.cc", @@ -1763,6 +1768,7 @@ cc_library( "src/cpp/server/server_context.cc", "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", + "src/cpp/thread_manager/thread_manager.cc", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", diff --git a/CMakeLists.txt b/CMakeLists.txt index 207552a7852..cc976d847ee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1087,6 +1087,7 @@ add_library(grpc++ src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc @@ -1252,6 +1253,7 @@ add_library(grpc++_cronet src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc @@ -1508,6 +1510,7 @@ add_library(grpc++_unsecure src/cpp/server/server_context.cc src/cpp/server/server_credentials.cc src/cpp/server/server_posix.cc + src/cpp/thread_manager/thread_manager.cc src/cpp/util/byte_buffer_cc.cc src/cpp/util/slice_cc.cc src/cpp/util/status.cc diff --git a/Makefile b/Makefile index 3ad2e2d92fe..db6dd429b8a 100644 --- a/Makefile +++ b/Makefile @@ -1087,6 +1087,7 @@ shutdown_test: $(BINDIR)/$(CONFIG)/shutdown_test status_test: $(BINDIR)/$(CONFIG)/status_test streaming_throughput_test: $(BINDIR)/$(CONFIG)/streaming_throughput_test stress_test: $(BINDIR)/$(CONFIG)/stress_test +thread_manager_test: $(BINDIR)/$(CONFIG)/thread_manager_test thread_stress_test: $(BINDIR)/$(CONFIG)/thread_stress_test public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test @@ -1465,6 +1466,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ 
$(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/thread_manager_test \ $(BINDIR)/$(CONFIG)/thread_stress_test \ $(BINDIR)/$(CONFIG)/boringssl_aes_test \ $(BINDIR)/$(CONFIG)/boringssl_asn1_test \ @@ -1553,6 +1555,7 @@ buildtests_cxx: privatelibs_cxx \ $(BINDIR)/$(CONFIG)/status_test \ $(BINDIR)/$(CONFIG)/streaming_throughput_test \ $(BINDIR)/$(CONFIG)/stress_test \ + $(BINDIR)/$(CONFIG)/thread_manager_test \ $(BINDIR)/$(CONFIG)/thread_stress_test \ endif @@ -1869,6 +1872,8 @@ test_cxx: buildtests_cxx $(Q) $(BINDIR)/$(CONFIG)/status_test || ( echo test status_test failed ; exit 1 ) $(E) "[RUN] Testing streaming_throughput_test" $(Q) $(BINDIR)/$(CONFIG)/streaming_throughput_test || ( echo test streaming_throughput_test failed ; exit 1 ) + $(E) "[RUN] Testing thread_manager_test" + $(Q) $(BINDIR)/$(CONFIG)/thread_manager_test || ( echo test thread_manager_test failed ; exit 1 ) $(E) "[RUN] Testing thread_stress_test" $(Q) $(BINDIR)/$(CONFIG)/thread_stress_test || ( echo test thread_stress_test failed ; exit 1 ) @@ -3734,6 +3739,7 @@ LIBGRPC++_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ @@ -3928,6 +3934,7 @@ LIBGRPC++_CRONET_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ @@ -4509,6 +4516,7 @@ LIBGRPC++_UNSECURE_SRC = \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ + src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ @@ -13577,6 +13585,49 @@ $(OBJDIR)/$(CONFIG)/test/cpp/interop/stress_test.o: $(GENDIR)/src/proto/grpc/tes $(OBJDIR)/$(CONFIG)/test/cpp/util/metrics_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.pb.cc $(GENDIR)/src/proto/grpc/testing/metrics.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc +THREAD_MANAGER_TEST_SRC = \ + test/cpp/thread_manager/thread_manager_test.cc \ + +THREAD_MANAGER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(THREAD_MANAGER_TEST_SRC)))) +ifeq ($(NO_SECURE),true) + +# You can't build secure targets if you don't have OpenSSL. + +$(BINDIR)/$(CONFIG)/thread_manager_test: openssl_dep_error + +else + + + + +ifeq ($(NO_PROTOBUF),true) + +# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+. 
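The new thread_manager_test target above builds test/cpp/thread_manager/thread_manager_test.cc, which is not included in this excerpt. As a rough illustration of the surface it exercises, here is a hedged sketch of a ThreadManager subclass written only against the API visible elsewhere in this patch (the (min_pollers, max_pollers) constructor, PollForWork(), DoWork() and Initialize()); the class name, members and the config.h include are invented/assumed for the example and are not the real test's.

```cpp
#include <atomic>

#include <grpc++/impl/codegen/config.h>  // assumed: provides GRPC_FINAL / GRPC_OVERRIDE
#include "src/cpp/thread_manager/thread_manager.h"

// Hypothetical helper: pretends WORK_FOUND on every poll until a fixed budget
// is spent, then reports SHUTDOWN so the polling threads can wind down.
class FakeWorkThreadManager GRPC_FINAL : public grpc::ThreadManager {
 public:
  FakeWorkThreadManager(int min_pollers, int max_pollers, int work_budget)
      : ThreadManager(min_pollers, max_pollers), work_budget_(work_budget) {}

  WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
    *tag = nullptr;
    *ok = true;
    return polls_.fetch_add(1) < work_budget_ ? WORK_FOUND : SHUTDOWN;
  }

  void DoWork(void* /* tag */, bool /* ok */) GRPC_OVERRIDE { work_done_++; }

  // Initialize() is what SyncRequestThreadManager::Start() calls later in
  // this patch to spin up the polling threads.
  void Start() { Initialize(); }

  int work_done() const { return work_done_.load(); }

 private:
  const int work_budget_;
  std::atomic<int> polls_{0};
  std::atomic<int> work_done_{0};
};
```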
+ +$(BINDIR)/$(CONFIG)/thread_manager_test: protobuf_dep_error + +else + +$(BINDIR)/$(CONFIG)/thread_manager_test: $(PROTOBUF_DEP) $(THREAD_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + $(E) "[LD] Linking $@" + $(Q) mkdir -p `dirname $@` + $(Q) $(LDXX) $(LDFLAGS) $(THREAD_MANAGER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/thread_manager_test + +endif + +endif + +$(OBJDIR)/$(CONFIG)/test/cpp/thread_manager/thread_manager_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a + +deps_thread_manager_test: $(THREAD_MANAGER_TEST_OBJS:.o=.dep) + +ifneq ($(NO_SECURE),true) +ifneq ($(NO_DEPS),true) +-include $(THREAD_MANAGER_TEST_OBJS:.o=.dep) +endif +endif + + THREAD_STRESS_TEST_SRC = \ test/cpp/end2end/thread_stress_test.cc \ diff --git a/build.yaml b/build.yaml index f2106b353b3..56bca7fc759 100644 --- a/build.yaml +++ b/build.yaml @@ -744,6 +744,7 @@ filegroups: - src/cpp/common/channel_filter.h - src/cpp/server/dynamic_thread_pool.h - src/cpp/server/thread_pool_interface.h + - src/cpp/thread_manager/thread_manager.h src: - src/cpp/client/channel_cc.cc - src/cpp/client/client_context.cc @@ -766,6 +767,7 @@ filegroups: - src/cpp/server/server_context.cc - src/cpp/server/server_credentials.cc - src/cpp/server/server_posix.cc + - src/cpp/thread_manager/thread_manager.cc - src/cpp/util/byte_buffer_cc.cc - src/cpp/util/slice_cc.cc - src/cpp/util/status.cc @@ -3508,6 +3510,16 @@ targets: - gpr_test_util - gpr - grpc++_test_config +- name: thread_manager_test + build: test + language: c++ + src: + - test/cpp/thread_manager/thread_manager_test.cc + deps: + - grpc++ + - grpc + - gpr + - grpc++_test_config - name: thread_stress_test gtest: true cpu_cost: 100 diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec index d1bedee63a0..58e73f648d0 100644 --- a/gRPC-Core.podspec +++ b/gRPC-Core.podspec @@ -35,7 +35,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.0' + version = '1.0.1' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -186,7 +186,7 @@ Pod::Spec.new do |s| ss.header_mappings_dir = '.' ss.libraries = 'z' ss.dependency "#{s.name}/Interface", version - ss.dependency 'BoringSSL', '~> 6.0' + ss.dependency 'BoringSSL', '~> 7.0' # To save you from scrolling, this is the last part of the podspec. ss.source_files = 'src/core/lib/profiling/timers.h', diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec index f6426fb4deb..61d4b62d391 100644 --- a/gRPC-ProtoRPC.podspec +++ b/gRPC-ProtoRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-ProtoRPC' - version = '1.0.0' + version = '1.0.1' s.version = version s.summary = 'RPC library for Protocol Buffers, based on gRPC' s.homepage = 'http://www.grpc.io' diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec index 316843436ba..d59385c039a 100644 --- a/gRPC-RxLibrary.podspec +++ b/gRPC-RxLibrary.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC-RxLibrary' - version = '1.0.0' + version = '1.0.1' s.version = version s.summary = 'Reactive Extensions library for iOS/OSX.' 
s.homepage = 'http://www.grpc.io' diff --git a/gRPC.podspec b/gRPC.podspec index 9a479349531..76410b17d28 100644 --- a/gRPC.podspec +++ b/gRPC.podspec @@ -30,7 +30,7 @@ Pod::Spec.new do |s| s.name = 'gRPC' - version = '1.0.0' + version = '1.0.1' s.version = version s.summary = 'gRPC client library for iOS/OSX' s.homepage = 'http://www.grpc.io' diff --git a/include/grpc++/impl/codegen/server_interface.h b/include/grpc++/impl/codegen/server_interface.h index 4a00d7a3a13..5c41ca51b45 100644 --- a/include/grpc++/impl/codegen/server_interface.h +++ b/include/grpc++/impl/codegen/server_interface.h @@ -126,12 +126,6 @@ class ServerInterface : public CallHook { /// \return true on a successful shutdown. virtual bool Start(ServerCompletionQueue** cqs, size_t num_cqs) = 0; - /// Process one or more incoming calls. - virtual void RunRpc() = 0; - - /// Schedule \a RunRpc to run in the threadpool. - virtual void ScheduleCallback() = 0; - virtual void ShutdownInternal(gpr_timespec deadline) = 0; virtual int max_receive_message_size() const = 0; diff --git a/include/grpc++/server.h b/include/grpc++/server.h index f51a6c658f0..a6d70c7577b 100644 --- a/include/grpc++/server.h +++ b/include/grpc++/server.h @@ -105,18 +105,41 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { class AsyncRequest; class ShutdownRequest; + /// SyncRequestThreadManager is an implementation of ThreadManager. This class + /// is responsible for polling for incoming RPCs and calling the RPC handlers. + /// This is only used in case of a Sync server (i.e a server exposing a sync + /// interface) + class SyncRequestThreadManager; + class UnimplementedAsyncRequestContext; class UnimplementedAsyncRequest; class UnimplementedAsyncResponse; /// Server constructors. To be used by \a ServerBuilder only. /// - /// \param thread_pool The threadpool instance to use for call processing. - /// \param thread_pool_owned Does the server own the \a thread_pool instance? - /// \param max_receive_message_size Maximum message length that the channel - /// can receive. - Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, - int max_receive_message_size, ChannelArguments* args); + /// \param max_message_size Maximum message length that the channel can + /// receive. + /// + /// \param args The channel args + /// + /// \param sync_server_cqs The completion queues to use if the server is a + /// synchronous server (or a hybrid server). The server polls for new RPCs on + /// these queues + /// + /// \param min_pollers The minimum number of polling threads per server + /// completion queue (in param sync_server_cqs) to use for listening to + /// incoming requests (used only in case of sync server) + /// + /// \param max_pollers The maximum number of polling threads per server + /// completion queue (in param sync_server_cqs) to use for listening to + /// incoming requests (used only in case of sync server) + /// + /// \param sync_cq_timeout_msec The timeout to use when calling AsyncNext() on + /// server completion queues passed via sync_server_cqs param. + Server(int max_message_size, ChannelArguments* args, + std::shared_ptr>> + sync_server_cqs, + int min_pollers, int max_pollers, int sync_cq_timeout_msec); /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the Server instance. @@ -151,12 +174,6 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { /// \return true on a successful shutdown. 
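SyncRequestThreadManager above derives from ThreadManager, whose header (src/cpp/thread_manager/thread_manager.h) is added to the build files earlier in this patch but is not itself part of this diff. For orientation, here is a speculative reconstruction of the base-class surface, inferred purely from how SyncRequestThreadManager and Server use it later in this patch; the real header almost certainly declares more (e.g. shutdown/wait entry points) and may differ in detail.

```cpp
// Inferred sketch only -- not the actual contents of thread_manager.h.
namespace grpc {

class ThreadManager {
 public:
  ThreadManager(int min_pollers, int max_pollers);
  virtual ~ThreadManager();

  // Result of one poll attempt; SyncRequestThreadManager maps these onto
  // CompletionQueue::AsyncNext() outcomes later in this patch.
  enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT };

  // Implemented by subclasses: look for one unit of work.
  virtual WorkStatus PollForWork(void** tag, bool* ok) = 0;

  // Implemented by subclasses: process the tag returned by PollForWork().
  virtual void DoWork(void* tag, bool ok) = 0;

  // Starts the initial pool of polling threads; SyncRequestThreadManager
  // calls this from its Start() method.
  void Initialize();

  // Whether shutdown has been requested (checked inside DoWork()).
  bool IsShutdown();
};

}  // namespace grpc
```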
bool Start(ServerCompletionQueue** cqs, size_t num_cqs) GRPC_OVERRIDE; - /// Process one or more incoming calls. - void RunRpc() GRPC_OVERRIDE; - - /// Schedule \a RunRpc to run in the threadpool. - void ScheduleCallback() GRPC_OVERRIDE; - void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE; void ShutdownInternal(gpr_timespec deadline) GRPC_OVERRIDE; @@ -171,34 +188,31 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen { const int max_receive_message_size_; - // Completion queue. - CompletionQueue cq_; + /// The following completion queues are ONLY used in case of Sync API i.e if + /// the server has any services with sync methods. The server uses these + /// completion queues to poll for new RPCs + std::shared_ptr>> + sync_server_cqs_; + + /// List of ThreadManager instances (one for each cq in the sync_server_cqs) + std::vector> sync_req_mgrs_; // Sever status grpc::mutex mu_; bool started_; bool shutdown_; - bool shutdown_notified_; - // The number of threads which are running callbacks. - int num_running_cb_; - grpc::condition_variable callback_cv_; + bool shutdown_notified_; // Was notify called on the shutdown_cv_ grpc::condition_variable shutdown_cv_; std::shared_ptr global_callbacks_; - std::list* sync_methods_; std::vector services_; - std::unique_ptr unknown_method_; bool has_generic_service_; - // Pointer to the c grpc server. + // Pointer to the wrapped grpc_server. grpc_server* server_; - ThreadPoolInterface* thread_pool_; - // Whether the thread pool is created and owned by the server. - bool thread_pool_owned_; - std::unique_ptr server_initializer_; }; diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h index 15333df60e1..9252c6a63a8 100644 --- a/include/grpc++/server_builder.h +++ b/include/grpc++/server_builder.h @@ -34,6 +34,7 @@ #ifndef GRPCXX_SERVER_BUILDER_H #define GRPCXX_SERVER_BUILDER_H +#include #include #include #include @@ -42,6 +43,8 @@ #include #include #include +#include +#include struct grpc_resource_quota; @@ -66,6 +69,8 @@ class ServerBuilder { ServerBuilder(); ~ServerBuilder(); + enum SyncServerOption { NUM_CQS, MIN_POLLERS, MAX_POLLERS, CQ_TIMEOUT_MSEC }; + /// Register a service. This call does not take ownership of the service. /// The service must exist for the lifetime of the \a Server instance returned /// by \a BuildAndStart(). @@ -122,6 +127,9 @@ class ServerBuilder { ServerBuilder& SetOption(std::unique_ptr option); + /// Only useful if this is a Synchronous server. + ServerBuilder& SetSyncServerOption(SyncServerOption option, int value); + /// Tries to bind \a server to the given \a addr. /// /// It can be invoked multiple times. @@ -177,6 +185,28 @@ class ServerBuilder { int* selected_port; }; + struct SyncServerSettings { + SyncServerSettings() + : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)), + min_pollers(1), + max_pollers(INT_MAX), + cq_timeout_msec(1000) {} + + // Number of server completion queues to create to listen to incoming RPCs. + int num_cqs; + + // Minimum number of threads per completion queue that should be listening + // to incoming RPCs. + int min_pollers; + + // Maximum number of threads per completion queue that can be listening to + // incoming RPCs. + int max_pollers; + + // The timeout for server completion queue's AsyncNext call. 
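A hedged usage sketch of these knobs as seen from application code: the service pointer, listening address and numeric values below are placeholders, and the defaults in SyncServerSettings apply whenever SetSyncServerOption() is not called.

```cpp
#include <memory>

#include <grpc++/security/server_credentials.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>

// `sync_service` stands in for any generated synchronous service
// implementation registered with the builder.
void RunTunedSyncServer(grpc::Service* sync_service) {
  grpc::ServerBuilder builder;
  builder.RegisterService(sync_service);
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());

  // Tune the internal sync completion queues: 4 queues, 2-8 polling threads
  // per queue, and a 500ms AsyncNext() timeout.
  builder.SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, 4);
  builder.SetSyncServerOption(grpc::ServerBuilder::MIN_POLLERS, 2);
  builder.SetSyncServerOption(grpc::ServerBuilder::MAX_POLLERS, 8);
  builder.SetSyncServerOption(grpc::ServerBuilder::CQ_TIMEOUT_MSEC, 500);

  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();  // Blocks until the server is shut down elsewhere.
}
```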
+ int cq_timeout_msec; + }; + typedef std::unique_ptr HostString; struct NamedService { explicit NamedService(Service* s) : service(s) {} @@ -191,7 +221,12 @@ class ServerBuilder { std::vector> options_; std::vector> services_; std::vector ports_; + + SyncServerSettings sync_server_settings_; + + // List of completion queues added via AddCompletionQueue() method std::vector cqs_; + std::shared_ptr creds_; std::vector> plugins_; grpc_resource_quota* resource_quota_; diff --git a/package.json b/package.json index c6b2600209b..5506e9bbdf8 100644 --- a/package.json +++ b/package.json @@ -25,26 +25,28 @@ "coverage": "./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha src/node/test", "install": "./node_modules/.bin/node-pre-gyp install --fallback-to-build" }, - "bundledDependencies": ["node-pre-gyp"], + "bundledDependencies": [ + "node-pre-gyp" + ], "dependencies": { "arguejs": "^0.2.3", - "lodash": "^3.9.3", + "lodash": "^4.15.0", "nan": "^2.0.0", - "protobufjs": "^4.0.0" + "node-pre-gyp": "^0.6.0", + "protobufjs": "^5.0.0" }, "devDependencies": { - "async": "^1.5.0", + "async": "^2.0.1", "body-parser": "^1.15.2", "express": "^4.14.0", "google-auth-library": "^0.9.2", "google-protobuf": "^3.0.0", - "istanbul": "^0.3.21", + "istanbul": "^0.4.4", "jsdoc": "^3.3.2", "jshint": "^2.5.0", "minimist": "^1.1.0", - "mocha": "^2.3.4", - "mocha-jenkins-reporter": "^0.1.9", - "mustache": "^2.0.0", + "mocha": "^3.0.2", + "mocha-jenkins-reporter": "^0.2.3", "poisson-process": "^0.2.1" }, "engines": { @@ -52,11 +54,10 @@ }, "binary": { "module_name": "grpc_node", - "module_path": "./build/Release/", + "module_path": "src/node/extension_binary", "host": "https://storage.googleapis.com/", "remote_path": "grpc-precompiled-binaries/node/{name}/v{version}", - "package_name": "{node_abi}-{platform}-{arch}.tar.gz", - "module_path": "src/node/extension_binary" + "package_name": "{node_abi}-{platform}-{arch}.tar.gz" }, "files": [ "LICENSE", @@ -77,7 +78,7 @@ ], "main": "src/node/index.js", "license": "BSD-3-Clause", - "jshintConfig" : { + "jshintConfig": { "bitwise": true, "curly": true, "eqeqeq": true, diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc index 6830f49931d..febaf135b6f 100644 --- a/src/compiler/python_generator.cc +++ b/src/compiler/python_generator.cc @@ -35,6 +35,8 @@ #include #include #include +#include +#include #include #include #include @@ -66,66 +68,11 @@ using std::vector; namespace grpc_python_generator { -GeneratorConfiguration::GeneratorConfiguration() - : grpc_package_root("grpc"), beta_package_root("grpc.beta") {} - -PythonGrpcGenerator::PythonGrpcGenerator(const GeneratorConfiguration& config) - : config_(config) {} - -PythonGrpcGenerator::~PythonGrpcGenerator() {} - -bool PythonGrpcGenerator::Generate(const FileDescriptor* file, - const grpc::string& parameter, - GeneratorContext* context, - grpc::string* error) const { - // Get output file name. - grpc::string file_name; - static const int proto_suffix_length = strlen(".proto"); - if (file->name().size() > static_cast(proto_suffix_length) && - file->name().find_last_of(".proto") == file->name().size() - 1) { - file_name = - file->name().substr(0, file->name().size() - proto_suffix_length) + - "_pb2.py"; - } else { - *error = "Invalid proto file name. 
Proto file must end with .proto"; - return false; - } - - std::unique_ptr output( - context->OpenForInsert(file_name, "module_scope")); - CodedOutputStream coded_out(output.get()); - bool success = false; - grpc::string code = ""; - tie(success, code) = grpc_python_generator::GetServices(file, config_); - if (success) { - coded_out.WriteRaw(code.data(), code.size()); - return true; - } else { - return false; - } -} - namespace { -////////////////////////////////// -// BEGIN FORMATTING BOILERPLATE // -////////////////////////////////// - -// Converts an initializer list of the form { key0, value0, key1, value1, ... } -// into a map of key* to value*. Is merely a readability helper for later code. -map ListToDict( - const initializer_list& values) { - assert(values.size() % 2 == 0); - map value_map; - auto value_iter = values.begin(); - for (unsigned i = 0; i < values.size() / 2; ++i) { - grpc::string key = *value_iter; - ++value_iter; - grpc::string value = *value_iter; - value_map[key] = value; - ++value_iter; - } - return value_map; -} + +typedef vector DescriptorVector; +typedef map StringMap; +typedef vector StringVector; // Provides RAII indentation handling. Use as: // { @@ -146,10 +93,6 @@ class IndentScope { Printer* printer_; }; -//////////////////////////////// -// END FORMATTING BOILERPLATE // -//////////////////////////////// - // TODO(https://github.com/google/protobuf/issues/888): // Export `ModuleName` from protobuf's // `src/google/protobuf/compiler/python/python_generator.cc` file. @@ -173,11 +116,61 @@ grpc::string ModuleAlias(const grpc::string& filename) { return module_name; } -bool GetModuleAndMessagePath(const Descriptor* type, - const ServiceDescriptor* service, - grpc::string* out) { +// Tucks all generator state in an anonymous namespace away from +// PythonGrpcGenerator and the header file, mostly to encourage future changes +// to not require updates to the grpcio-tools C++ code part. Assumes that it is +// only ever used from a single thread. +struct PrivateGenerator { + const GeneratorConfiguration& config; + const FileDescriptor* file; + + bool generate_in_pb2_grpc; + + Printer* out; + + PrivateGenerator(const GeneratorConfiguration& config, + const FileDescriptor* file); + + std::pair GetGrpcServices(); + + private: + bool PrintPreamble(); + bool PrintBetaPreamble(); + bool PrintGAServices(); + bool PrintBetaServices(); + + bool PrintAddServicerToServer( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service); + bool PrintServicer(const ServiceDescriptor* service); + bool PrintStub(const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service); + + bool PrintBetaServicer(const ServiceDescriptor* service); + bool PrintBetaServerFactory( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service); + bool PrintBetaStub(const ServiceDescriptor* service); + bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service); + + // Get all comments (leading, leading_detached, trailing) and print them as a + // docstring. Any leading space of a line will be removed, but the line + // wrapping will not be changed. 
+ template + void PrintAllComments(const DescriptorType* descriptor); + + bool GetModuleAndMessagePath(const Descriptor* type, grpc::string* out); +}; + +PrivateGenerator::PrivateGenerator(const GeneratorConfiguration& config, + const FileDescriptor* file) + : config(config), file(file) {} + +bool PrivateGenerator::GetModuleAndMessagePath(const Descriptor* type, + grpc::string* out) { const Descriptor* path_elem_type = type; - vector message_path; + DescriptorVector message_path; do { message_path.push_back(path_elem_type); path_elem_type = path_elem_type->containing_type(); @@ -188,12 +181,16 @@ bool GetModuleAndMessagePath(const Descriptor* type, file_name.find_last_of(".proto") == file_name.size() - 1)) { return false; } - grpc::string service_file_name = service->file()->name(); - grpc::string module = - service_file_name == file_name ? "" : ModuleAlias(file_name) + "."; + grpc::string generator_file_name = file->name(); + grpc::string module; + if (generator_file_name != file_name || generate_in_pb2_grpc) { + module = ModuleAlias(file_name) + "."; + } else { + module = ""; + } grpc::string message_type; - for (auto path_iter = message_path.rbegin(); path_iter != message_path.rend(); - ++path_iter) { + for (DescriptorVector::reverse_iterator path_iter = message_path.rbegin(); + path_iter != message_path.rend(); ++path_iter) { message_type += (*path_iter)->name() + "."; } // no pop_back prior to C++11 @@ -202,33 +199,31 @@ bool GetModuleAndMessagePath(const Descriptor* type, return true; } -// Get all comments (leading, leading_detached, trailing) and print them as a -// docstring. Any leading space of a line will be removed, but the line wrapping -// will not be changed. template -static void PrintAllComments(const DescriptorType* desc, Printer* printer) { - std::vector comments; - grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING_DETACHED, - &comments); - grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_LEADING, +void PrivateGenerator::PrintAllComments(const DescriptorType* descriptor) { + StringVector comments; + grpc_generator::GetComment( + descriptor, grpc_generator::COMMENTTYPE_LEADING_DETACHED, &comments); + grpc_generator::GetComment(descriptor, grpc_generator::COMMENTTYPE_LEADING, &comments); - grpc_generator::GetComment(desc, grpc_generator::COMMENTTYPE_TRAILING, + grpc_generator::GetComment(descriptor, grpc_generator::COMMENTTYPE_TRAILING, &comments); if (comments.empty()) { return; } - printer->Print("\"\"\""); - for (auto it = comments.begin(); it != comments.end(); ++it) { + out->Print("\"\"\""); + for (StringVector::iterator it = comments.begin(); it != comments.end(); + ++it) { size_t start_pos = it->find_first_not_of(' '); if (start_pos != grpc::string::npos) { - printer->Print(it->c_str() + start_pos); + out->Print(it->c_str() + start_pos); } - printer->Print("\n"); + out->Print("\n"); } - printer->Print("\"\"\"\n"); + out->Print("\"\"\"\n"); } -bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) { +bool PrivateGenerator::PrintBetaServicer(const ServiceDescriptor* service) { out->Print("\n\n"); out->Print("class Beta$Service$Servicer(object):\n", "Service", service->name()); @@ -241,16 +236,16 @@ bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) { "generated\n" "only to ease transition from grpcio<0.15.0 to " "grpcio>=0.15.0.\"\"\"\n"); - PrintAllComments(service, out); + PrintAllComments(service); for (int i = 0; i < service->method_count(); ++i) { - auto meth = service->method(i); + const 
MethodDescriptor* method = service->method(i); grpc::string arg_name = - meth->client_streaming() ? "request_iterator" : "request"; + method->client_streaming() ? "request_iterator" : "request"; out->Print("def $Method$(self, $ArgName$, context):\n", "Method", - meth->name(), "ArgName", arg_name); + method->name(), "ArgName", arg_name); { IndentScope raii_method_indent(out); - PrintAllComments(meth, out); + PrintAllComments(method); out->Print("context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)\n"); } } @@ -258,7 +253,7 @@ bool PrintBetaServicer(const ServiceDescriptor* service, Printer* out) { return true; } -bool PrintBetaStub(const ServiceDescriptor* service, Printer* out) { +bool PrivateGenerator::PrintBetaStub(const ServiceDescriptor* service) { out->Print("\n\n"); out->Print("class Beta$Service$Stub(object):\n", "Service", service->name()); { @@ -270,30 +265,33 @@ bool PrintBetaStub(const ServiceDescriptor* service, Printer* out) { "generated\n" "only to ease transition from grpcio<0.15.0 to " "grpcio>=0.15.0.\"\"\"\n"); - PrintAllComments(service, out); + PrintAllComments(service); for (int i = 0; i < service->method_count(); ++i) { - const MethodDescriptor* meth = service->method(i); + const MethodDescriptor* method = service->method(i); grpc::string arg_name = - meth->client_streaming() ? "request_iterator" : "request"; - auto methdict = ListToDict({"Method", meth->name(), "ArgName", arg_name}); - out->Print(methdict, + method->client_streaming() ? "request_iterator" : "request"; + StringMap method_dict; + method_dict["Method"] = method->name(); + method_dict["ArgName"] = arg_name; + out->Print(method_dict, "def $Method$(self, $ArgName$, timeout, metadata=None, " "with_call=False, protocol_options=None):\n"); { IndentScope raii_method_indent(out); - PrintAllComments(meth, out); + PrintAllComments(method); out->Print("raise NotImplementedError()\n"); } - if (!meth->server_streaming()) { - out->Print(methdict, "$Method$.future = None\n"); + if (!method->server_streaming()) { + out->Print(method_dict, "$Method$.future = None\n"); } } } return true; } -bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, - const ServiceDescriptor* service, Printer* out) { +bool PrivateGenerator::PrintBetaServerFactory( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service) { out->Print("\n\n"); out->Print( "def beta_create_$Service$_server(servicer, pool=None, " @@ -307,9 +305,9 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, "file not marked beta) for all further purposes. This function was\n" "generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0" "\"\"\"\n"); - map method_implementation_constructors; - map input_message_modules_and_classes; - map output_message_modules_and_classes; + StringMap method_implementation_constructors; + StringMap input_message_modules_and_classes; + StringMap output_message_modules_and_classes; for (int i = 0; i < service->method_count(); ++i) { const MethodDescriptor* method = service->method(i); const grpc::string method_implementation_constructor = @@ -317,12 +315,12 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, grpc::string(method->server_streaming() ? 
"stream_" : "unary_") + "inline"; grpc::string input_message_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, + if (!GetModuleAndMessagePath(method->input_type(), &input_message_module_and_class)) { return false; } grpc::string output_message_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, + if (!GetModuleAndMessagePath(method->output_type(), &output_message_module_and_class)) { return false; } @@ -334,7 +332,7 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, make_pair(method->name(), output_message_module_and_class)); } out->Print("request_deserializers = {\n"); - for (auto name_and_input_module_class_pair = + for (StringMap::iterator name_and_input_module_class_pair = input_message_modules_and_classes.begin(); name_and_input_module_class_pair != input_message_modules_and_classes.end(); @@ -349,7 +347,7 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, } out->Print("}\n"); out->Print("response_serializers = {\n"); - for (auto name_and_output_module_class_pair = + for (StringMap::iterator name_and_output_module_class_pair = output_message_modules_and_classes.begin(); name_and_output_module_class_pair != output_message_modules_and_classes.end(); @@ -365,7 +363,7 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, } out->Print("}\n"); out->Print("method_implementations = {\n"); - for (auto name_and_implementation_constructor = + for (StringMap::iterator name_and_implementation_constructor = method_implementation_constructors.begin(); name_and_implementation_constructor != method_implementation_constructors.end(); @@ -395,11 +393,11 @@ bool PrintBetaServerFactory(const grpc::string& package_qualified_service_name, return true; } -bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, - const ServiceDescriptor* service, Printer* out) { - map dict = ListToDict({ - "Service", service->name(), - }); +bool PrivateGenerator::PrintBetaStubFactory( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service) { + StringMap dict; + dict["Service"] = service->name(); out->Print("\n\n"); out->Print(dict, "def beta_create_$Service$_stub(channel, host=None," @@ -412,21 +410,21 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, "file not marked beta) for all further purposes. This function was\n" "generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0" "\"\"\"\n"); - map method_cardinalities; - map input_message_modules_and_classes; - map output_message_modules_and_classes; + StringMap method_cardinalities; + StringMap input_message_modules_and_classes; + StringMap output_message_modules_and_classes; for (int i = 0; i < service->method_count(); ++i) { const MethodDescriptor* method = service->method(i); const grpc::string method_cardinality = grpc::string(method->client_streaming() ? "STREAM" : "UNARY") + "_" + grpc::string(method->server_streaming() ? 
"STREAM" : "UNARY"); grpc::string input_message_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, + if (!GetModuleAndMessagePath(method->input_type(), &input_message_module_and_class)) { return false; } grpc::string output_message_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, + if (!GetModuleAndMessagePath(method->output_type(), &output_message_module_and_class)) { return false; } @@ -438,7 +436,7 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, make_pair(method->name(), output_message_module_and_class)); } out->Print("request_serializers = {\n"); - for (auto name_and_input_module_class_pair = + for (StringMap::iterator name_and_input_module_class_pair = input_message_modules_and_classes.begin(); name_and_input_module_class_pair != input_message_modules_and_classes.end(); @@ -453,7 +451,7 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, } out->Print("}\n"); out->Print("response_deserializers = {\n"); - for (auto name_and_output_module_class_pair = + for (StringMap::iterator name_and_output_module_class_pair = output_message_modules_and_classes.begin(); name_and_output_module_class_pair != output_message_modules_and_classes.end(); @@ -469,7 +467,8 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, } out->Print("}\n"); out->Print("cardinalities = {\n"); - for (auto name_and_cardinality = method_cardinalities.begin(); + for (StringMap::iterator name_and_cardinality = + method_cardinalities.begin(); name_and_cardinality != method_cardinalities.end(); name_and_cardinality++) { IndentScope raii_descriptions_indent(out); @@ -493,13 +492,14 @@ bool PrintBetaStubFactory(const grpc::string& package_qualified_service_name, return true; } -bool PrintStub(const grpc::string& package_qualified_service_name, - const ServiceDescriptor* service, Printer* out) { +bool PrivateGenerator::PrintStub( + const grpc::string& package_qualified_service_name, + const ServiceDescriptor* service) { out->Print("\n\n"); out->Print("class $Service$Stub(object):\n", "Service", service->name()); { IndentScope raii_class_indent(out); - PrintAllComments(service, out); + PrintAllComments(service); out->Print("\n"); out->Print("def __init__(self, channel):\n"); { @@ -513,17 +513,17 @@ bool PrintStub(const grpc::string& package_qualified_service_name, } out->Print("\"\"\"\n"); for (int i = 0; i < service->method_count(); ++i) { - auto method = service->method(i); - auto multi_callable_constructor = + const MethodDescriptor* method = service->method(i); + grpc::string multi_callable_constructor = grpc::string(method->client_streaming() ? "stream" : "unary") + "_" + grpc::string(method->server_streaming() ? 
"stream" : "unary"); grpc::string request_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, + if (!GetModuleAndMessagePath(method->input_type(), &request_module_and_class)) { return false; } grpc::string response_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, + if (!GetModuleAndMessagePath(method->output_type(), &response_module_and_class)) { return false; } @@ -550,14 +550,14 @@ bool PrintStub(const grpc::string& package_qualified_service_name, return true; } -bool PrintServicer(const ServiceDescriptor* service, Printer* out) { +bool PrivateGenerator::PrintServicer(const ServiceDescriptor* service) { out->Print("\n\n"); out->Print("class $Service$Servicer(object):\n", "Service", service->name()); { IndentScope raii_class_indent(out); - PrintAllComments(service, out); + PrintAllComments(service); for (int i = 0; i < service->method_count(); ++i) { - auto method = service->method(i); + const MethodDescriptor* method = service->method(i); grpc::string arg_name = method->client_streaming() ? "request_iterator" : "request"; out->Print("\n"); @@ -565,7 +565,7 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) { method->name(), "ArgName", arg_name); { IndentScope raii_method_indent(out); - PrintAllComments(method, out); + PrintAllComments(method); out->Print("context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n"); out->Print("context.set_details('Method not implemented!')\n"); out->Print("raise NotImplementedError('Method not implemented!')\n"); @@ -575,9 +575,9 @@ bool PrintServicer(const ServiceDescriptor* service, Printer* out) { return true; } -bool PrintAddServicerToServer( +bool PrivateGenerator::PrintAddServicerToServer( const grpc::string& package_qualified_service_name, - const ServiceDescriptor* service, Printer* out) { + const ServiceDescriptor* service) { out->Print("\n\n"); out->Print("def add_$Service$Servicer_to_server(servicer, server):\n", "Service", service->name()); @@ -588,19 +588,19 @@ bool PrintAddServicerToServer( IndentScope raii_dict_first_indent(out); IndentScope raii_dict_second_indent(out); for (int i = 0; i < service->method_count(); ++i) { - auto method = service->method(i); - auto method_handler_constructor = + const MethodDescriptor* method = service->method(i); + grpc::string method_handler_constructor = grpc::string(method->client_streaming() ? "stream" : "unary") + "_" + grpc::string(method->server_streaming() ? 
"stream" : "unary") + "_rpc_method_handler"; grpc::string request_module_and_class; - if (!GetModuleAndMessagePath(method->input_type(), service, + if (!GetModuleAndMessagePath(method->input_type(), &request_module_and_class)) { return false; } grpc::string response_module_and_class; - if (!GetModuleAndMessagePath(method->output_type(), service, + if (!GetModuleAndMessagePath(method->output_type(), &response_module_and_class)) { return false; } @@ -635,53 +635,173 @@ bool PrintAddServicerToServer( return true; } -bool PrintPreamble(const FileDescriptor* file, - const GeneratorConfiguration& config, Printer* out) { - out->Print("import $Package$\n", "Package", config.grpc_package_root); +bool PrivateGenerator::PrintBetaPreamble() { out->Print("from $Package$ import implementations as beta_implementations\n", "Package", config.beta_package_root); out->Print("from $Package$ import interfaces as beta_interfaces\n", "Package", config.beta_package_root); + return true; +} + +bool PrivateGenerator::PrintPreamble() { + out->Print("import $Package$\n", "Package", config.grpc_package_root); out->Print("from grpc.framework.common import cardinality\n"); out->Print( "from grpc.framework.interfaces.face import utilities as " "face_utilities\n"); + if (generate_in_pb2_grpc) { + out->Print("\n"); + for (int i = 0; i < file->service_count(); ++i) { + const ServiceDescriptor* service = file->service(i); + for (int j = 0; j < service->method_count(); ++j) { + const MethodDescriptor* method = service->method(j); + const Descriptor* types[2] = {method->input_type(), + method->output_type()}; + for (int k = 0; k < 2; ++k) { + const Descriptor* type = types[k]; + grpc::string type_file_name = type->file()->name(); + grpc::string module_name = ModuleName(type_file_name); + grpc::string module_alias = ModuleAlias(type_file_name); + out->Print("import $ModuleName$ as $ModuleAlias$\n", "ModuleName", + module_name, "ModuleAlias", module_alias); + } + } + } + } return true; } -} // namespace +bool PrivateGenerator::PrintGAServices() { + grpc::string package = file->package(); + if (!package.empty()) { + package = package.append("."); + } + for (int i = 0; i < file->service_count(); ++i) { + const ServiceDescriptor* service = file->service(i); + grpc::string package_qualified_service_name = package + service->name(); + if (!(PrintStub(package_qualified_service_name, service) && + PrintServicer(service) && + PrintAddServicerToServer(package_qualified_service_name, service))) { + return false; + } + } + return true; +} -pair GetServices(const FileDescriptor* file, - const GeneratorConfiguration& config) { +bool PrivateGenerator::PrintBetaServices() { + grpc::string package = file->package(); + if (!package.empty()) { + package = package.append("."); + } + for (int i = 0; i < file->service_count(); ++i) { + const ServiceDescriptor* service = file->service(i); + grpc::string package_qualified_service_name = package + service->name(); + if (!(PrintBetaServicer(service) && PrintBetaStub(service) && + PrintBetaServerFactory(package_qualified_service_name, service) && + PrintBetaStubFactory(package_qualified_service_name, service))) { + return false; + } + } + return true; +} + +pair PrivateGenerator::GetGrpcServices() { grpc::string output; { // Scope the output stream so it closes and finalizes output to the string. 
StringOutputStream output_stream(&output); - Printer out(&output_stream, '$'); - if (!PrintPreamble(file, config, &out)) { - return make_pair(false, ""); - } - auto package = file->package(); - if (!package.empty()) { - package = package.append("."); - } - for (int i = 0; i < file->service_count(); ++i) { - auto service = file->service(i); - auto package_qualified_service_name = package + service->name(); - if (!(PrintStub(package_qualified_service_name, service, &out) && - PrintServicer(service, &out) && - PrintAddServicerToServer(package_qualified_service_name, service, - &out) && - PrintBetaServicer(service, &out) && PrintBetaStub(service, &out) && - PrintBetaServerFactory(package_qualified_service_name, service, - &out) && - PrintBetaStubFactory(package_qualified_service_name, service, - &out))) { + Printer out_printer(&output_stream, '$'); + out = &out_printer; + + if (generate_in_pb2_grpc) { + if (!PrintPreamble()) { + return make_pair(false, ""); + } + if (!PrintGAServices()) { return make_pair(false, ""); } + } else { + out->Print("try:\n"); + { + IndentScope raii_dict_try_indent(out); + out->Print( + "# THESE ELEMENTS WILL BE DEPRECATED.\n" + "# Please use the generated *_pb2_grpc.py files instead.\n"); + if (!PrintPreamble()) { + return make_pair(false, ""); + } + if (!PrintBetaPreamble()) { + return make_pair(false, ""); + } + if (!PrintGAServices()) { + return make_pair(false, ""); + } + if (!PrintBetaServices()) { + return make_pair(false, ""); + } + } + out->Print("except ImportError:\n"); + { + IndentScope raii_dict_except_indent(out); + out->Print("pass"); + } } } return make_pair(true, std::move(output)); } +} // namespace + +GeneratorConfiguration::GeneratorConfiguration() + : grpc_package_root("grpc"), beta_package_root("grpc.beta") {} + +PythonGrpcGenerator::PythonGrpcGenerator(const GeneratorConfiguration& config) + : config_(config) {} + +PythonGrpcGenerator::~PythonGrpcGenerator() {} + +bool PythonGrpcGenerator::Generate(const FileDescriptor* file, + const grpc::string& parameter, + GeneratorContext* context, + grpc::string* error) const { + // Get output file name. + grpc::string pb2_file_name; + grpc::string pb2_grpc_file_name; + static const int proto_suffix_length = strlen(".proto"); + if (file->name().size() > static_cast(proto_suffix_length) && + file->name().find_last_of(".proto") == file->name().size() - 1) { + grpc::string base = + file->name().substr(0, file->name().size() - proto_suffix_length); + pb2_file_name = base + "_pb2.py"; + pb2_grpc_file_name = base + "_pb2_grpc.py"; + } else { + *error = "Invalid proto file name. 
Proto file must end with .proto"; + return false; + } + + PrivateGenerator generator(config_, file); + + std::unique_ptr pb2_output( + context->OpenForAppend(pb2_file_name)); + std::unique_ptr grpc_output( + context->Open(pb2_grpc_file_name)); + CodedOutputStream pb2_coded_out(pb2_output.get()); + CodedOutputStream grpc_coded_out(grpc_output.get()); + bool success = false; + grpc::string pb2_code; + grpc::string grpc_code; + generator.generate_in_pb2_grpc = false; + tie(success, pb2_code) = generator.GetGrpcServices(); + if (success) { + generator.generate_in_pb2_grpc = true; + tie(success, grpc_code) = generator.GetGrpcServices(); + if (success) { + pb2_coded_out.WriteRaw(pb2_code.data(), pb2_code.size()); + grpc_coded_out.WriteRaw(grpc_code.data(), grpc_code.size()); + return true; + } + } + return false; +} + } // namespace grpc_python_generator diff --git a/src/compiler/python_generator.h b/src/compiler/python_generator.h index 9bbb83bca6e..6a95255d40e 100644 --- a/src/compiler/python_generator.h +++ b/src/compiler/python_generator.h @@ -62,10 +62,6 @@ class PythonGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator { GeneratorConfiguration config_; }; -std::pair GetServices( - const grpc::protobuf::FileDescriptor* file, - const GeneratorConfiguration& config); - } // namespace grpc_python_generator #endif // GRPC_INTERNAL_COMPILER_PYTHON_GENERATOR_H diff --git a/src/compiler/ruby_generator.cc b/src/compiler/ruby_generator.cc index 02202568cb4..c85babf1b86 100644 --- a/src/compiler/ruby_generator.cc +++ b/src/compiler/ruby_generator.cc @@ -119,6 +119,43 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package, } // namespace +// The following functions are copied directly from the source for the protoc +// ruby generator +// to ensure compatibility (with the exception of int and string type changes). +// See +// https://github.com/google/protobuf/blob/master/src/google/protobuf/compiler/ruby/ruby_generator.cc#L250 +// TODO: keep up to date with protoc code generation, though this behavior isn't +// expected to change +bool IsLower(char ch) { return ch >= 'a' && ch <= 'z'; } + +char ToUpper(char ch) { return IsLower(ch) ? (ch - 'a' + 'A') : ch; } + +// Package names in protobuf are snake_case by convention, but Ruby module +// names must be PascalCased. 
+// +// foo_bar_baz -> FooBarBaz +grpc::string PackageToModule(const grpc::string &name) { + bool next_upper = true; + grpc::string result; + result.reserve(name.size()); + + for (grpc::string::size_type i = 0; i < name.size(); i++) { + if (name[i] == '_') { + next_upper = true; + } else { + if (next_upper) { + result.push_back(ToUpper(name[i])); + } else { + result.push_back(name[i]); + } + next_upper = false; + } + } + + return result; +} +// end copying of protoc generator for ruby code + grpc::string GetServices(const FileDescriptor *file) { grpc::string output; { @@ -162,7 +199,7 @@ grpc::string GetServices(const FileDescriptor *file) { std::vector modules = Split(file->package(), '.'); for (size_t i = 0; i < modules.size(); ++i) { std::map module_vars = ListToDict({ - "module.name", CapitalizeFirst(modules[i]), + "module.name", PackageToModule(modules[i]), }); out.Print(module_vars, "module $module.name$\n"); out.Indent(); diff --git a/src/core/lib/iomgr/endpoint_pair_uv.c b/src/core/lib/iomgr/endpoint_pair_uv.c index 7941e203886..ff24894c6db 100644 --- a/src/core/lib/iomgr/endpoint_pair_uv.c +++ b/src/core/lib/iomgr/endpoint_pair_uv.c @@ -41,8 +41,9 @@ #include "src/core/lib/iomgr/endpoint_pair.h" -grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name, - size_t read_slice_size) { +grpc_endpoint_pair grpc_iomgr_create_endpoint_pair( + const char *name, grpc_resource_quota *resource_quota, + size_t read_slice_size) { grpc_endpoint_pair endpoint_pair; // TODO(mlumish): implement this properly under libuv GPR_ASSERT(false && diff --git a/src/core/lib/iomgr/port.h b/src/core/lib/iomgr/port.h index c0bb3b5a23e..f1897bb91f2 100644 --- a/src/core/lib/iomgr/port.h +++ b/src/core/lib/iomgr/port.h @@ -90,7 +90,6 @@ #define GRPC_POSIX_SOCKETUTILS #endif #elif defined(GPR_APPLE) -#define GRPC_HAVE_IP_PKTINFO 1 #define GRPC_HAVE_SO_NOSIGPIPE 1 #define GRPC_HAVE_UNIX_SOCKET 1 #define GRPC_MSG_IOVLEN_TYPE int @@ -102,7 +101,6 @@ #define GRPC_TIMER_USE_GENERIC 1 #elif defined(GPR_FREEBSD) #define GRPC_HAVE_IPV6_RECVPKTINFO 1 -#define GRPC_HAVE_IP_PKTINFO 1 #define GRPC_HAVE_SO_NOSIGPIPE 1 #define GRPC_HAVE_UNIX_SOCKET 1 #define GRPC_POSIX_NO_SPECIAL_WAKEUP_FD 1 diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c index ddc7a88c5bc..f8b710f2404 100644 --- a/src/core/lib/iomgr/resource_quota.c +++ b/src/core/lib/iomgr/resource_quota.c @@ -166,8 +166,11 @@ static void rq_step(grpc_exec_ctx *exec_ctx, void *rq, grpc_error *error) { do { if (rq_alloc(exec_ctx, resource_quota)) goto done; } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota)); - rq_reclaim(exec_ctx, resource_quota, false) || - rq_reclaim(exec_ctx, resource_quota, true); + + if (!rq_reclaim(exec_ctx, resource_quota, false)) { + rq_reclaim(exec_ctx, resource_quota, true); + } + done: grpc_resource_quota_internal_unref(exec_ctx, resource_quota); } diff --git a/src/core/lib/tsi/ssl_transport_security.c b/src/core/lib/tsi/ssl_transport_security.c index 749b46e19f4..366dca95077 100644 --- a/src/core/lib/tsi/ssl_transport_security.c +++ b/src/core/lib/tsi/ssl_transport_security.c @@ -31,9 +31,6 @@ * */ -#include "src/core/lib/iomgr/sockaddr.h" - -#include "src/core/lib/iomgr/socket_utils.h" #include "src/core/lib/tsi/ssl_transport_security.h" #include @@ -41,6 +38,15 @@ #include #include +/* TODO(jboeuf): refactor inet_ntop into a portability header. */ +/* Note: for whomever reads this and tries to refactor this, this + can't be in grpc, it has to be in gpr. 
*/ +#ifdef GPR_WINDOWS +#include +#else +#include +#endif + #include #include #include @@ -349,8 +355,8 @@ static tsi_result add_subject_alt_names_properties_to_peer( result = TSI_INTERNAL_ERROR; break; } - const char *name = grpc_inet_ntop(af, subject_alt_name->d.iPAddress->data, - ntop_buf, INET6_ADDRSTRLEN); + const char *name = inet_ntop(af, subject_alt_name->d.iPAddress->data, + ntop_buf, INET6_ADDRSTRLEN); if (name == NULL) { gpr_log(GPR_ERROR, "Could not get IP string from asn1 octet."); result = TSI_INTERNAL_ERROR; diff --git a/src/cpp/server/server_builder.cc b/src/cpp/server/server_builder.cc index 953a4337ece..00a90bb184c 100644 --- a/src/cpp/server/server_builder.cc +++ b/src/cpp/server/server_builder.cc @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -55,6 +56,7 @@ static void do_plugin_list_init(void) { ServerBuilder::ServerBuilder() : max_receive_message_size_(-1), max_send_message_size_(-1), + sync_server_settings_(SyncServerSettings()), resource_quota_(nullptr), generic_service_(nullptr) { gpr_once_init(&once_init_plugin_list, do_plugin_list_init); @@ -63,6 +65,7 @@ ServerBuilder::ServerBuilder() auto& factory = *it; plugins_.emplace_back(factory()); } + // all compression algorithms enabled by default. enabled_compression_algorithms_bitset_ = (1u << GRPC_COMPRESS_ALGORITHMS_COUNT) - 1; @@ -102,7 +105,7 @@ ServerBuilder& ServerBuilder::RegisterAsyncGenericService( gpr_log(GPR_ERROR, "Adding multiple AsyncGenericService is unsupported for now. " "Dropping the service %p", - service); + (void*)service); } else { generic_service_ = service; } @@ -115,6 +118,25 @@ ServerBuilder& ServerBuilder::SetOption( return *this; } +ServerBuilder& ServerBuilder::SetSyncServerOption( + ServerBuilder::SyncServerOption option, int val) { + switch (option) { + case NUM_CQS: + sync_server_settings_.num_cqs = val; + break; + case MIN_POLLERS: + sync_server_settings_.min_pollers = val; + break; + case MAX_POLLERS: + sync_server_settings_.max_pollers = val; + break; + case CQ_TIMEOUT_MSEC: + sync_server_settings_.cq_timeout_msec = val; + break; + } + return *this; +} + ServerBuilder& ServerBuilder::SetCompressionAlgorithmSupportStatus( grpc_compression_algorithm algorithm, bool enabled) { if (enabled) { @@ -157,35 +179,24 @@ ServerBuilder& ServerBuilder::AddListeningPort( } std::unique_ptr ServerBuilder::BuildAndStart() { - std::unique_ptr thread_pool; - bool has_sync_methods = false; - for (auto it = services_.begin(); it != services_.end(); ++it) { - if ((*it)->service->has_synchronous_methods()) { - if (!thread_pool) { - thread_pool.reset(CreateDefaultThreadPool()); - has_sync_methods = true; - break; - } - } - } ChannelArguments args; for (auto option = options_.begin(); option != options_.end(); ++option) { (*option)->UpdateArguments(&args); (*option)->UpdatePlugins(&plugins_); } + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { - if (!thread_pool && (*plugin)->has_sync_methods()) { - thread_pool.reset(CreateDefaultThreadPool()); - has_sync_methods = true; - } (*plugin)->UpdateChannelArguments(&args); } + if (max_receive_message_size_ >= 0) { args.SetInt(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, max_receive_message_size_); } + if (max_send_message_size_ >= 0) { args.SetInt(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, max_send_message_size_); } + args.SetInt(GRPC_COMPRESSION_CHANNEL_ENABLED_ALGORITHMS_BITSET, enabled_compression_algorithms_bitset_); if (maybe_default_compression_level_.is_set) { @@ -196,31 +207,89 @@ std::unique_ptr 
ServerBuilder::BuildAndStart() { args.SetInt(GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM, maybe_default_compression_algorithm_.algorithm); } + if (resource_quota_ != nullptr) { args.SetPointerWithVtable(GRPC_ARG_RESOURCE_QUOTA, resource_quota_, grpc_resource_quota_arg_vtable()); } - std::unique_ptr server(new Server(thread_pool.release(), true, - max_receive_message_size_, &args)); + + // == Determine if the server has any syncrhonous methods == + bool has_sync_methods = false; + for (auto it = services_.begin(); it != services_.end(); ++it) { + if ((*it)->service->has_synchronous_methods()) { + has_sync_methods = true; + break; + } + } + + if (!has_sync_methods) { + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { + if ((*plugin)->has_sync_methods()) { + has_sync_methods = true; + break; + } + } + } + + // If this is a Sync server, i.e a server expositing sync API, then the server + // needs to create some completion queues to listen for incoming requests. + // 'sync_server_cqs' are those internal completion queues. + // + // This is different from the completion queues added to the server via + // ServerBuilder's AddCompletionQueue() method (those completion queues + // are in 'cqs_' member variable of ServerBuilder object) + std::shared_ptr>> + sync_server_cqs(std::make_shared< + std::vector>>()); + + if (has_sync_methods) { + // This is a Sync server + gpr_log(GPR_INFO, + "Synchronous server. Num CQs: %d, Min pollers: %d, Max Pollers: " + "%d, CQ timeout (msec): %d", + sync_server_settings_.num_cqs, sync_server_settings_.min_pollers, + sync_server_settings_.max_pollers, + sync_server_settings_.cq_timeout_msec); + + // Create completion queues to listen to incoming rpc requests + for (int i = 0; i < sync_server_settings_.num_cqs; i++) { + sync_server_cqs->emplace_back(new ServerCompletionQueue()); + } + } + + std::unique_ptr server(new Server( + max_receive_message_size_, &args, sync_server_cqs, + sync_server_settings_.min_pollers, sync_server_settings_.max_pollers, + sync_server_settings_.cq_timeout_msec)); + ServerInitializer* initializer = server->initializer(); - // If the server has atleast one sync methods, we know that this is a Sync - // server or a Hybrid server and the completion queue (server->cq_) would be - // frequently polled. - int num_frequently_polled_cqs = has_sync_methods ? 1 : 0; - - for (auto cq = cqs_.begin(); cq != cqs_.end(); ++cq) { - // A completion queue that is not polled frequently (by calling Next() or - // AsyncNext()) is not safe to use for listening to incoming channels. - // Register all such completion queues as non-listening completion queues - // with the GRPC core library. - if ((*cq)->IsFrequentlyPolled()) { - grpc_server_register_completion_queue(server->server_, (*cq)->cq(), + // Register all the completion queues with the server. i.e + // 1. sync_server_cqs: internal completion queues created IF this is a sync + // server + // 2. cqs_: Completion queues added via AddCompletionQueue() call + + // All sync cqs (if any) are frequently polled by ThreadManager + int num_frequently_polled_cqs = sync_server_cqs->size(); + + for (auto it = sync_server_cqs->begin(); it != sync_server_cqs->end(); ++it) { + grpc_server_register_completion_queue(server->server_, (*it)->cq(), + nullptr); + } + + // cqs_ contains the completion queue added by calling the ServerBuilder's + // AddCompletionQueue() API. 
Some of them may not be frequently polled (i.e by + // calling Next() or AsyncNext()) and hence are not safe to be used for + // listening to incoming channels. Such completion queues must be registered + // as non-listening queues + for (auto it = cqs_.begin(); it != cqs_.end(); ++it) { + if ((*it)->IsFrequentlyPolled()) { + grpc_server_register_completion_queue(server->server_, (*it)->cq(), nullptr); num_frequently_polled_cqs++; } else { grpc_server_register_non_listening_completion_queue(server->server_, - (*cq)->cq(), nullptr); + (*it)->cq(), nullptr); } } @@ -236,9 +305,11 @@ std::unique_ptr ServerBuilder::BuildAndStart() { return nullptr; } } + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { (*plugin)->InitServer(initializer); } + if (generic_service_) { server->RegisterAsyncGenericService(generic_service_); } else { @@ -251,6 +322,7 @@ std::unique_ptr ServerBuilder::BuildAndStart() { } } } + for (auto port = ports_.begin(); port != ports_.end(); port++) { int r = server->AddListeningPort(port->addr, port->creds.get()); if (!r) return nullptr; @@ -258,13 +330,16 @@ std::unique_ptr ServerBuilder::BuildAndStart() { *port->selected_port = r; } } + auto cqs_data = cqs_.empty() ? nullptr : &cqs_[0]; if (!server->Start(cqs_data, cqs_.size())) { return nullptr; } + for (auto plugin = plugins_.begin(); plugin != plugins_.end(); plugin++) { (*plugin)->Finish(initializer); } + return server; } diff --git a/src/cpp/server/server_cc.cc b/src/cpp/server/server_cc.cc index 3f89275370e..d46942d2573 100644 --- a/src/cpp/server/server_cc.cc +++ b/src/cpp/server/server_cc.cc @@ -1,5 +1,4 @@ /* - * * Copyright 2015, Google Inc. * All rights reserved. * @@ -52,7 +51,7 @@ #include #include "src/core/lib/profiling/timers.h" -#include "src/cpp/server/thread_pool_interface.h" +#include "src/cpp/thread_manager/thread_manager.h" namespace grpc { @@ -118,12 +117,9 @@ class Server::UnimplementedAsyncResponse GRPC_FINAL UnimplementedAsyncRequest* const request_; }; -class Server::ShutdownRequest GRPC_FINAL : public CompletionQueueTag { +class ShutdownTag : public CompletionQueueTag { public: - bool FinalizeResult(void** tag, bool* status) { - delete this; - return false; - } + bool FinalizeResult(void** tag, bool* status) { return false; } }; class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { @@ -147,36 +143,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_metadata_array_destroy(&request_metadata_); } - static SyncRequest* Wait(CompletionQueue* cq, bool* ok) { - void* tag = nullptr; - *ok = false; - if (!cq->Next(&tag, ok)) { - return nullptr; - } - auto* mrd = static_cast(tag); - GPR_ASSERT(mrd->in_flight_); - return mrd; - } - - static bool AsyncWait(CompletionQueue* cq, SyncRequest** req, bool* ok, - gpr_timespec deadline) { - void* tag = nullptr; - *ok = false; - switch (cq->AsyncNext(&tag, ok, deadline)) { - case CompletionQueue::TIMEOUT: - *req = nullptr; - return true; - case CompletionQueue::SHUTDOWN: - *req = nullptr; - return false; - case CompletionQueue::GOT_EVENT: - *req = static_cast(tag); - GPR_ASSERT((*req)->in_flight_); - return true; - } - GPR_UNREACHABLE_CODE(return false); - } - void SetupRequest() { cq_ = grpc_completion_queue_create(nullptr); } void TeardownRequest() { @@ -266,7 +232,6 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { void* const tag_; bool in_flight_; const bool has_request_payload_; - uint32_t incoming_flags_; grpc_call* call_; grpc_call_details* call_details_; gpr_timespec 
deadline_; @@ -275,33 +240,141 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag { grpc_completion_queue* cq_; }; +// Implementation of ThreadManager. Each instance of SyncRequestThreadManager +// manages a pool of threads that poll for incoming Sync RPCs and call the +// appropriate RPC handlers +class Server::SyncRequestThreadManager : public ThreadManager { + public: + SyncRequestThreadManager(Server* server, CompletionQueue* server_cq, + std::shared_ptr global_callbacks, + int min_pollers, int max_pollers, + int cq_timeout_msec) + : ThreadManager(min_pollers, max_pollers), + server_(server), + server_cq_(server_cq), + cq_timeout_msec_(cq_timeout_msec), + global_callbacks_(global_callbacks) {} + + WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE { + *tag = nullptr; + gpr_timespec deadline = + gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN); + + switch (server_cq_->AsyncNext(tag, ok, deadline)) { + case CompletionQueue::TIMEOUT: + return TIMEOUT; + case CompletionQueue::SHUTDOWN: + return SHUTDOWN; + case CompletionQueue::GOT_EVENT: + return WORK_FOUND; + } + + GPR_UNREACHABLE_CODE(return TIMEOUT); + } + + void DoWork(void* tag, bool ok) GRPC_OVERRIDE { + SyncRequest* sync_req = static_cast(tag); + + if (!sync_req) { + // No tag. Nothing to work on. This is an unlikley scenario and possibly a + // bug in RPC Manager implementation. + gpr_log(GPR_ERROR, "Sync server. DoWork() was called with NULL tag"); + return; + } + + if (ok) { + // Calldata takes ownership of the completion queue inside sync_req + SyncRequest::CallData cd(server_, sync_req); + { + // Prepare for the next request + if (!IsShutdown()) { + sync_req->SetupRequest(); // Create new completion queue for sync_req + sync_req->Request(server_->c_server(), server_cq_->cq()); + } + } + + GPR_TIMER_SCOPE("cd.Run()", 0); + cd.Run(global_callbacks_); + } + // TODO (sreek) If ok is false here (which it isn't in case of + // grpc_request_registered_call), we should still re-queue the request + // object + } + + void AddSyncMethod(RpcServiceMethod* method, void* tag) { + sync_requests_.emplace_back(new SyncRequest(method, tag)); + } + + void AddUnknownSyncMethod() { + if (!sync_requests_.empty()) { + unknown_method_.reset(new RpcServiceMethod( + "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); + sync_requests_.emplace_back( + new SyncRequest(unknown_method_.get(), nullptr)); + } + } + + void ShutdownAndDrainCompletionQueue() { + server_cq_->Shutdown(); + + // Drain any pending items from the queue + void* tag; + bool ok; + while (server_cq_->Next(&tag, &ok)) { + // Nothing to be done here + } + } + + void Start() { + if (!sync_requests_.empty()) { + for (auto m = sync_requests_.begin(); m != sync_requests_.end(); m++) { + (*m)->SetupRequest(); + (*m)->Request(server_->c_server(), server_cq_->cq()); + } + + Initialize(); // ThreadManager's Initialize() + } + } + + private: + Server* server_; + CompletionQueue* server_cq_; + int cq_timeout_msec_; + std::vector> sync_requests_; + std::unique_ptr unknown_method_; + std::shared_ptr global_callbacks_; +}; + static internal::GrpcLibraryInitializer g_gli_initializer; -Server::Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned, - int max_receive_message_size, ChannelArguments* args) +Server::Server( + int max_receive_message_size, ChannelArguments* args, + std::shared_ptr>> + sync_server_cqs, + int min_pollers, int max_pollers, int sync_cq_timeout_msec) : max_receive_message_size_(max_receive_message_size), + 
sync_server_cqs_(sync_server_cqs), started_(false), shutdown_(false), shutdown_notified_(false), - num_running_cb_(0), - sync_methods_(new std::list), has_generic_service_(false), server_(nullptr), - thread_pool_(thread_pool), - thread_pool_owned_(thread_pool_owned), server_initializer_(new ServerInitializer(this)) { g_gli_initializer.summon(); gpr_once_init(&g_once_init_callbacks, InitGlobalCallbacks); global_callbacks_ = g_callbacks; global_callbacks_->UpdateArguments(args); + + for (auto it = sync_server_cqs_->begin(); it != sync_server_cqs_->end(); + it++) { + sync_req_mgrs_.emplace_back(new SyncRequestThreadManager( + this, (*it).get(), global_callbacks_, min_pollers, max_pollers, + sync_cq_timeout_msec)); + } + grpc_channel_args channel_args; args->SetChannelArgs(&channel_args); + server_ = grpc_server_create(&channel_args, nullptr); - if (thread_pool_ == nullptr) { - grpc_server_register_non_listening_completion_queue(server_, cq_.cq(), - nullptr); - } else { - grpc_server_register_completion_queue(server_, cq_.cq(), nullptr); - } } Server::~Server() { @@ -311,17 +384,14 @@ Server::~Server() { lock.unlock(); Shutdown(); } else if (!started_) { - cq_.Shutdown(); + // Shutdown the completion queues + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->ShutdownAndDrainCompletionQueue(); + } } } - void* got_tag; - bool ok; - GPR_ASSERT(!cq_.Next(&got_tag, &ok)); + grpc_server_destroy(server_); - if (thread_pool_owned_) { - delete thread_pool_; - } - delete sync_methods_; } void Server::SetGlobalCallbacks(GlobalCallbacks* callbacks) { @@ -352,12 +422,14 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { "Can only register an asynchronous service against one server."); service->server_ = this; } + const char* method_name = nullptr; for (auto it = service->methods_.begin(); it != service->methods_.end(); ++it) { if (it->get() == nullptr) { // Handled by generic service if any. continue; } + RpcServiceMethod* method = it->get(); void* tag = grpc_server_register_method( server_, method->name(), host ? host->c_str() : nullptr, @@ -367,11 +439,15 @@ bool Server::RegisterService(const grpc::string* host, Service* service) { method->name()); return false; } - if (method->handler() == nullptr) { + + if (method->handler() == nullptr) { // Async method method->set_server_tag(tag); } else { - sync_methods_->emplace_back(method, tag); + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->AddSyncMethod(method, tag); + } } + method_name = method->name(); } @@ -406,28 +482,19 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) { grpc_server_start(server_); if (!has_generic_service_) { - if (!sync_methods_->empty()) { - unknown_method_.reset(new RpcServiceMethod( - "unknown", RpcMethod::BIDI_STREAMING, new UnknownMethodHandler)); - // Use of emplace_back with just constructor arguments is not accepted - // here by gcc-4.4 because it can't match the anonymous nullptr with a - // proper constructor implicitly. Construct the object and use push_back. - sync_methods_->push_back(SyncRequest(unknown_method_.get(), nullptr)); + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->AddUnknownSyncMethod(); } + for (size_t i = 0; i < num_cqs; i++) { if (cqs[i]->IsFrequentlyPolled()) { new UnimplementedAsyncRequest(this, cqs[i]); } } } - // Start processing rpcs. 
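
The min_pollers/max_pollers/sync_cq_timeout_msec values consumed by this constructor originate from the new ServerBuilder::SetSyncServerOption() API added earlier in this change. For reference, a minimal caller-side sketch of that configuration; the address, service pointer, and numeric values are illustrative assumptions, and the sketch assumes the NUM_CQS/MIN_POLLERS/MAX_POLLERS/CQ_TIMEOUT_MSEC enumerators are publicly visible on ServerBuilder, as the switch statement above suggests:

    #include <memory>
    #include <grpc++/security/server_credentials.h>
    #include <grpc++/server.h>
    #include <grpc++/server_builder.h>

    // Illustrative only: tune the sync server's internal completion queues and
    // polling threads via the new SetSyncServerOption() knobs.
    void RunSyncServer(grpc::Service* my_sync_service /* hypothetical sync service */) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
      builder.RegisterService(my_sync_service);
      builder.SetSyncServerOption(grpc::ServerBuilder::NUM_CQS, 2);
      builder.SetSyncServerOption(grpc::ServerBuilder::MIN_POLLERS, 1);
      builder.SetSyncServerOption(grpc::ServerBuilder::MAX_POLLERS, 8);
      builder.SetSyncServerOption(grpc::ServerBuilder::CQ_TIMEOUT_MSEC, 1000);
      std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
      server->Wait();
    }
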
- if (!sync_methods_->empty()) { - for (auto m = sync_methods_->begin(); m != sync_methods_->end(); m++) { - m->SetupRequest(); - m->Request(server_, cq_.cq()); - } - ScheduleCallback(); + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Start(); } return true; @@ -437,29 +504,43 @@ void Server::ShutdownInternal(gpr_timespec deadline) { grpc::unique_lock lock(mu_); if (started_ && !shutdown_) { shutdown_ = true; - grpc_server_shutdown_and_notify(server_, cq_.cq(), new ShutdownRequest()); - cq_.Shutdown(); - lock.unlock(); - // Spin, eating requests until the completion queue is completely shutdown. - // If the deadline expires then cancel anything that's pending and keep - // spinning forever until the work is actually drained. - // Since nothing else needs to touch state guarded by mu_, holding it - // through this loop is fine. - SyncRequest* request; + + /// The completion queue to use for server shutdown completion notification + CompletionQueue shutdown_cq; + ShutdownTag shutdown_tag; // Dummy shutdown tag + grpc_server_shutdown_and_notify(server_, shutdown_cq.cq(), &shutdown_tag); + + // Shutdown all ThreadManagers. This will try to gracefully stop all the + // threads in the ThreadManagers (once they process any inflight requests) + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Shutdown(); // ThreadManager's Shutdown() + } + + shutdown_cq.Shutdown(); + + void* tag; bool ok; - while (SyncRequest::AsyncWait(&cq_, &request, &ok, deadline)) { - if (request == NULL) { // deadline expired - grpc_server_cancel_all_calls(server_); - deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC); - } else if (ok) { - SyncRequest::CallData call_data(this, request); - } + CompletionQueue::NextStatus status = + shutdown_cq.AsyncNext(&tag, &ok, deadline); + + // If this timed out, it means we are done with the grace period for a clean + // shutdown. We should force a shutdown now by cancelling all inflight calls + if (status == CompletionQueue::NextStatus::TIMEOUT) { + grpc_server_cancel_all_calls(server_); } - lock.lock(); + // Else in case of SHUTDOWN or GOT_EVENT, it means that the server has + // successfully shutdown - // Wait for running callbacks to finish. - while (num_running_cb_ != 0) { - callback_cv_.wait(lock); + // Wait for threads in all ThreadManagers to terminate + for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) { + (*it)->Wait(); + (*it)->ShutdownAndDrainCompletionQueue(); + } + + // Drain the shutdown queue (if the previous call to AsyncNext() timed out + // and we didn't remove the tag from the queue yet) + while (shutdown_cq.Next(&tag, &ok)) { + // Nothing to be done here. Just ignore ok and tag values } shutdown_notified_ = true; @@ -585,47 +666,6 @@ Server::UnimplementedAsyncResponse::UnimplementedAsyncResponse( request_->stream()->call_.PerformOps(this); } -void Server::ScheduleCallback() { - { - grpc::unique_lock lock(mu_); - num_running_cb_++; - } - thread_pool_->Add(std::bind(&Server::RunRpc, this)); -} - -void Server::RunRpc() { - // Wait for one more incoming rpc. 
- bool ok; - GPR_TIMER_SCOPE("Server::RunRpc", 0); - auto* mrd = SyncRequest::Wait(&cq_, &ok); - if (mrd) { - ScheduleCallback(); - if (ok) { - SyncRequest::CallData cd(this, mrd); - { - mrd->SetupRequest(); - grpc::unique_lock lock(mu_); - if (!shutdown_) { - mrd->Request(server_, cq_.cq()); - } else { - // destroy the structure that was created - mrd->TeardownRequest(); - } - } - GPR_TIMER_SCOPE("cd.Run()", 0); - cd.Run(global_callbacks_); - } - } - - { - grpc::unique_lock lock(mu_); - num_running_cb_--; - if (shutdown_) { - callback_cv_.notify_all(); - } - } -} - ServerInitializer* Server::initializer() { return server_initializer_.get(); } } // namespace grpc diff --git a/src/cpp/thread_manager/thread_manager.cc b/src/cpp/thread_manager/thread_manager.cc new file mode 100644 index 00000000000..caae4c457d8 --- /dev/null +++ b/src/cpp/thread_manager/thread_manager.cc @@ -0,0 +1,181 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include + +#include "src/cpp/thread_manager/thread_manager.h" + +namespace grpc { + +ThreadManager::WorkerThread::WorkerThread(ThreadManager* thd_mgr) + : thd_mgr_(thd_mgr), thd_(&ThreadManager::WorkerThread::Run, this) {} + +void ThreadManager::WorkerThread::Run() { + thd_mgr_->MainWorkLoop(); + thd_mgr_->MarkAsCompleted(this); +} + +ThreadManager::WorkerThread::~WorkerThread() { thd_.join(); } + +ThreadManager::ThreadManager(int min_pollers, int max_pollers) + : shutdown_(false), + num_pollers_(0), + min_pollers_(min_pollers), + max_pollers_(max_pollers == -1 ? 
INT_MAX : max_pollers), + num_threads_(0) {} + +ThreadManager::~ThreadManager() { + { + std::unique_lock lock(mu_); + GPR_ASSERT(num_threads_ == 0); + } + + CleanupCompletedThreads(); +} + +void ThreadManager::Wait() { + std::unique_lock lock(mu_); + while (num_threads_ != 0) { + shutdown_cv_.wait(lock); + } +} + +void ThreadManager::Shutdown() { + std::unique_lock lock(mu_); + shutdown_ = true; +} + +bool ThreadManager::IsShutdown() { + std::unique_lock lock(mu_); + return shutdown_; +} + +void ThreadManager::MarkAsCompleted(WorkerThread* thd) { + { + std::unique_lock list_lock(list_mu_); + completed_threads_.push_back(thd); + } + + grpc::unique_lock lock(mu_); + num_threads_--; + if (num_threads_ == 0) { + shutdown_cv_.notify_one(); + } +} + +void ThreadManager::CleanupCompletedThreads() { + std::unique_lock lock(list_mu_); + for (auto thd = completed_threads_.begin(); thd != completed_threads_.end(); + thd = completed_threads_.erase(thd)) { + delete *thd; + } +} + +void ThreadManager::Initialize() { + for (int i = 0; i < min_pollers_; i++) { + MaybeCreatePoller(); + } +} + +// If the number of pollers (i.e threads currently blocked in PollForWork()) is +// less than max threshold (i.e max_pollers_) and the total number of threads is +// below the maximum threshold, we can let the current thread continue as poller +bool ThreadManager::MaybeContinueAsPoller() { + std::unique_lock lock(mu_); + if (shutdown_ || num_pollers_ > max_pollers_) { + return false; + } + + num_pollers_++; + return true; +} + +// Create a new poller if the current number of pollers i.e num_pollers_ (i.e +// threads currently blocked in PollForWork()) is below the threshold (i.e +// min_pollers_) and the total number of threads is below the maximum threshold +void ThreadManager::MaybeCreatePoller() { + grpc::unique_lock lock(mu_); + if (!shutdown_ && num_pollers_ < min_pollers_) { + num_pollers_++; + num_threads_++; + + // Create a new thread (which ends up calling the MainWorkLoop() function + new WorkerThread(this); + } +} + +void ThreadManager::MainWorkLoop() { + void* tag; + bool ok; + + /* + 1. Poll for work (i.e PollForWork()) + 2. After returning from PollForWork, reduce the number of pollers by 1. If + PollForWork() returned a TIMEOUT, then it may indicate that we have more + polling threads than needed. Check if the number of pollers is greater + than min_pollers and if so, terminate the thread. + 3. Since we are short of one poller now, see if a new poller has to be + created (i.e see MaybeCreatePoller() for more details) + 4. Do the actual work (DoWork()) + 5. After doing the work, see it this thread can resume polling work (i.e + see MaybeContinueAsPoller() for more details) */ + do { + WorkStatus work_status = PollForWork(&tag, &ok); + + { + grpc::unique_lock lock(mu_); + num_pollers_--; + + if (work_status == TIMEOUT && num_pollers_ > min_pollers_) { + break; + } + } + + // Note that MaybeCreatePoller does check for shutdown and creates a new + // thread only if ThreadManager is not shutdown + if (work_status == WORK_FOUND) { + MaybeCreatePoller(); + DoWork(tag, ok); + } + } while (MaybeContinueAsPoller()); + + CleanupCompletedThreads(); + + // If we are here, either ThreadManager is shutting down or it already has + // enough threads. 
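
A minimal sketch of a ThreadManager subclass, in the spirit of the new thread_manager_test, showing the PollForWork()/DoWork() contract implemented above. The class name, sleep duration, and poller counts are illustrative, and plain C++11 override/final are used in place of the tree's GRPC_OVERRIDE/GRPC_FINAL macros:

    #include <chrono>
    #include <thread>
    #include "src/cpp/thread_manager/thread_manager.h"

    // Illustrative only: a trivial ThreadManager that "finds" work on every poll
    // and sleeps briefly to simulate doing it.
    class EchoThreadManager final : public grpc::ThreadManager {
     public:
      EchoThreadManager(int min_pollers, int max_pollers)
          : grpc::ThreadManager(min_pollers, max_pollers) {}

      WorkStatus PollForWork(void** tag, bool* ok) override {
        if (IsShutdown()) return SHUTDOWN;  // terminate the polling thread
        *tag = nullptr;                     // no real work item to identify
        *ok = true;
        return WORK_FOUND;
      }

      void DoWork(void* tag, bool ok) override {
        // Pretend to process the work item found by PollForWork().
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
      }
    };

    // Typical usage: start the pollers, let them run, then shut down and wait.
    //   EchoThreadManager mgr(2 /* min_pollers */, 10 /* max_pollers */);
    //   mgr.Initialize();
    //   ...
    //   mgr.Shutdown();
    //   mgr.Wait();
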
+} + +} // namespace grpc diff --git a/src/cpp/thread_manager/thread_manager.h b/src/cpp/thread_manager/thread_manager.h new file mode 100644 index 00000000000..9cfdb8af25f --- /dev/null +++ b/src/cpp/thread_manager/thread_manager.h @@ -0,0 +1,159 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H +#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H + +#include +#include + +#include +#include +#include + +namespace grpc { + +class ThreadManager { + public: + explicit ThreadManager(int min_pollers, int max_pollers); + virtual ~ThreadManager(); + + // Initializes and Starts the Rpc Manager threads + void Initialize(); + + // The return type of PollForWork() function + enum WorkStatus { WORK_FOUND, SHUTDOWN, TIMEOUT }; + + // "Polls" for new work. + // If the return value is WORK_FOUND: + // - The implementaion of PollForWork() MAY set some opaque identifier to + // (identify the work item found) via the '*tag' parameter + // - The implementaion MUST set the value of 'ok' to 'true' or 'false'. A + // value of 'false' indicates some implemenation specific error (that is + // neither SHUTDOWN nor TIMEOUT) + // - ThreadManager does not interpret the values of 'tag' and 'ok' + // - ThreadManager WILL call DoWork() and pass '*tag' and 'ok' as input to + // DoWork() + // + // If the return value is SHUTDOWN:, + // - ThreadManager WILL NOT call DoWork() and terminates the thead + // + // If the return value is TIMEOUT:, + // - ThreadManager WILL NOT call DoWork() + // - ThreadManager MAY terminate the thread depending on the current number + // of active poller threads and mix_pollers/max_pollers settings + // - Also, the value of timeout is specific to the derived class + // implementation + virtual WorkStatus PollForWork(void** tag, bool* ok) = 0; + + // The implementation of DoWork() is supposed to perform the work found by + // PollForWork(). 
The tag and ok parameters are the same as returned by + // PollForWork() + // + // The implementation of DoWork() should also do any setup needed to ensure + // that the next call to PollForWork() (not necessarily by the current thread) + // actually finds some work + virtual void DoWork(void* tag, bool ok) = 0; + + // Mark the ThreadManager as shutdown and begin draining the work. This is a + // non-blocking call and the caller should call Wait(), a blocking call which + // returns only once the shutdown is complete + void Shutdown(); + + // Has Shutdown() been called + bool IsShutdown(); + + // A blocking call that returns only after the ThreadManager has shutdown and + // all the threads have drained all the outstanding work + void Wait(); + + private: + // Helper wrapper class around std::thread. This takes a ThreadManager object + // and starts a new std::thread to calls the Run() function. + // + // The Run() function calls ThreadManager::MainWorkLoop() function and once + // that completes, it marks the WorkerThread completed by calling + // ThreadManager::MarkAsCompleted() + class WorkerThread { + public: + WorkerThread(ThreadManager* thd_mgr); + ~WorkerThread(); + + private: + // Calls thd_mgr_->MainWorkLoop() and once that completes, calls + // thd_mgr_>MarkAsCompleted(this) to mark the thread as completed + void Run(); + + ThreadManager* thd_mgr_; + grpc::thread thd_; + }; + + // The main funtion in ThreadManager + void MainWorkLoop(); + + // Create a new poller if the number of current pollers is less than the + // minimum number of pollers needed (i.e min_pollers). + void MaybeCreatePoller(); + + // Returns true if the current thread can resume as a poller. i.e if the + // current number of pollers is less than the max_pollers. + bool MaybeContinueAsPoller(); + + void MarkAsCompleted(WorkerThread* thd); + void CleanupCompletedThreads(); + + // Protects shutdown_, num_pollers_ and num_threads_ + // TODO: sreek - Change num_pollers and num_threads_ to atomics + grpc::mutex mu_; + + bool shutdown_; + grpc::condition_variable shutdown_cv_; + + // Number of threads doing polling + int num_pollers_; + + // The minimum and maximum number of threads that should be doing polling + int min_pollers_; + int max_pollers_; + + // The total number of threads (includes threads includes the threads that are + // currently polling i.e num_pollers_) + int num_threads_; + + grpc::mutex list_mu_; + std::list completed_threads_; +}; + +} // namespace grpc + +#endif // GRPC_INTERNAL_CPP_THREAD_MANAGER_H diff --git a/src/node/ext/byte_buffer.cc b/src/node/ext/byte_buffer.cc index 399cdcd8147..017d7962c31 100644 --- a/src/node/ext/byte_buffer.cc +++ b/src/node/ext/byte_buffer.cc @@ -44,8 +44,8 @@ namespace grpc { namespace node { +using Nan::MaybeLocal; -using v8::Context; using v8::Function; using v8::Local; using v8::Object; @@ -89,15 +89,19 @@ Local ByteBufferToBuffer(grpc_byte_buffer *buffer) { Local MakeFastBuffer(Local slowBuffer) { Nan::EscapableHandleScope scope; Local globalObj = Nan::GetCurrentContext()->Global(); + MaybeLocal constructorValue = Nan::Get( + globalObj, Nan::New("Buffer").ToLocalChecked()); Local bufferConstructor = Local::Cast( - globalObj->Get(Nan::New("Buffer").ToLocalChecked())); - Local consArgs[3] = { + constructorValue.ToLocalChecked()); + const int argc = 3; + Local consArgs[argc] = { slowBuffer, Nan::New(::node::Buffer::Length(slowBuffer)), Nan::New(0) }; - Local fastBuffer = bufferConstructor->NewInstance(3, consArgs); - return scope.Escape(fastBuffer); + MaybeLocal 
fastBuffer = Nan::NewInstance(bufferConstructor, + argc, consArgs); + return scope.Escape(fastBuffer.ToLocalChecked()); } } // namespace node } // namespace grpc diff --git a/src/node/ext/call.cc b/src/node/ext/call.cc index b48a7bd6987..191e763e0e7 100644 --- a/src/node/ext/call.cc +++ b/src/node/ext/call.cc @@ -669,16 +669,16 @@ NAN_METHOD(Call::New) { return Nan::ThrowTypeError("Call's fourth argument must be a string"); } call = new Call(wrapped_call); - info.This()->SetHiddenValue(Nan::New("channel_").ToLocalChecked(), - channel_object); + Nan::Set(info.This(), Nan::New("channel_").ToLocalChecked(), + channel_object); } call->Wrap(info.This()); info.GetReturnValue().Set(info.This()); } else { const int argc = 4; Local argv[argc] = {info[0], info[1], info[2], info[3]}; - MaybeLocal maybe_instance = constructor->GetFunction()->NewInstance( - argc, argv); + MaybeLocal maybe_instance = Nan::NewInstance( + constructor->GetFunction(), argc, argv); if (maybe_instance.IsEmpty()) { // There's probably a pending exception return; diff --git a/src/node/ext/channel.cc b/src/node/ext/channel.cc index c4028170e73..5bc58b9b324 100644 --- a/src/node/ext/channel.cc +++ b/src/node/ext/channel.cc @@ -208,8 +208,8 @@ NAN_METHOD(Channel::New) { } else { const int argc = 3; Local argv[argc] = {info[0], info[1], info[2]}; - MaybeLocal maybe_instance = constructor->GetFunction()->NewInstance( - argc, argv); + MaybeLocal maybe_instance = Nan::NewInstance( + constructor->GetFunction(), argc, argv); if (maybe_instance.IsEmpty()) { // There's probably a pending exception return; diff --git a/src/node/ext/server.cc b/src/node/ext/server.cc index 29f31ff15ee..70d5b96f397 100644 --- a/src/node/ext/server.cc +++ b/src/node/ext/server.cc @@ -222,7 +222,7 @@ NAN_METHOD(Server::New) { const int argc = 1; Local argv[argc] = {info[0]}; MaybeLocal maybe_instance = - constructor->GetFunction()->NewInstance(argc, argv); + Nan::NewInstance(constructor->GetFunction(), argc, argv); if (maybe_instance.IsEmpty()) { // There's probably a pending exception return; diff --git a/src/node/src/common.js b/src/node/src/common.js index 22159dd39f7..c6c6d597a88 100644 --- a/src/node/src/common.js +++ b/src/node/src/common.js @@ -141,7 +141,7 @@ exports.getProtobufServiceAttrs = function getProtobufServiceAttrs(service, binaryAsBase64 = options.binaryAsBase64; longsAsStrings = options.longsAsStrings; } - return _.object(_.map(service.children, function(method) { + return _.fromPairs(_.map(service.children, function(method) { return [_.camelCase(method.name), { path: prefix + method.name, requestStream: method.requestStream, diff --git a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec index 0c3c3216abe..6e594fd3edc 100644 --- a/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec +++ b/src/objective-c/!ProtoCompiler-gRPCPlugin.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler-gRPCPlugin' - v = '1.0.0' + v = '1.0.1' s.version = v s.summary = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.' s.description = <<-DESC @@ -95,7 +95,7 @@ Pod::Spec.new do |s| s.preserve_paths = plugin # Restrict the protoc version to the one supported by this plugin. 
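
Returning to the Node extension changes above: the deprecated v8::Object::Get() and v8::Function::NewInstance(argc, argv) calls are replaced with Nan::Get() and Nan::NewInstance(), which return Maybe values that must be unwrapped. A hedged sketch of that pattern in isolation; the helper name is hypothetical and assumes NAN 2.x:

    #include <nan.h>

    using v8::Function;
    using v8::Local;
    using v8::Object;
    using v8::Value;

    // Hypothetical helper: construct an instance of a constructor found on the
    // global object (e.g. "Buffer") using the Maybe-based NAN APIs.
    Local<Object> ConstructFromGlobal(const char* ctor_name, int argc,
                                      Local<Value>* argv) {
      Nan::EscapableHandleScope scope;
      Local<Object> global = Nan::GetCurrentContext()->Global();
      Nan::MaybeLocal<Value> ctor_val =
          Nan::Get(global, Nan::New(ctor_name).ToLocalChecked());
      Local<Function> ctor = Local<Function>::Cast(ctor_val.ToLocalChecked());
      Nan::MaybeLocal<Object> instance = Nan::NewInstance(ctor, argc, argv);
      return scope.Escape(instance.ToLocalChecked());
    }
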
- s.dependency '!ProtoCompiler', '3.0.0' + s.dependency '!ProtoCompiler', '3.0.2' # For the Protobuf dependency not to complain: s.ios.deployment_target = '7.1' s.osx.deployment_target = '10.9' diff --git a/src/objective-c/!ProtoCompiler.podspec b/src/objective-c/!ProtoCompiler.podspec index 5018dedc066..b55f6c93c62 100644 --- a/src/objective-c/!ProtoCompiler.podspec +++ b/src/objective-c/!ProtoCompiler.podspec @@ -36,7 +36,7 @@ Pod::Spec.new do |s| # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed # before them. s.name = '!ProtoCompiler' - v = '3.0.0' + v = '3.0.2' s.version = v s.summary = 'The Protobuf Compiler (protoc) generates Objective-C files from .proto files' s.description = <<-DESC diff --git a/src/objective-c/BoringSSL.podspec b/src/objective-c/BoringSSL.podspec index e14f39b898c..47b5b1a2e7b 100644 --- a/src/objective-c/BoringSSL.podspec +++ b/src/objective-c/BoringSSL.podspec @@ -31,7 +31,7 @@ Pod::Spec.new do |s| s.name = 'BoringSSL' - version = '6.0' + version = '7.0' s.version = version s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.' # Adapted from the homepage: @@ -70,7 +70,7 @@ Pod::Spec.new do |s| s.source = { :git => 'https://boringssl.googlesource.com/boringssl', :tag => "version_for_cocoapods_#{version}", - # :commit => '4ac2dc4c0d48ca45da4f66c40e60d6b425fa94a3', + # :commit => '4fec04b48406111cb88fdd8d196253adc54f7a31', } name = 'openssl' @@ -388,42 +388,42 @@ Pod::Spec.new do |s| 0x28340c19, 0x283480ac, 0x283500ea, - 0x2c322843, - 0x2c32a851, - 0x2c332863, - 0x2c33a875, - 0x2c342889, - 0x2c34a89b, - 0x2c3528b6, - 0x2c35a8c8, - 0x2c3628db, + 0x2c322910, + 0x2c32a91e, + 0x2c332930, + 0x2c33a942, + 0x2c342956, + 0x2c34a968, + 0x2c352983, + 0x2c35a995, + 0x2c3629a8, 0x2c36832d, - 0x2c3728e8, - 0x2c37a8fa, - 0x2c38290d, - 0x2c38a924, - 0x2c392932, - 0x2c39a942, - 0x2c3a2954, - 0x2c3aa968, - 0x2c3b2979, - 0x2c3ba998, - 0x2c3c29ac, - 0x2c3ca9c2, - 0x2c3d29db, - 0x2c3da9f8, - 0x2c3e2a09, - 0x2c3eaa17, - 0x2c3f2a2f, - 0x2c3faa47, - 0x2c402a54, + 0x2c3729b5, + 0x2c37a9c7, + 0x2c3829da, + 0x2c38a9f1, + 0x2c3929ff, + 0x2c39aa0f, + 0x2c3a2a21, + 0x2c3aaa35, + 0x2c3b2a46, + 0x2c3baa65, + 0x2c3c2a79, + 0x2c3caa8f, + 0x2c3d2aa8, + 0x2c3daac5, + 0x2c3e2ad6, + 0x2c3eaae4, + 0x2c3f2afc, + 0x2c3fab14, + 0x2c402b21, 0x2c4090e7, - 0x2c412a65, - 0x2c41aa78, + 0x2c412b32, + 0x2c41ab45, 0x2c4210c0, - 0x2c42aa89, + 0x2c42ab56, 0x2c430720, - 0x2c43a98a, + 0x2c43aa57, 0x30320000, 0x30328015, 0x3033001f, @@ -576,174 +576,183 @@ Pod::Spec.new do |s| 0x403b9861, 0x403c0064, 0x403c8083, - 0x403d1890, - 0x403d98a6, - 0x403e18b5, - 0x403e98c8, - 0x403f18e2, - 0x403f98f0, - 0x40401905, - 0x40409919, - 0x40411936, - 0x40419951, - 0x4042196a, - 0x4042997d, - 0x40431991, - 0x404399a9, - 0x404419c0, + 0x403d18aa, + 0x403d98c0, + 0x403e18cf, + 0x403e98e2, + 0x403f18fc, + 0x403f990a, + 0x4040191f, + 0x40409933, + 0x40411950, + 0x4041996b, + 0x40421984, + 0x40429997, + 0x404319ab, + 0x404399c3, + 0x404419da, 0x404480ac, - 0x404519d5, - 0x404599e7, - 0x40461a0b, - 0x40469a2b, - 0x40471a39, - 0x40479a60, - 0x40481a89, - 0x40489aa2, - 0x40491ab9, - 0x40499ad3, - 0x404a1aea, - 0x404a9b08, - 0x404b1b20, - 0x404b9b37, - 0x404c1b4d, - 0x404c9b5f, - 0x404d1b80, - 0x404d9ba2, - 0x404e1bb6, - 0x404e9bc3, - 0x404f1bf0, - 0x404f9c19, - 0x40501c43, - 0x40509c57, - 0x40511c72, - 0x40519c82, - 0x40521c99, - 0x40529cbd, - 0x40531cd5, - 0x40539ce8, - 0x40541cfd, - 0x40549d20, - 0x40551d2e, - 0x40559d4b, - 0x40561d58, - 0x40569d71, - 
0x40571d89, - 0x40579d9c, - 0x40581db1, - 0x40589dc3, - 0x40591df2, - 0x40599e0b, - 0x405a1e1f, - 0x405a9e2f, - 0x405b1e47, - 0x405b9e58, - 0x405c1e6b, - 0x405c9e7c, - 0x405d1e89, - 0x405d9ea0, - 0x405e1ec0, + 0x404519ef, + 0x40459a01, + 0x40461a25, + 0x40469a45, + 0x40471a53, + 0x40479a7a, + 0x40481ab7, + 0x40489ad0, + 0x40491ae7, + 0x40499b01, + 0x404a1b18, + 0x404a9b36, + 0x404b1b4e, + 0x404b9b65, + 0x404c1b7b, + 0x404c9b8d, + 0x404d1bae, + 0x404d9bd0, + 0x404e1be4, + 0x404e9bf1, + 0x404f1c1e, + 0x404f9c47, + 0x40501c71, + 0x40509c85, + 0x40511ca0, + 0x40519cb0, + 0x40521cc7, + 0x40529ceb, + 0x40531d03, + 0x40539d16, + 0x40541d2b, + 0x40549d4e, + 0x40551d5c, + 0x40559d79, + 0x40561d86, + 0x40569d9f, + 0x40571db7, + 0x40579dca, + 0x40581ddf, + 0x40589e06, + 0x40591e35, + 0x40599e62, + 0x405a1e76, + 0x405a9e86, + 0x405b1e9e, + 0x405b9eaf, + 0x405c1ec2, + 0x405c9ee3, + 0x405d1ef0, + 0x405d9f07, + 0x405e1f27, 0x405e8a95, - 0x405f1ee1, - 0x405f9eee, - 0x40601efc, - 0x40609f1e, - 0x40611f46, - 0x40619f5b, - 0x40621f72, - 0x40629f83, - 0x40631f94, - 0x40639fa9, - 0x40641fc0, - 0x40649fd1, - 0x40651fec, - 0x4065a003, - 0x4066201b, - 0x4066a045, - 0x40672070, - 0x4067a091, - 0x406820a4, - 0x4068a0c5, - 0x406920f7, - 0x4069a125, - 0x406a2146, - 0x406aa166, - 0x406b22ee, - 0x406ba311, - 0x406c2327, - 0x406ca553, - 0x406d2582, - 0x406da5aa, - 0x406e25c3, - 0x406ea5db, - 0x406f25fa, - 0x406fa60f, - 0x40702622, - 0x4070a63f, + 0x405f1f48, + 0x405f9f55, + 0x40601f63, + 0x40609f85, + 0x40611fad, + 0x40619fc2, + 0x40621fd9, + 0x40629fea, + 0x40631ffb, + 0x4063a010, + 0x40642027, + 0x4064a053, + 0x4065206e, + 0x4065a085, + 0x4066209d, + 0x4066a0c7, + 0x406720f2, + 0x4067a113, + 0x40682126, + 0x4068a147, + 0x40692179, + 0x4069a1a7, + 0x406a21c8, + 0x406aa1e8, + 0x406b2370, + 0x406ba393, + 0x406c23a9, + 0x406ca60b, + 0x406d263a, + 0x406da662, + 0x406e2690, + 0x406ea6a8, + 0x406f26c7, + 0x406fa6dc, + 0x407026ef, + 0x4070a70c, 0x40710800, - 0x4071a651, - 0x40722664, - 0x4072a67d, - 0x40732695, + 0x4071a71e, + 0x40722731, + 0x4072a74a, + 0x40732762, 0x4073936d, - 0x407426a9, - 0x4074a6c3, - 0x407526d4, - 0x4075a6e8, - 0x407626f6, + 0x40742776, + 0x4074a790, + 0x407527a1, + 0x4075a7b5, + 0x407627c3, 0x407691aa, - 0x4077271b, - 0x4077a73d, - 0x40782758, - 0x4078a791, - 0x407927a8, - 0x4079a7be, - 0x407a27ca, - 0x407aa7dd, - 0x407b27f2, - 0x407ba804, - 0x407c2819, - 0x407ca822, - 0x407d20e0, - 0x407d9c29, - 0x407e276d, - 0x407e9dd3, - 0x407f1a4d, - 0x407f986d, - 0x40801c00, - 0x40809a75, - 0x40811cab, - 0x40819bda, - 0x41f42219, - 0x41f922ab, - 0x41fe219e, - 0x41fea37a, - 0x41ff246b, - 0x42032232, - 0x42082254, - 0x4208a290, - 0x42092182, - 0x4209a2ca, - 0x420a21d9, - 0x420aa1b9, - 0x420b21f9, - 0x420ba272, - 0x420c2487, - 0x420ca347, - 0x420d2361, - 0x420da398, - 0x421223b2, - 0x4217244e, - 0x4217a3f4, - 0x421c2416, - 0x421f23d1, - 0x4221249e, - 0x42262431, - 0x422b2537, - 0x422ba500, - 0x422c251f, - 0x422ca4da, - 0x422d24b9, + 0x407727e8, + 0x4077a80a, + 0x40782825, + 0x4078a85e, + 0x40792875, + 0x4079a88b, + 0x407a2897, + 0x407aa8aa, + 0x407b28bf, + 0x407ba8d1, + 0x407c28e6, + 0x407ca8ef, + 0x407d2162, + 0x407d9c57, + 0x407e283a, + 0x407e9e16, + 0x407f1a67, + 0x407f9887, + 0x40801c2e, + 0x40809a8f, + 0x40811cd9, + 0x40819c08, + 0x4082267b, + 0x4082986d, + 0x40831df1, + 0x4083a038, + 0x40841aa3, + 0x40849e4e, + 0x40851ed3, + 0x41f4229b, + 0x41f9232d, + 0x41fe2220, + 0x41fea3fc, + 0x41ff24ed, + 0x420322b4, + 0x420822d6, + 0x4208a312, + 0x42092204, + 0x4209a34c, + 0x420a225b, + 0x420aa23b, + 0x420b227b, + 
0x420ba2f4, + 0x420c2509, + 0x420ca3c9, + 0x420d23e3, + 0x420da41a, + 0x42122434, + 0x421724d0, + 0x4217a476, + 0x421c2498, + 0x421f2453, + 0x42212520, + 0x422624b3, + 0x422b25ef, + 0x422ba59d, + 0x422c25d7, + 0x422ca55c, + 0x422d253b, + 0x422da5bc, + 0x422e2582, 0x4432072b, 0x4432873a, 0x44330746, @@ -786,69 +795,69 @@ Pod::Spec.new do |s| 0x4c3d136d, 0x4c3d937c, 0x4c3e1389, - 0x50322a9b, - 0x5032aaaa, - 0x50332ab5, - 0x5033aac5, - 0x50342ade, - 0x5034aaf8, - 0x50352b06, - 0x5035ab1c, - 0x50362b2e, - 0x5036ab44, - 0x50372b5d, - 0x5037ab70, - 0x50382b88, - 0x5038ab99, - 0x50392bae, - 0x5039abc2, - 0x503a2be2, - 0x503aabf8, - 0x503b2c10, - 0x503bac22, - 0x503c2c3e, - 0x503cac55, - 0x503d2c6e, - 0x503dac84, - 0x503e2c91, - 0x503eaca7, - 0x503f2cb9, + 0x50322b68, + 0x5032ab77, + 0x50332b82, + 0x5033ab92, + 0x50342bab, + 0x5034abc5, + 0x50352bd3, + 0x5035abe9, + 0x50362bfb, + 0x5036ac11, + 0x50372c2a, + 0x5037ac3d, + 0x50382c55, + 0x5038ac66, + 0x50392c7b, + 0x5039ac8f, + 0x503a2caf, + 0x503aacc5, + 0x503b2cdd, + 0x503bacef, + 0x503c2d0b, + 0x503cad22, + 0x503d2d3b, + 0x503dad51, + 0x503e2d5e, + 0x503ead74, + 0x503f2d86, 0x503f8382, - 0x50402ccc, - 0x5040acdc, - 0x50412cf6, - 0x5041ad05, - 0x50422d1f, - 0x5042ad3c, - 0x50432d4c, - 0x5043ad5c, - 0x50442d6b, + 0x50402d99, + 0x5040ada9, + 0x50412dc3, + 0x5041add2, + 0x50422dec, + 0x5042ae09, + 0x50432e19, + 0x5043ae29, + 0x50442e38, 0x5044843f, - 0x50452d7f, - 0x5045ad9d, - 0x50462db0, - 0x5046adc6, - 0x50472dd8, - 0x5047aded, - 0x50482e13, - 0x5048ae21, - 0x50492e34, - 0x5049ae49, - 0x504a2e5f, - 0x504aae6f, - 0x504b2e8f, - 0x504baea2, - 0x504c2ec5, - 0x504caef3, - 0x504d2f05, - 0x504daf22, - 0x504e2f3d, - 0x504eaf59, - 0x504f2f6b, - 0x504faf82, - 0x50502f91, + 0x50452e4c, + 0x5045ae6a, + 0x50462e7d, + 0x5046ae93, + 0x50472ea5, + 0x5047aeba, + 0x50482ee0, + 0x5048aeee, + 0x50492f01, + 0x5049af16, + 0x504a2f2c, + 0x504aaf3c, + 0x504b2f5c, + 0x504baf6f, + 0x504c2f92, + 0x504cafc0, + 0x504d2fd2, + 0x504dafef, + 0x504e300a, + 0x504eb026, + 0x504f3038, + 0x504fb04f, + 0x5050305e, 0x505086ef, - 0x50512fa4, + 0x50513071, 0x58320ec9, 0x68320e8b, 0x68328c25, @@ -1209,6 +1218,7 @@ Pod::Spec.new do |s| "BAD_SSL_FILETYPE\\0" "BAD_WRITE_RETRY\\0" "BIO_NOT_SET\\0" + "BLOCK_CIPHER_PAD_IS_WRONG\\0" "BUFFERED_MESSAGES_ON_CIPHER_CHANGE\\0" "CA_DN_LENGTH_MISMATCH\\0" "CA_DN_TOO_LONG\\0" @@ -1233,6 +1243,7 @@ Pod::Spec.new do |s| "DOWNGRADE_DETECTED\\0" "DTLS_MESSAGE_TOO_BIG\\0" "DUPLICATE_EXTENSION\\0" + "DUPLICATE_KEY_SHARE\\0" "ECC_CERT_NOT_FOR_SIGNING\\0" "EMS_STATE_INCONSISTENT\\0" "ENCRYPTED_LENGTH_TOO_LONG\\0" @@ -1270,15 +1281,18 @@ Pod::Spec.new do |s| "NO_CERTIFICATE_SET\\0" "NO_CIPHERS_AVAILABLE\\0" "NO_CIPHERS_PASSED\\0" + "NO_CIPHERS_SPECIFIED\\0" "NO_CIPHER_MATCH\\0" "NO_COMMON_SIGNATURE_ALGORITHMS\\0" "NO_COMPRESSION_SPECIFIED\\0" + "NO_GROUPS_SPECIFIED\\0" "NO_METHOD_SPECIFIED\\0" "NO_P256_SUPPORT\\0" "NO_PRIVATE_KEY_ASSIGNED\\0" "NO_RENEGOTIATION\\0" "NO_REQUIRED_DIGEST\\0" "NO_SHARED_CIPHER\\0" + "NO_SHARED_GROUP\\0" "NULL_SSL_CTX\\0" "NULL_SSL_METHOD_PASSED\\0" "OLD_SESSION_CIPHER_NOT_RETURNED\\0" @@ -1294,6 +1308,7 @@ Pod::Spec.new do |s| "READ_TIMEOUT_EXPIRED\\0" "RECORD_LENGTH_MISMATCH\\0" "RECORD_TOO_LARGE\\0" + "RENEGOTIATION_EMS_MISMATCH\\0" "RENEGOTIATION_ENCODING_ERR\\0" "RENEGOTIATION_MISMATCH\\0" "REQUIRED_CIPHER_MISSING\\0" @@ -1338,12 +1353,15 @@ Pod::Spec.new do |s| "TLSV1_ALERT_USER_CANCELLED\\0" "TLSV1_BAD_CERTIFICATE_HASH_VALUE\\0" "TLSV1_BAD_CERTIFICATE_STATUS_RESPONSE\\0" + "TLSV1_CERTIFICATE_REQUIRED\\0" 
"TLSV1_CERTIFICATE_UNOBTAINABLE\\0" + "TLSV1_UNKNOWN_PSK_IDENTITY\\0" "TLSV1_UNRECOGNIZED_NAME\\0" "TLSV1_UNSUPPORTED_EXTENSION\\0" "TLS_PEER_DID_NOT_RESPOND_WITH_CERTIFICATE_LIST\\0" "TLS_RSA_ENCRYPTED_VALUE_LENGTH_IS_WRONG\\0" "TOO_MANY_EMPTY_FRAGMENTS\\0" + "TOO_MANY_KEY_UPDATES\\0" "TOO_MANY_WARNING_ALERTS\\0" "UNABLE_TO_FIND_ECDH_PARAMETERS\\0" "UNEXPECTED_EXTENSION\\0" diff --git a/src/objective-c/CronetFramework.podspec b/src/objective-c/CronetFramework.podspec index 3ebcacf0554..2f47b02c0c0 100644 --- a/src/objective-c/CronetFramework.podspec +++ b/src/objective-c/CronetFramework.podspec @@ -30,14 +30,47 @@ Pod::Spec.new do |s| s.name = "CronetFramework" - s.version = "0.0.2" + s.version = "0.0.3" s.summary = "Cronet, precompiled and used as a framework." s.homepage = "http://chromium.org" - s.license = { :type => 'BSD' } + s.license = { + :type => 'BSD', + :text => <<-LICENSE + Copyright 2015, Google Inc. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + LICENSE + } s.vendored_framework = "Cronet.framework" s.author = "The Chromium Authors" - s.ios.deployment_target = "7.1" + s.ios.deployment_target = "8.0" s.source = { :http => 'https://storage.googleapis.com/grpc-precompiled-binaries/cronet/Cronet.framework.zip' } s.preserve_paths = "Cronet.framework" s.public_header_files = "Cronet.framework/Headers/**/*{.h}" + s.source_files = "Cronet.framework/Headers/**/*{.h}" end diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m index 0524472f530..dd0479083de 100644 --- a/src/objective-c/GRPCClient/private/GRPCHost.m +++ b/src/objective-c/GRPCClient/private/GRPCHost.m @@ -51,7 +51,7 @@ NS_ASSUME_NONNULL_BEGIN // TODO(jcanizales): Generate the version in a standalone header, from // templates. Like // templates/src/core/surface/version.c.template . 
-#define GRPC_OBJC_VERSION_STRING @"1.0.0" +#define GRPC_OBJC_VERSION_STRING @"1.0.1" static NSMutableDictionary *kHostCache; diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php index 8a7f6572a62..36d94cae2c9 100644 --- a/src/php/lib/Grpc/BaseStub.php +++ b/src/php/lib/Grpc/BaseStub.php @@ -116,7 +116,7 @@ class BaseStub } /** - * @param $timeout in microseconds + * @param int $timeout in microseconds * * @return bool true if channel is ready * @throw Exception if channel is in FATAL_ERROR state @@ -189,7 +189,7 @@ class BaseStub /** * validate and normalize the metadata array. * - * @param $metadata The metadata map + * @param array $metadata The metadata map * * @return $metadata Validated and key-normalized metadata map * @throw InvalidArgumentException if key contains invalid characters @@ -216,8 +216,8 @@ class BaseStub * Call a remote method that takes a single argument and has a * single output. * - * @param string $method The name of the method to call - * @param $argument The argument to the method + * @param string $method The name of the method to call + * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server * @@ -250,8 +250,8 @@ class BaseStub * Call a remote method that takes a stream of arguments and has a single * output. * - * @param string $method The name of the method to call - * @param $arguments An array or Traversable of arguments to stream to the + * @param string $method The name of the method to call + * @param array $arguments An array or Traversable of arguments to stream to the * server * @param callable $deserialize A function that deserializes the response * @param array $metadata A metadata map to send to the server @@ -284,8 +284,8 @@ class BaseStub * Call a remote method that takes a single argument and returns a stream of * responses. * - * @param string $method The name of the method to call - * @param $argument The argument to the method + * @param string $method The name of the method to call + * @param mixed $argument The argument to the method * @param callable $deserialize A function that deserializes the responses * @param array $metadata A metadata map to send to the server * diff --git a/src/proto/grpc/testing/proto2/empty2.proto b/src/proto/grpc/testing/proto2/empty2.proto new file mode 100644 index 00000000000..51f0fe28b1a --- /dev/null +++ b/src/proto/grpc/testing/proto2/empty2.proto @@ -0,0 +1,37 @@ + +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +package grpc.testing.proto2; + +message EmptyWithExtensions { + extensions 100 to 999; +} diff --git a/src/proto/grpc/testing/proto2/empty2_extensions.proto b/src/proto/grpc/testing/proto2/empty2_extensions.proto new file mode 100644 index 00000000000..0229fe3fbd5 --- /dev/null +++ b/src/proto/grpc/testing/proto2/empty2_extensions.proto @@ -0,0 +1,43 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; + +import "src/proto/grpc/testing/proto2/empty2.proto"; + +package grpc.testing.proto2; + +// Fill emptiness with music. 
+extend grpc.testing.proto2.EmptyWithExtensions { + optional int64 Deadmau5 = 124; + optional float Madeon = 125; + optional string AboveAndBeyond = 126; + optional bool Tycho = 127; + optional fixed64 Pendulum = 128; +} diff --git a/src/python/.gitignore b/src/python/.gitignore index f158efa4bf1..7b520579a08 100644 --- a/src/python/.gitignore +++ b/src/python/.gitignore @@ -1 +1,3 @@ gens/ +*_pb2.py +*_pb2_grpc.py diff --git a/src/python/grpcio_health_checking/.gitignore b/src/python/grpcio_health_checking/.gitignore index 85af4668866..432c3194f04 100644 --- a/src/python/grpcio_health_checking/.gitignore +++ b/src/python/grpcio_health_checking/.gitignore @@ -1,5 +1,6 @@ *.proto *_pb2.py +*_pb2_grpc.py build/ grpcio_health_checking.egg-info/ dist/ diff --git a/src/python/grpcio_reflection/.gitignore b/src/python/grpcio_reflection/.gitignore new file mode 100644 index 00000000000..c0befdc8ead --- /dev/null +++ b/src/python/grpcio_reflection/.gitignore @@ -0,0 +1,5 @@ +*.proto +*_pb2.py +build/ +grpcio_reflection.egg-info/ +dist/ diff --git a/src/python/grpcio_reflection/grpc/__init__.py b/src/python/grpcio_reflection/grpc/__init__.py new file mode 100644 index 00000000000..70ac5edd483 --- /dev/null +++ b/src/python/grpcio_reflection/grpc/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +__import__('pkg_resources').declare_namespace(__name__) diff --git a/src/python/grpcio_reflection/grpc/reflection/__init__.py b/src/python/grpcio_reflection/grpc/reflection/__init__.py new file mode 100644 index 00000000000..d5ad73a74ab --- /dev/null +++ b/src/python/grpcio_reflection/grpc/reflection/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py b/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py new file mode 100644 index 00000000000..d5ad73a74ab --- /dev/null +++ b/src/python/grpcio_reflection/grpc/reflection/v1alpha/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py new file mode 100644 index 00000000000..3c399b0d799 --- /dev/null +++ b/src/python/grpcio_reflection/grpc/reflection/v1alpha/reflection.py @@ -0,0 +1,143 @@ +# Copyright 2016, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Reference implementation for reflection in gRPC Python.""" + +import threading + +import grpc +from google.protobuf import descriptor_pb2 +from google.protobuf import descriptor_pool + +from grpc.reflection.v1alpha import reflection_pb2 + +_POOL = descriptor_pool.Default() + +def _not_found_error(): + return reflection_pb2.ServerReflectionResponse( + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.NOT_FOUND.value[0], + error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(), + ) + ) + +def _file_descriptor_response(descriptor): + proto = descriptor_pb2.FileDescriptorProto() + descriptor.CopyToProto(proto) + serialized_proto = proto.SerializeToString() + return reflection_pb2.ServerReflectionResponse( + file_descriptor_response=reflection_pb2.FileDescriptorResponse( + file_descriptor_proto=(serialized_proto,) + ), + ) + + +class ReflectionServicer(reflection_pb2.ServerReflectionServicer): + """Servicer handling RPCs for service reflection.""" + + def __init__(self, service_names, pool=None): + """Constructor. + + Args: + service_names: Iterable of fully-qualified service names available. + """ + self._service_names = list(service_names) + self._pool = _POOL if pool is None else pool + + def _file_by_filename(self, filename): + try: + descriptor = self._pool.FindFileByName(filename) + except KeyError: + return _not_found_error() + else: + return _file_descriptor_response(descriptor) + + def _file_containing_symbol(self, fully_qualified_name): + try: + descriptor = self._pool.FindFileContainingSymbol(fully_qualified_name) + except KeyError: + return _not_found_error() + else: + return _file_descriptor_response(descriptor) + + def _file_containing_extension(self, containing_type, extension_number): + # TODO(atash) Python protobuf currently doesn't support querying extensions.
+ # https://github.com/google/protobuf/issues/2248 + return reflection_pb2.ServerReflectionResponse( + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.UNIMPLEMENTED.value[0], + error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(), + ) + ) + + def _extension_numbers_of_type(self, fully_qualified_name): + # TODO(atash) We're allowed to leave this unsupported according to the + # protocol, but we should still eventually implement it. Hits the same issue + # as `_file_containing_extension`, however. + # https://github.com/google/protobuf/issues/2248 + return reflection_pb2.ServerReflectionResponse( + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.UNIMPLEMENTED.value[0], + error_message=grpc.StatusCode.UNIMPLEMENTED.value[1].encode(), + ) + ) + + def _list_services(self): + return reflection_pb2.ServerReflectionResponse( + list_services_response=reflection_pb2.ListServiceResponse( + service=[ + reflection_pb2.ServiceResponse(name=service_name) + for service_name in self._service_names + ] + ) + ) + + def ServerReflectionInfo(self, request_iterator, context): + for request in request_iterator: + if request.HasField('file_by_filename'): + yield self._file_by_filename(request.file_by_filename) + elif request.HasField('file_containing_symbol'): + yield self._file_containing_symbol(request.file_containing_symbol) + elif request.HasField('file_containing_extension'): + yield self._file_containing_extension( + request.file_containing_extension.containing_type, + request.file_containing_extension.extension_number) + elif request.HasField('all_extension_numbers_of_type'): + yield self._extension_numbers_of_type( + request.all_extension_numbers_of_type) + elif request.HasField('list_services'): + yield self._list_services() + else: + yield reflection_pb2.ServerReflectionResponse( + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0], + error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1].encode(), + ) + ) + diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py new file mode 100644 index 00000000000..9b3c44c0221 --- /dev/null +++ b/src/python/grpcio_reflection/grpc_version.py @@ -0,0 +1,32 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!! + +VERSION='1.1.0.dev0' diff --git a/src/python/grpcio_reflection/reflection_commands.py b/src/python/grpcio_reflection/reflection_commands.py new file mode 100644 index 00000000000..d189aee5774 --- /dev/null +++ b/src/python/grpcio_reflection/reflection_commands.py @@ -0,0 +1,78 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Provides distutils command classes for the GRPC Python setup process.""" + +import os +import shutil + +import setuptools + +ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__))) +HEALTH_PROTO = os.path.join(ROOT_DIR, '../../proto/grpc/reflection/v1alpha/reflection.proto') + + +class CopyProtoModules(setuptools.Command): + """Command to copy proto modules from grpc/src/proto.""" + + description = '' + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + if os.path.isfile(HEALTH_PROTO): + shutil.copyfile( + HEALTH_PROTO, + os.path.join(ROOT_DIR, 'grpc/reflection/v1alpha/reflection.proto')) + + +class BuildPackageProtos(setuptools.Command): + """Command to generate project *_pb2.py modules from proto files.""" + + description = 'build grpc protobuf modules' + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + + def run(self): + # due to limitations of the proto generator, we require that only *one* + # directory is provided as an 'include' directory. We assume it's the '' key + # to `self.distribution.package_dir` (and get a key error if it's not + # there). + from grpc.tools import command + command.build_package_protos(self.distribution.package_dir['']) diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py new file mode 100644 index 00000000000..df95af4de16 --- /dev/null +++ b/src/python/grpcio_reflection/setup.py @@ -0,0 +1,73 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Setup module for the GRPC Python package's optional reflection.""" + +import os +import sys + +import setuptools + +# Ensure we're in the proper directory whether or not we're being used by pip. +os.chdir(os.path.dirname(os.path.abspath(__file__))) + +# Break import-style to ensure we can actually find our commands module. 
+import reflection_commands +import grpc_version + +PACKAGE_DIRECTORIES = { + '': '.', +} + +SETUP_REQUIRES = ( + 'grpcio-tools>={version}'.format(version=grpc_version.VERSION), +) + +INSTALL_REQUIRES = ( + 'protobuf>=3.0.0', + 'grpcio>={version}'.format(version=grpc_version.VERSION), +) + +COMMAND_CLASS = { + # Run preprocess from the repository *before* doing any packaging! + 'preprocess': reflection_commands.CopyProtoModules, + 'build_package_protos': reflection_commands.BuildPackageProtos, +} + +setuptools.setup( + name='grpcio-reflection', + version=grpc_version.VERSION, + license='3-clause BSD', + package_dir=PACKAGE_DIRECTORIES, + packages=setuptools.find_packages('.'), + namespace_packages=['grpc'], + install_requires=INSTALL_REQUIRES, + setup_requires=SETUP_REQUIRES, + cmdclass=COMMAND_CLASS +) diff --git a/src/python/grpcio_tests/.gitignore b/src/python/grpcio_tests/.gitignore index fc620135dc7..dcba283a8ca 100644 --- a/src/python/grpcio_tests/.gitignore +++ b/src/python/grpcio_tests/.gitignore @@ -1,4 +1,5 @@ proto/ src/ *_pb2.py +*_pb2_grpc.py *.egg-info/ diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py index 73842066020..01d5fa875b9 100644 --- a/src/python/grpcio_tests/setup.py +++ b/src/python/grpcio_tests/setup.py @@ -80,8 +80,14 @@ PACKAGE_DATA = { 'credentials/server1.key', 'credentials/server1.pem', ], - 'tests.protoc_plugin': [ - 'protoc_plugin_test.proto', + 'tests.protoc_plugin.protos.invocation_testing': [ + 'same.proto', + ], + 'tests.protoc_plugin.protos.invocation_testing.split_messages': [ + 'messages.proto', + ], + 'tests.protoc_plugin.protos.invocation_testing.split_services': [ + 'services.proto', ], 'tests.unit': [ 'credentials/ca.pem', diff --git a/src/python/grpcio_tests/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py index c2f097f6c61..621bedc7bb8 100644 --- a/src/python/grpcio_tests/tests/_loader.py +++ b/src/python/grpcio_tests/tests/_loader.py @@ -84,11 +84,9 @@ class Loader(object): along. """ for importer, module_name, is_package in ( - pkgutil.iter_modules(package_paths)): + pkgutil.walk_packages(package_paths)): module = importer.find_module(module_name).load_module(module_name) self.visit_module(module) - if is_package: - self.walk_packages(module.__path__) def visit_module(self, module): """Visits the module, adding discovered tests to the test suite. diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py new file mode 100644 index 00000000000..089366a8c72 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py @@ -0,0 +1,304 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import collections +from concurrent import futures +import contextlib +import distutils.spawn +import errno +import importlib +import os +import os.path +import pkgutil +import shutil +import subprocess +import sys +import tempfile +import threading +import unittest + +import grpc +from grpc.tools import protoc +from tests.unit.framework.common import test_constants + +_MESSAGES_IMPORT = b'import "messages.proto";' + +@contextlib.contextmanager +def _system_path(path): + old_system_path = sys.path[:] + sys.path = sys.path[0:1] + path + sys.path[1:] + yield + sys.path = old_system_path + + +class DummySplitServicer(object): + + def __init__(self, request_class, response_class): + self.request_class = request_class + self.response_class = response_class + + def Call(self, request, context): + return self.response_class() + + +class SeparateTestMixin(object): + + def testImportAttributes(self): + with _system_path([self.python_out_directory]): + pb2 = importlib.import_module(self.pb2_import) + pb2.Request + pb2.Response + if self.should_find_services_in_pb2: + pb2.TestServiceServicer + else: + with self.assertRaises(AttributeError): + pb2.TestServiceServicer + + with _system_path([self.grpc_python_out_directory]): + pb2_grpc = importlib.import_module(self.pb2_grpc_import) + pb2_grpc.TestServiceServicer + with self.assertRaises(AttributeError): + pb2_grpc.Request + with self.assertRaises(AttributeError): + pb2_grpc.Response + + def testCall(self): + with _system_path([self.python_out_directory]): + pb2 = importlib.import_module(self.pb2_import) + with _system_path([self.grpc_python_out_directory]): + pb2_grpc = importlib.import_module(self.pb2_grpc_import) + server = grpc.server( + futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + pb2_grpc.add_TestServiceServicer_to_server( + DummySplitServicer( + pb2.Request, pb2.Response), server) + port = server.add_insecure_port('[::]:0') + server.start() + channel = grpc.insecure_channel('localhost:{}'.format(port)) + stub = pb2_grpc.TestServiceStub(channel) + request = pb2.Request() + expected_response = pb2.Response() + response = stub.Call(request) + self.assertEqual(expected_response, response) + + +class CommonTestMixin(object): + + def testImportAttributes(self): + with _system_path([self.python_out_directory]): + pb2 = importlib.import_module(self.pb2_import) + pb2.Request + pb2.Response + if self.should_find_services_in_pb2: + pb2.TestServiceServicer + else: + with self.assertRaises(AttributeError): + pb2.TestServiceServicer + + with _system_path([self.grpc_python_out_directory]): + pb2_grpc = importlib.import_module(self.pb2_grpc_import) + pb2_grpc.TestServiceServicer + with self.assertRaises(AttributeError): + pb2_grpc.Request + with 
self.assertRaises(AttributeError): + pb2_grpc.Response + + def testCall(self): + with _system_path([self.python_out_directory]): + pb2 = importlib.import_module(self.pb2_import) + with _system_path([self.grpc_python_out_directory]): + pb2_grpc = importlib.import_module(self.pb2_grpc_import) + server = grpc.server( + futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE)) + pb2_grpc.add_TestServiceServicer_to_server( + DummySplitServicer( + pb2.Request, pb2.Response), server) + port = server.add_insecure_port('[::]:0') + server.start() + channel = grpc.insecure_channel('localhost:{}'.format(port)) + stub = pb2_grpc.TestServiceStub(channel) + request = pb2.Request() + expected_response = pb2.Response() + response = stub.Call(request) + self.assertEqual(expected_response, response) + + +class SameSeparateTest(unittest.TestCase, SeparateTestMixin): + + def setUp(self): + same_proto_contents = pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing', 'same.proto') + self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.') + self.proto_directory = os.path.join(self.directory, 'proto_path') + self.python_out_directory = os.path.join(self.directory, 'python_out') + self.grpc_python_out_directory = os.path.join(self.directory, 'grpc_python_out') + os.makedirs(self.proto_directory) + os.makedirs(self.python_out_directory) + os.makedirs(self.grpc_python_out_directory) + same_proto_file = os.path.join(self.proto_directory, 'same_separate.proto') + open(same_proto_file, 'wb').write(same_proto_contents) + protoc_result = protoc.main([ + '', + '--proto_path={}'.format(self.proto_directory), + '--python_out={}'.format(self.python_out_directory), + '--grpc_python_out={}'.format(self.grpc_python_out_directory), + same_proto_file, + ]) + if protoc_result != 0: + raise Exception("unexpected protoc error") + open(os.path.join(self.grpc_python_out_directory, '__init__.py'), 'w').write('') + open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('') + self.pb2_import = 'same_separate_pb2' + self.pb2_grpc_import = 'same_separate_pb2_grpc' + self.should_find_services_in_pb2 = False + + def tearDown(self): + shutil.rmtree(self.directory) + + +class SameCommonTest(unittest.TestCase, CommonTestMixin): + + def setUp(self): + same_proto_contents = pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing', 'same.proto') + self.directory = tempfile.mkdtemp(suffix='same_common', dir='.') + self.proto_directory = os.path.join(self.directory, 'proto_path') + self.python_out_directory = os.path.join(self.directory, 'python_out') + self.grpc_python_out_directory = self.python_out_directory + os.makedirs(self.proto_directory) + os.makedirs(self.python_out_directory) + same_proto_file = os.path.join(self.proto_directory, 'same_common.proto') + open(same_proto_file, 'wb').write(same_proto_contents) + protoc_result = protoc.main([ + '', + '--proto_path={}'.format(self.proto_directory), + '--python_out={}'.format(self.python_out_directory), + '--grpc_python_out={}'.format(self.grpc_python_out_directory), + same_proto_file, + ]) + if protoc_result != 0: + raise Exception("unexpected protoc error") + open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('') + self.pb2_import = 'same_common_pb2' + self.pb2_grpc_import = 'same_common_pb2_grpc' + self.should_find_services_in_pb2 = True + + def tearDown(self): + shutil.rmtree(self.directory) + + +class SplitCommonTest(unittest.TestCase, CommonTestMixin): + + def setUp(self): + services_proto_contents = 
pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing.split_services', + 'services.proto') + messages_proto_contents = pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing.split_messages', + 'messages.proto') + self.directory = tempfile.mkdtemp(suffix='split_common', dir='.') + self.proto_directory = os.path.join(self.directory, 'proto_path') + self.python_out_directory = os.path.join(self.directory, 'python_out') + self.grpc_python_out_directory = self.python_out_directory + os.makedirs(self.proto_directory) + os.makedirs(self.python_out_directory) + services_proto_file = os.path.join(self.proto_directory, + 'split_common_services.proto') + messages_proto_file = os.path.join(self.proto_directory, + 'split_common_messages.proto') + open(services_proto_file, 'wb').write(services_proto_contents.replace( + _MESSAGES_IMPORT, + b'import "split_common_messages.proto";' + )) + open(messages_proto_file, 'wb').write(messages_proto_contents) + protoc_result = protoc.main([ + '', + '--proto_path={}'.format(self.proto_directory), + '--python_out={}'.format(self.python_out_directory), + '--grpc_python_out={}'.format(self.python_out_directory), + services_proto_file, + messages_proto_file, + ]) + if protoc_result != 0: + raise Exception("unexpected protoc error") + open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('') + self.pb2_import = 'split_common_messages_pb2' + self.pb2_grpc_import = 'split_common_services_pb2_grpc' + self.should_find_services_in_pb2 = False + + def tearDown(self): + shutil.rmtree(self.directory) + + +class SplitSeparateTest(unittest.TestCase, SeparateTestMixin): + + def setUp(self): + services_proto_contents = pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing.split_services', + 'services.proto') + messages_proto_contents = pkgutil.get_data( + 'tests.protoc_plugin.protos.invocation_testing.split_messages', + 'messages.proto') + self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.') + self.proto_directory = os.path.join(self.directory, 'proto_path') + self.python_out_directory = os.path.join(self.directory, 'python_out') + self.grpc_python_out_directory = os.path.join(self.directory, 'grpc_python_out') + os.makedirs(self.proto_directory) + os.makedirs(self.python_out_directory) + os.makedirs(self.grpc_python_out_directory) + services_proto_file = os.path.join(self.proto_directory, + 'split_separate_services.proto') + messages_proto_file = os.path.join(self.proto_directory, + 'split_separate_messages.proto') + open(services_proto_file, 'wb').write(services_proto_contents.replace( + _MESSAGES_IMPORT, + b'import "split_separate_messages.proto";' + )) + open(messages_proto_file, 'wb').write(messages_proto_contents) + protoc_result = protoc.main([ + '', + '--proto_path={}'.format(self.proto_directory), + '--python_out={}'.format(self.python_out_directory), + '--grpc_python_out={}'.format(self.grpc_python_out_directory), + services_proto_file, + messages_proto_file, + ]) + if protoc_result != 0: + raise Exception("unexpected protoc error") + open(os.path.join(self.python_out_directory, '__init__.py'), 'w').write('') + self.pb2_import = 'split_separate_messages_pb2' + self.pb2_grpc_import = 'split_separate_services_pb2_grpc' + self.should_find_services_in_pb2 = False + + def tearDown(self): + shutil.rmtree(self.directory) + + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py 
b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py new file mode 100644 index 00000000000..2f88fa04122 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/same.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/same.proto new file mode 100644 index 00000000000..269e2fd2c7e --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/same.proto @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc_protoc_plugin.invocation_testing; + +message Request {} +message Response {} + +service TestService { + rpc Call(Request) returns (Response); +} diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py new file mode 100644 index 00000000000..2f88fa04122 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto new file mode 100644 index 00000000000..de22dae0492 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_messages/messages.proto @@ -0,0 +1,35 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc_protoc_plugin.invocation_testing.split; + +message Request {} +message Response {} diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py new file mode 100644 index 00000000000..2f88fa04122 --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/__init__.py @@ -0,0 +1,30 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
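
For reference, the split messages/services protos laid out here are what the new protoc_plugin._split_definitions_test above compiles at test time through grpc.tools.protoc. A minimal sketch of that style of invocation follows; the directory names are placeholders rather than anything defined by this change, and grpcio-tools is assumed to be installed.

    from grpc.tools import protoc

    # Emit message code (*_pb2.py) and gRPC service code (*_pb2_grpc.py) into
    # separate output directories, mirroring the "separate" layout the tests
    # exercise; the paths below are placeholders for a real project layout.
    protoc.main([
        '',  # argv[0] placeholder, as in the test's protoc.main() calls
        '--proto_path=proto_path',
        '--python_out=python_out',
        '--grpc_python_out=grpc_python_out',
        'proto_path/messages.proto',
        'proto_path/services.proto',
    ])
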
+ + diff --git a/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/services.proto b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/services.proto new file mode 100644 index 00000000000..af999cd48db --- /dev/null +++ b/src/python/grpcio_tests/tests/protoc_plugin/protos/invocation_testing/split_services/services.proto @@ -0,0 +1,38 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +import "messages.proto"; + +package grpc_protoc_plugin.invocation_testing.split; + +service TestService { + rpc Call(Request) returns (Response); +} diff --git a/src/python/grpcio_tests/tests/reflection/__init__.py b/src/python/grpcio_tests/tests/reflection/__init__.py new file mode 100644 index 00000000000..100a624dc9c --- /dev/null +++ b/src/python/grpcio_tests/tests/reflection/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py new file mode 100644 index 00000000000..87264cf9ba9 --- /dev/null +++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py @@ -0,0 +1,185 @@ +# Copyright 2016, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
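
The test that follows drives the new ReflectionServicer end to end. As a point of reference, wiring the servicer into an application server looks roughly like the sketch below, which mirrors the test's setUp; the thread-pool size and the service name are placeholders.

    from concurrent import futures

    import grpc
    from grpc.reflection.v1alpha import reflection
    from grpc.reflection.v1alpha import reflection_pb2

    # Expose the server's services over the reflection API alongside whatever
    # application servicers are registered on `server`.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = reflection.ReflectionServicer(
        service_names=('example.ExampleService',))  # placeholder service name
    reflection_pb2.add_ServerReflectionServicer_to_server(servicer, server)
    port = server.add_insecure_port('[::]:0')
    server.start()
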
+ +"""Tests of grpc.reflection.v1alpha.reflection.""" + +import unittest + +import grpc +from grpc.framework.foundation import logging_pool +from grpc.reflection.v1alpha import reflection +from grpc.reflection.v1alpha import reflection_pb2 + +from google.protobuf import descriptor_pool +from google.protobuf import descriptor_pb2 + +from src.proto.grpc.testing.proto2 import empty2_extensions_pb2 +from src.proto.grpc.testing import empty_pb2 +from tests.unit.framework.common import test_constants + +_EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto' +_EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty' +_SERVICE_NAMES = ( + 'Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman', 'Galilei') + +def _file_descriptor_to_proto(descriptor): + proto = descriptor_pb2.FileDescriptorProto() + descriptor.CopyToProto(proto) + return proto.SerializeToString() + +class ReflectionServicerTest(unittest.TestCase): + + def setUp(self): + servicer = reflection.ReflectionServicer(service_names=_SERVICE_NAMES) + server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY) + self._server = grpc.server(server_pool) + port = self._server.add_insecure_port('[::]:0') + reflection_pb2.add_ServerReflectionServicer_to_server(servicer, self._server) + self._server.start() + + channel = grpc.insecure_channel('localhost:%d' % port) + self._stub = reflection_pb2.ServerReflectionStub(channel) + + def testFileByName(self): + requests = ( + reflection_pb2.ServerReflectionRequest( + file_by_filename=_EMPTY_PROTO_FILE_NAME + ), + reflection_pb2.ServerReflectionRequest( + file_by_filename='i-donut-exist' + ), + ) + responses = tuple(self._stub.ServerReflectionInfo(requests)) + expected_responses = ( + reflection_pb2.ServerReflectionResponse( + valid_host='', + file_descriptor_response=reflection_pb2.FileDescriptorResponse( + file_descriptor_proto=( + _file_descriptor_to_proto(empty_pb2.DESCRIPTOR), + ) + ) + ), + reflection_pb2.ServerReflectionResponse( + valid_host='', + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.NOT_FOUND.value[0], + error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(), + ) + ), + ) + self.assertEqual(expected_responses, responses) + + def testFileBySymbol(self): + requests = ( + reflection_pb2.ServerReflectionRequest( + file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME + ), + reflection_pb2.ServerReflectionRequest( + file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo' + ), + ) + responses = tuple(self._stub.ServerReflectionInfo(requests)) + expected_responses = ( + reflection_pb2.ServerReflectionResponse( + valid_host='', + file_descriptor_response=reflection_pb2.FileDescriptorResponse( + file_descriptor_proto=( + _file_descriptor_to_proto(empty_pb2.DESCRIPTOR), + ) + ) + ), + reflection_pb2.ServerReflectionResponse( + valid_host='', + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.NOT_FOUND.value[0], + error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(), + ) + ), + ) + self.assertEqual(expected_responses, responses) + + @unittest.skip('TODO(atash): implement file-containing-extension reflection ' + '(see https://github.com/google/protobuf/issues/2248)') + def testFileContainingExtension(self): + requests = ( + reflection_pb2.ServerReflectionRequest( + file_containing_extension=reflection_pb2.ExtensionRequest( + containing_type='grpc.testing.proto2.Empty', + extension_number=125, + ), + ), + reflection_pb2.ServerReflectionRequest( + file_containing_extension=reflection_pb2.ExtensionRequest( + 
containing_type='i.donut.exist.co.uk.org.net.me.name.foo', + extension_number=55, + ), + ), + ) + responses = tuple(self._stub.ServerReflectionInfo(requests)) + expected_responses = ( + reflection_pb2.ServerReflectionResponse( + valid_host='', + file_descriptor_response=reflection_pb2.FileDescriptorResponse( + file_descriptor_proto=( + _file_descriptor_to_proto(empty_extensions_pb2.DESCRIPTOR), + ) + ) + ), + reflection_pb2.ServerReflectionResponse( + valid_host='', + error_response=reflection_pb2.ErrorResponse( + error_code=grpc.StatusCode.NOT_FOUND.value[0], + error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(), + ) + ), + ) + self.assertEqual(expected_responses, responses) + + def testListServices(self): + requests = ( + reflection_pb2.ServerReflectionRequest( + list_services='', + ), + ) + responses = tuple(self._stub.ServerReflectionInfo(requests)) + expected_responses = ( + reflection_pb2.ServerReflectionResponse( + valid_host='', + list_services_response=reflection_pb2.ListServiceResponse( + service=tuple( + reflection_pb2.ServiceResponse(name=name) + for name in _SERVICE_NAMES + ) + ) + ), + ) + self.assertEqual(expected_responses, responses) + +if __name__ == '__main__': + unittest.main(verbosity=2) diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json index 2071a33e13a..dd4a0257f54 100644 --- a/src/python/grpcio_tests/tests/tests.json +++ b/src/python/grpcio_tests/tests/tests.json @@ -1,44 +1,49 @@ [ - "_api_test.AllTest", - "_api_test.ChannelConnectivityTest", - "_api_test.ChannelTest", - "_auth_test.AccessTokenCallCredentialsTest", - "_auth_test.GoogleCallCredentialsTest", - "_beta_features_test.BetaFeaturesTest", - "_beta_features_test.ContextManagementAndLifecycleTest", - "_cancel_many_calls_test.CancelManyCallsTest", - "_channel_args_test.ChannelArgsTest", - "_channel_connectivity_test.ChannelConnectivityTest", - "_channel_ready_future_test.ChannelReadyFutureTest", - "_channel_test.ChannelTest", - "_compression_test.CompressionTest", - "_connectivity_channel_test.ConnectivityStatesTest", - "_credentials_test.CredentialsTest", - "_empty_message_test.EmptyMessageTest", - "_exit_test.ExitTest", - "_face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", - "_face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", - "_face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", - "_face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", - "_health_servicer_test.HealthServicerTest", - "_implementations_test.CallCredentialsTest", - "_implementations_test.ChannelCredentialsTest", - "_insecure_interop_test.InsecureInteropTest", - "_logging_pool_test.LoggingPoolTest", - "_metadata_code_details_test.MetadataCodeDetailsTest", - "_metadata_test.MetadataTest", - "_not_found_test.NotFoundTest", - "_python_plugin_test.PythonPluginTest", - "_read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest", - "_rpc_test.RPCTest", - "_sanity_test.Sanity", - "_secure_interop_test.SecureInteropTest", - "_thread_cleanup_test.CleanupThreadTest", - "_utilities_test.ChannelConnectivityTest", - "beta_python_plugin_test.PythonPluginTest", - "cygrpc_test.InsecureServerInsecureClient", - "cygrpc_test.SecureServerSecureClient", - "cygrpc_test.TypeSmokeTest" + 
"health_check._health_servicer_test.HealthServicerTest", + "interop._insecure_interop_test.InsecureInteropTest", + "interop._secure_interop_test.SecureInteropTest", + "protoc_plugin._python_plugin_test.PythonPluginTest", + "protoc_plugin._split_definitions_test.SameCommonTest", + "protoc_plugin._split_definitions_test.SameSeparateTest", + "protoc_plugin._split_definitions_test.SplitCommonTest", + "protoc_plugin._split_definitions_test.SplitSeparateTest", + "protoc_plugin.beta_python_plugin_test.PythonPluginTest", + "reflection._reflection_servicer_test.ReflectionServicerTest", + "unit._api_test.AllTest", + "unit._api_test.ChannelConnectivityTest", + "unit._api_test.ChannelTest", + "unit._auth_test.AccessTokenCallCredentialsTest", + "unit._auth_test.GoogleCallCredentialsTest", + "unit._channel_args_test.ChannelArgsTest", + "unit._channel_connectivity_test.ChannelConnectivityTest", + "unit._channel_ready_future_test.ChannelReadyFutureTest", + "unit._compression_test.CompressionTest", + "unit._credentials_test.CredentialsTest", + "unit._cython._cancel_many_calls_test.CancelManyCallsTest", + "unit._cython._channel_test.ChannelTest", + "unit._cython._read_some_but_not_all_responses_test.ReadSomeButNotAllResponsesTest", + "unit._cython.cygrpc_test.InsecureServerInsecureClient", + "unit._cython.cygrpc_test.SecureServerSecureClient", + "unit._cython.cygrpc_test.TypeSmokeTest", + "unit._empty_message_test.EmptyMessageTest", + "unit._exit_test.ExitTest", + "unit._metadata_code_details_test.MetadataCodeDetailsTest", + "unit._metadata_test.MetadataTest", + "unit._rpc_test.RPCTest", + "unit._sanity._sanity_test.Sanity", + "unit._thread_cleanup_test.CleanupThreadTest", + "unit.beta._beta_features_test.BetaFeaturesTest", + "unit.beta._beta_features_test.ContextManagementAndLifecycleTest", + "unit.beta._connectivity_channel_test.ConnectivityStatesTest", + "unit.beta._face_interface_test.DynamicInvokerBlockingInvocationInlineServiceTest", + "unit.beta._face_interface_test.DynamicInvokerFutureInvocationAsynchronousEventServiceTest", + "unit.beta._face_interface_test.GenericInvokerBlockingInvocationInlineServiceTest", + "unit.beta._face_interface_test.GenericInvokerFutureInvocationAsynchronousEventServiceTest", + "unit.beta._face_interface_test.MultiCallableInvokerBlockingInvocationInlineServiceTest", + "unit.beta._face_interface_test.MultiCallableInvokerFutureInvocationAsynchronousEventServiceTest", + "unit.beta._implementations_test.CallCredentialsTest", + "unit.beta._implementations_test.ChannelCredentialsTest", + "unit.beta._not_found_test.NotFoundTest", + "unit.beta._utilities_test.ChannelConnectivityTest", + "unit.framework.foundation._logging_pool_test.LoggingPoolTest" ] diff --git a/src/ruby/ext/grpc/rb_call_credentials.c b/src/ruby/ext/grpc/rb_call_credentials.c index 9b6675da846..280f21c9731 100644 --- a/src/ruby/ext/grpc/rb_call_credentials.c +++ b/src/ruby/ext/grpc/rb_call_credentials.c @@ -86,19 +86,16 @@ static VALUE grpc_rb_call_credentials_callback_rescue(VALUE args, rb_funcall(exception_object, rb_intern("backtrace"), 0), rb_intern("join"), 1, rb_str_new2("\n\tfrom ")); - VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("to_s"), 0); - const char *exception_classname = rb_obj_classname(exception_object); + VALUE rb_exception_info = rb_funcall(exception_object, rb_intern("inspect"), 0); (void)args; - gpr_log(GPR_INFO, "Call credentials callback failed: %s: %s\n%s", - exception_classname, StringValueCStr(rb_exception_info), + gpr_log(GPR_INFO, "Call credentials callback failed: 
%s\n%s", + StringValueCStr(rb_exception_info), StringValueCStr(backtrace)); rb_hash_aset(result, rb_str_new2("metadata"), Qnil); - /* Currently only gives the exception class name. It should be possible get - more details */ rb_hash_aset(result, rb_str_new2("status"), - INT2NUM(GRPC_STATUS_PERMISSION_DENIED)); + INT2NUM(GRPC_STATUS_UNAUTHENTICATED)); rb_hash_aset(result, rb_str_new2("details"), - rb_str_new2(exception_classname)); + rb_exception_info); return result; } diff --git a/src/ruby/lib/grpc/generic/rpc_server.rb b/src/ruby/lib/grpc/generic/rpc_server.rb index 7dbcb7d4796..57f99c8ce69 100644 --- a/src/ruby/lib/grpc/generic/rpc_server.rb +++ b/src/ruby/lib/grpc/generic/rpc_server.rb @@ -54,6 +54,7 @@ module GRPC DEFAULT_MAX_WAITING_REQUESTS = 60 # Default poll period is 1s + # Used for grpc server shutdown and thread pool shutdown timeouts DEFAULT_POLL_PERIOD = 1 # Signal check period is 0.25s @@ -127,7 +128,7 @@ module GRPC deadline = from_relative_time(@poll_period) @server.close(deadline) @pool.shutdown - @pool.wait_for_termination + @pool.wait_for_termination(@poll_period) end def running_state diff --git a/src/ruby/qps/server.rb b/src/ruby/qps/server.rb index d0c2073dd1e..6175855cd94 100644 --- a/src/ruby/qps/server.rb +++ b/src/ruby/qps/server.rb @@ -63,7 +63,9 @@ class BenchmarkServer cred = :this_port_is_insecure end # Make sure server can handle the large number of calls in benchmarks - @server = GRPC::RpcServer.new(pool_size: 100, max_waiting_requests: 100) + # TODO: @apolcyn, if scenario config increases total outstanding + # calls then will need to increase the pool size too + @server = GRPC::RpcServer.new(pool_size: 1024, max_waiting_requests: 1024) @port = @server.add_http2_port("0.0.0.0:" + port.to_s, cred) @server.handle(BenchmarkServiceImpl.new) @start_time = Time.now diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb index e68b8db7ab7..607a4a3c5da 100644 --- a/src/ruby/spec/generic/client_stub_spec.rb +++ b/src/ruby/spec/generic/client_stub_spec.rb @@ -168,23 +168,61 @@ describe 'ClientStub' do expect(&blk).to raise_error(GRPC::BadStatus) th.join end + + it 'should receive UNAUTHENTICATED if call credentials plugin fails' do + server_port = create_secure_test_server + th = run_request_response(@sent_msg, @resp, @pass) + + certs = load_test_certs + secure_channel_creds = GRPC::Core::ChannelCredentials.new( + certs[0], nil, nil) + secure_stub_opts = { + channel_args: { + GRPC::Core::Channel::SSL_TARGET => 'foo.test.google.fr' + } + } + stub = GRPC::ClientStub.new("localhost:#{server_port}", + secure_channel_creds, **secure_stub_opts) + + error_message = 'Failing call credentials callback' + failing_auth = proc do + fail error_message + end + creds = GRPC::Core::CallCredentials.new(failing_auth) + + error_occured = false + begin + get_response(stub, credentials: creds) + rescue GRPC::BadStatus => e + error_occured = true + expect(e.code).to eq(GRPC::Core::StatusCodes::UNAUTHENTICATED) + expect(e.details.include?(error_message)).to be true + end + expect(error_occured).to eq(true) + + # Kill the server thread so tests can complete + th.kill + end end describe 'without a call operation' do - def get_response(stub) + def get_response(stub, credentials: nil) + puts credentials.inspect stub.request_response(@method, @sent_msg, noop, noop, - metadata: { k1: 'v1', k2: 'v2' }) + metadata: { k1: 'v1', k2: 'v2' }, + credentials: credentials) end it_behaves_like 'request response' end describe 'via a call operation' do - def 
get_response(stub, run_start_call_first: false) + def get_response(stub, run_start_call_first: false, credentials: nil) op = stub.request_response(@method, @sent_msg, noop, noop, return_op: true, metadata: { k1: 'v1', k2: 'v2' }, - deadline: from_relative_time(2)) + deadline: from_relative_time(2), + credentials: credentials) expect(op).to be_a(GRPC::ActiveCall::Operation) op.start_call if run_start_call_first result = op.execute @@ -492,6 +530,15 @@ describe 'ClientStub' do end end + def create_secure_test_server + certs = load_test_certs + secure_credentials = GRPC::Core::ServerCredentials.new( + nil, [{ private_key: certs[1], cert_chain: certs[2] }], false) + + @server = GRPC::Core::Server.new(nil) + @server.add_http2_port('0.0.0.0:0', secure_credentials) + end + def create_test_server @server = GRPC::Core::Server.new(nil) @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure) diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template index 1d5a47336c5..d6928a297cb 100644 --- a/templates/gRPC-Core.podspec.template +++ b/templates/gRPC-Core.podspec.template @@ -62,7 +62,7 @@ %> Pod::Spec.new do |s| s.name = 'gRPC-Core' - version = '1.0.0' + version = '1.0.1' s.version = version s.summary = 'Core cross-platform gRPC library, written in C' s.homepage = 'http://www.grpc.io' @@ -149,7 +149,7 @@ ss.header_mappings_dir = '.' ss.libraries = 'z' ss.dependency "#{s.name}/Interface", version - ss.dependency 'BoringSSL', '~> 6.0' + ss.dependency 'BoringSSL', '~> 7.0' # To save you from scrolling, this is the last part of the podspec. ss.source_files = ${ruby_multiline_list(grpc_private_files(libs), 22)} diff --git a/templates/package.json.template b/templates/package.json.template index 2b3d32ec994..81f39d27f51 100644 --- a/templates/package.json.template +++ b/templates/package.json.template @@ -27,26 +27,28 @@ "coverage": "./node_modules/.bin/istanbul cover ./node_modules/.bin/_mocha src/node/test", "install": "./node_modules/.bin/node-pre-gyp install --fallback-to-build" }, - "bundledDependencies": ["node-pre-gyp"], + "bundledDependencies": [ + "node-pre-gyp" + ], "dependencies": { "arguejs": "^0.2.3", - "lodash": "^3.9.3", + "lodash": "^4.15.0", "nan": "^2.0.0", - "protobufjs": "^4.0.0" + "node-pre-gyp": "^0.6.0", + "protobufjs": "^5.0.0" }, "devDependencies": { - "async": "^1.5.0", + "async": "^2.0.1", "body-parser": "^1.15.2", "express": "^4.14.0", "google-auth-library": "^0.9.2", "google-protobuf": "^3.0.0", - "istanbul": "^0.3.21", + "istanbul": "^0.4.4", "jsdoc": "^3.3.2", "jshint": "^2.5.0", "minimist": "^1.1.0", - "mocha": "^2.3.4", - "mocha-jenkins-reporter": "^0.1.9", - "mustache": "^2.0.0", + "mocha": "^3.0.2", + "mocha-jenkins-reporter": "^0.2.3", "poisson-process": "^0.2.1" }, "engines": { @@ -54,11 +56,10 @@ }, "binary": { "module_name": "grpc_node", - "module_path": "./build/Release/", + "module_path": "src/node/extension_binary", "host": "https://storage.googleapis.com/", "remote_path": "grpc-precompiled-binaries/node/{name}/v{version}", - "package_name": "{node_abi}-{platform}-{arch}.tar.gz", - "module_path": "src/node/extension_binary" + "package_name": "{node_abi}-{platform}-{arch}.tar.gz" }, "files": [ "LICENSE", @@ -79,7 +80,7 @@ ], "main": "src/node/index.js", "license": "BSD-3-Clause", - "jshintConfig" : { + "jshintConfig": { "bitwise": true, "curly": true, "eqeqeq": true, diff --git a/templates/src/python/grpcio_reflection/grpc_version.py.template b/templates/src/python/grpcio_reflection/grpc_version.py.template new file mode 100644 
index 00000000000..3e84201f5bc --- /dev/null +++ b/templates/src/python/grpcio_reflection/grpc_version.py.template @@ -0,0 +1,34 @@ +%YAML 1.2 +--- | + # Copyright 2016, Google Inc. + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without + # modification, are permitted provided that the following conditions are + # met: + # + # * Redistributions of source code must retain the above copyright + # notice, this list of conditions and the following disclaimer. + # * Redistributions in binary form must reproduce the above + # copyright notice, this list of conditions and the following disclaimer + # in the documentation and/or other materials provided with the + # distribution. + # * Neither the name of Google Inc. nor the names of its + # contributors may be used to endorse or promote products derived from + # this software without specific prior written permission. + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!! 
+ + VERSION='${settings.python_version.pep440()}' diff --git a/test/core/end2end/tests/resource_quota_server.c b/test/core/end2end/tests/resource_quota_server.c index 10ceb962ae3..1dcbf587a49 100644 --- a/test/core/end2end/tests/resource_quota_server.c +++ b/test/core/end2end/tests/resource_quota_server.c @@ -100,12 +100,16 @@ static void end_test(grpc_end2end_test_fixture *f) { static grpc_slice generate_random_slice() { size_t i; static const char chars[] = "abcdefghijklmnopqrstuvwxyz1234567890"; - char output[1024 * 1024]; - for (i = 0; i < GPR_ARRAY_SIZE(output) - 1; ++i) { + char *output; + const size_t output_size = 1024 * 1024; + output = gpr_malloc(output_size); + for (i = 0; i < output_size - 1; ++i) { output[i] = chars[rand() % (int)(sizeof(chars) - 1)]; } - output[GPR_ARRAY_SIZE(output) - 1] = '\0'; - return grpc_slice_from_copied_string(output); + output[output_size - 1] = '\0'; + grpc_slice out = grpc_slice_from_copied_string(output); + gpr_free(output); + return out; } void resource_quota_server(grpc_end2end_test_config config) { diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc index feadabd8654..43b7d44255e 100644 --- a/test/cpp/end2end/end2end_test.cc +++ b/test/cpp/end2end/end2end_test.cc @@ -250,6 +250,11 @@ class End2endTest : public ::testing::TestWithParam { builder.RegisterService(&service_); builder.RegisterService("foo.test.youtube.com", &special_service_); builder.RegisterService(&dup_pkg_service_); + + builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4); + builder.SetSyncServerOption( + ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10); + server_ = builder.BuildAndStart(); is_server_started_ = true; } @@ -284,6 +289,11 @@ class End2endTest : public ::testing::TestWithParam { ServerBuilder builder; builder.AddListeningPort(proxyaddr.str(), InsecureServerCredentials()); builder.RegisterService(proxy_service_.get()); + + builder.SetSyncServerOption(ServerBuilder::SyncServerOption::NUM_CQS, 4); + builder.SetSyncServerOption( + ServerBuilder::SyncServerOption::CQ_TIMEOUT_MSEC, 10); + proxy_server_ = builder.BuildAndStart(); channel_ = CreateChannel(proxyaddr.str(), InsecureChannelCredentials()); diff --git a/test/cpp/interop/client.cc b/test/cpp/interop/client.cc index 4197ba8bab2..c58910abc3f 100644 --- a/test/cpp/interop/client.cc +++ b/test/cpp/interop/client.cc @@ -54,33 +54,35 @@ DEFINE_int32(server_port, 0, "Server port."); DEFINE_string(server_host, "127.0.0.1", "Server host to connect to"); DEFINE_string(server_host_override, "foo.test.google.fr", "Override the server host which is sent in HTTP header"); -DEFINE_string(test_case, "large_unary", - "Configure different test cases. 
Valid options are:\n\n" - "all : all test cases;\n" - "cancel_after_begin : cancel stream after starting it;\n" - "cancel_after_first_response: cancel on first response;\n" - "client_compressed_streaming : compressed request streaming with " - "client_compressed_unary : single compressed request;\n" - "client_streaming : request streaming with single response;\n" - "compute_engine_creds: large_unary with compute engine auth;\n" - "custom_metadata: server will echo custom metadata;\n" - "empty_stream : bi-di stream with no request/response;\n" - "empty_unary : empty (zero bytes) request and response;\n" - "half_duplex : half-duplex streaming;\n" - "jwt_token_creds: large_unary with JWT token auth;\n" - "large_unary : single request and (large) response;\n" - "oauth2_auth_token: raw oauth2 access token auth;\n" - "per_rpc_creds: raw oauth2 access token on a single rpc;\n" - "ping_pong : full-duplex streaming;\n" - "response streaming;\n" - "server_compressed_streaming : single request with compressed " - "server_compressed_unary : single compressed response;\n" - "server_streaming : single request with response streaming;\n" - "slow_consumer : single request with response streaming with " - "slow client consumer;\n" - "status_code_and_message: verify status code & message;\n" - "timeout_on_sleeping_server: deadline exceeds on stream;\n" - "unimplemented_method: client calls an unimplemented method;\n"); +DEFINE_string( + test_case, "large_unary", + "Configure different test cases. Valid options are:\n\n" + "all : all test cases;\n" + "cancel_after_begin : cancel stream after starting it;\n" + "cancel_after_first_response: cancel on first response;\n" + "client_compressed_streaming : compressed request streaming with " + "client_compressed_unary : single compressed request;\n" + "client_streaming : request streaming with single response;\n" + "compute_engine_creds: large_unary with compute engine auth;\n" + "custom_metadata: server will echo custom metadata;\n" + "empty_stream : bi-di stream with no request/response;\n" + "empty_unary : empty (zero bytes) request and response;\n" + "half_duplex : half-duplex streaming;\n" + "jwt_token_creds: large_unary with JWT token auth;\n" + "large_unary : single request and (large) response;\n" + "oauth2_auth_token: raw oauth2 access token auth;\n" + "per_rpc_creds: raw oauth2 access token on a single rpc;\n" + "ping_pong : full-duplex streaming;\n" + "response streaming;\n" + "server_compressed_streaming : single request with compressed " + "server_compressed_unary : single compressed response;\n" + "server_streaming : single request with response streaming;\n" + "slow_consumer : single request with response streaming with " + "slow client consumer;\n" + "status_code_and_message: verify status code & message;\n" + "timeout_on_sleeping_server: deadline exceeds on stream;\n" + "unimplemented_method: client calls an unimplemented method;\n" + "unimplemented_service: client calls an unimplemented service;\n"); DEFINE_string(default_service_account, "", "Email of GCE default service account"); DEFINE_string(service_account_key_file, "", @@ -152,6 +154,8 @@ int main(int argc, char** argv) { client.DoCustomMetadata(); } else if (FLAGS_test_case == "unimplemented_method") { client.DoUnimplementedMethod(); + } else if (FLAGS_test_case == "unimplemented_service") { + client.DoUnimplementedService(); } else if (FLAGS_test_case == "cacheable_unary") { client.DoCacheableUnary(); } else if (FLAGS_test_case == "all") { @@ -172,6 +176,7 @@ int main(int argc, char** 
argv) { client.DoStatusWithMessage(); client.DoCustomMetadata(); client.DoUnimplementedMethod(); + client.DoUnimplementedService(); client.DoCacheableUnary(); // service_account_creds and jwt_token_creds can only run with ssl. if (FLAGS_use_tls) { @@ -207,7 +212,8 @@ int main(int argc, char** argv) { "server_streaming", "status_code_and_message", "timeout_on_sleeping_server", - "unimplemented_method"}; + "unimplemented_method", + "unimplemented_service"}; char* joined_testcases = gpr_strjoin_sep(testcases, GPR_ARRAY_SIZE(testcases), "\n", NULL); diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc index 1668589cc4a..d1242627ef3 100644 --- a/test/cpp/interop/interop_client.cc +++ b/test/cpp/interop/interop_client.cc @@ -107,6 +107,11 @@ TestService::Stub* InteropClient::ServiceStub::Get() { return stub_.get(); } +UnimplementedService::Stub* +InteropClient::ServiceStub::GetUnimplementedServiceStub() { + return UnimplementedService::NewStub(channel_).get(); +} + void InteropClient::ServiceStub::Reset(std::shared_ptr<Channel> channel) { channel_ = channel; @@ -162,8 +167,8 @@ bool InteropClient::AssertStatusCode(const Status& s, bool InteropClient::DoEmpty() { gpr_log(GPR_DEBUG, "Sending an empty rpc..."); - Empty request = Empty::default_instance(); - Empty response = Empty::default_instance(); + Empty request; + Empty response; ClientContext context; Status s = serviceStub_.Get()->EmptyCall(&context, request, &response); @@ -1002,11 +1007,30 @@ bool InteropClient::DoCustomMetadata() { return true; } +bool InteropClient::DoUnimplementedService() { + gpr_log(GPR_DEBUG, "Sending a request for an unimplemented service..."); + + Empty request; + Empty response; + ClientContext context; + + UnimplementedService::Stub* stub = serviceStub_.GetUnimplementedServiceStub(); + + Status s = stub->UnimplementedCall(&context, request, &response); + + if (!AssertStatusCode(s, StatusCode::UNIMPLEMENTED)) { + return false; + } + + gpr_log(GPR_DEBUG, "unimplemented service done."); + return true; +} + bool InteropClient::DoUnimplementedMethod() { gpr_log(GPR_DEBUG, "Sending a request for an unimplemented rpc..."); - Empty request = Empty::default_instance(); - Empty response = Empty::default_instance(); + Empty request; + Empty response; ClientContext context; Status s = diff --git a/test/cpp/interop/interop_client.h b/test/cpp/interop/interop_client.h index 0a96e7734d8..7ec7ebee209 100644 --- a/test/cpp/interop/interop_client.h +++ b/test/cpp/interop/interop_client.h @@ -80,6 +80,7 @@ class InteropClient { bool DoStatusWithMessage(); bool DoCustomMetadata(); bool DoUnimplementedMethod(); + bool DoUnimplementedService(); bool DoCacheableUnary(); // Auth tests. // username is a string containing the user email @@ -100,6 +101,7 @@ class InteropClient { ServiceStub(std::shared_ptr<Channel> channel, bool new_stub_every_call); TestService::Stub* Get(); + UnimplementedService::Stub* GetUnimplementedServiceStub(); void Reset(std::shared_ptr<Channel> channel); diff --git a/test/cpp/thread_manager/thread_manager_test.cc b/test/cpp/thread_manager/thread_manager_test.cc new file mode 100644 index 00000000000..5c70103947d --- /dev/null +++ b/test/cpp/thread_manager/thread_manager_test.cc @@ -0,0 +1,136 @@ +/* + * + * Copyright 2016, Google Inc. + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *is % allowed in string + */ + +#include <memory> +#include <string> + +#include <gflags/gflags.h> +#include <grpc++/grpc++.h> +#include <grpc/support/log.h> +#include <grpc/support/port_platform.h> + +#include "src/cpp/thread_manager/thread_manager.h" +#include "test/cpp/util/test_config.h" + +namespace grpc { +class ThreadManagerTest GRPC_FINAL : public grpc::ThreadManager { + public: + ThreadManagerTest() + : ThreadManager(kMinPollers, kMaxPollers), + num_do_work_(0), + num_poll_for_work_(0), + num_work_found_(0) {} + + grpc::ThreadManager::WorkStatus PollForWork(void **tag, + bool *ok) GRPC_OVERRIDE; + void DoWork(void *tag, bool ok) GRPC_OVERRIDE; + void PerformTest(); + + private: + void SleepForMs(int sleep_time_ms); + + static const int kMinPollers = 2; + static const int kMaxPollers = 10; + + static const int kPollingTimeoutMsec = 10; + static const int kDoWorkDurationMsec = 1; + + // PollForWork will return SHUTDOWN after this many invocations + static const int kMaxNumPollForWork = 50; + + gpr_atm num_do_work_; // Number of calls to DoWork + gpr_atm num_poll_for_work_; // Number of calls to PollForWork + gpr_atm num_work_found_; // Number of times WORK_FOUND was returned +}; + +void ThreadManagerTest::SleepForMs(int duration_ms) { + gpr_timespec sleep_time = + gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), + gpr_time_from_millis(duration_ms, GPR_TIMESPAN)); + gpr_sleep_until(sleep_time); +} + +grpc::ThreadManager::WorkStatus ThreadManagerTest::PollForWork(void **tag, + bool *ok) { + int call_num = gpr_atm_no_barrier_fetch_add(&num_poll_for_work_, 1); + + if (call_num >= kMaxNumPollForWork) { + Shutdown(); + return SHUTDOWN; + } + + // Simulate "polling for work" by sleeping for some time + SleepForMs(kPollingTimeoutMsec); + + *tag = nullptr; + *ok = true; + + // Return timeout roughly 1 out of every 3 calls + if (call_num % 3 == 0) { + return TIMEOUT; + } else { + gpr_atm_no_barrier_fetch_add(&num_work_found_, 1); + return WORK_FOUND; + } +} + +void ThreadManagerTest::DoWork(void *tag, bool ok) { + gpr_atm_no_barrier_fetch_add(&num_do_work_, 1); + SleepForMs(kDoWorkDurationMsec); //
Simulate doing work by sleeping +} + +void ThreadManagerTest::PerformTest() { + // Initialize() starts the ThreadManager + Initialize(); + + // Wait for all the threads to gracefully terminate + Wait(); + + // The number of times DoWork() was called is equal to the number of times + // WORK_FOUND was returned + gpr_log(GPR_DEBUG, "DoWork() called %ld times", + gpr_atm_no_barrier_load(&num_do_work_)); + GPR_ASSERT(gpr_atm_no_barrier_load(&num_do_work_) == + gpr_atm_no_barrier_load(&num_work_found_)); +} +} // namespace grpc + +int main(int argc, char **argv) { + std::srand(std::time(NULL)); + + grpc::testing::InitTest(&argc, &argv, true); + grpc::ThreadManagerTest test_rpc_manager; + test_rpc_manager.PerformTest(); + + return 0; +} diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal index 252bdb7ed17..6c2b475ed0a 100644 --- a/tools/doxygen/Doxyfile.c++.internal +++ b/tools/doxygen/Doxyfile.c++.internal @@ -864,6 +864,7 @@ src/cpp/client/create_channel_internal.h \ src/cpp/common/channel_filter.h \ src/cpp/server/dynamic_thread_pool.h \ src/cpp/server/thread_pool_interface.h \ +src/cpp/thread_manager/thread_manager.h \ src/cpp/client/insecure_credentials.cc \ src/cpp/client/secure_credentials.cc \ src/cpp/common/auth_property_iterator.cc \ @@ -893,6 +894,7 @@ src/cpp/server/server_cc.cc \ src/cpp/server/server_context.cc \ src/cpp/server/server_credentials.cc \ src/cpp/server/server_posix.cc \ +src/cpp/thread_manager/thread_manager.cc \ src/cpp/util/byte_buffer_cc.cc \ src/cpp/util/slice_cc.cc \ src/cpp/util/status.cc \ diff --git a/tools/run_tests/artifact_targets.py b/tools/run_tests/artifact_targets.py index d36f963a7c6..65d34e17e1c 100644 --- a/tools/run_tests/artifact_targets.py +++ b/tools/run_tests/artifact_targets.py @@ -109,8 +109,8 @@ class PythonArtifact: # TODO(atash) get better platform-detection support in core so we don't # need to do this manually... environ['CFLAGS'] = '-DGPR_MANYLINUX1=1' - environ['BUILD_HEALTH_CHECKING'] = 'TRUE' - environ['BUILD_MANYLINUX_WHEEL'] = 'TRUE' + environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE' + environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE' return create_docker_jobspec(self.name, 'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch, 'tools/run_tests/build_artifact_python.sh', diff --git a/tools/run_tests/build_artifact_node.bat b/tools/run_tests/build_artifact_node.bat index c5bd726db7e..57d55ef19ec 100644 --- a/tools/run_tests/build_artifact_node.bat +++ b/tools/run_tests/build_artifact_node.bat @@ -52,4 +52,4 @@ if %errorlevel% neq 0 exit /b %errorlevel% goto :EOF :error -exit /b 1 \ No newline at end of file +exit /b 1 diff --git a/tools/run_tests/build_artifact_python.sh b/tools/run_tests/build_artifact_python.sh index 9fed7c5028a..2a1d41fd686 100755 --- a/tools/run_tests/build_artifact_python.sh +++ b/tools/run_tests/build_artifact_python.sh @@ -66,7 +66,7 @@ ${SETARCH_CMD} ${PYTHON} tools/distrib/python/grpcio_tools/setup.py sdist # Build gRPC tools package binary distribution ${SETARCH_CMD} ${PYTHON} tools/distrib/python/grpcio_tools/setup.py bdist_wheel -if [ "$BUILD_MANYLINUX_WHEEL" != "" ] +if [ "$GRPC_BUILD_MANYLINUX_WHEEL" != "" ] then for wheel in dist/*.whl; do ${AUDITWHEEL} repair $wheel -w "$ARTIFACT_DIR" @@ -82,16 +82,21 @@ fi # Wheels are not supported by setup_requires/dependency_links, so we # manually install the dependency. Note we should only do this if we # are in a docker image or in a virtualenv. 
-if [ "$BUILD_HEALTH_CHECKING" != "" ] +if [ "$GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS" != "" ] then ${PIP} install -rrequirements.txt ${PIP} install grpcio --no-index --find-links "file://$ARTIFACT_DIR/" ${PIP} install grpcio-tools --no-index --find-links "file://$ARTIFACT_DIR/" - # Build gRPC health check source distribution + # Build gRPC health-checking source distribution ${SETARCH_CMD} ${PYTHON} src/python/grpcio_health_checking/setup.py \ preprocess build_package_protos sdist cp -r src/python/grpcio_health_checking/dist/* "$ARTIFACT_DIR" + + # Build gRPC reflection source distribution + ${SETARCH_CMD} ${PYTHON} src/python/grpcio_reflection/setup.py \ + preprocess build_package_protos sdist + cp -r src/python/grpcio_reflection/dist/* "$ARTIFACT_DIR" fi cp -r dist/* "$ARTIFACT_DIR" diff --git a/tools/run_tests/build_python.sh b/tools/run_tests/build_python.sh index 54e2fe53478..fb884ad1668 100755 --- a/tools/run_tests/build_python.sh +++ b/tools/run_tests/build_python.sh @@ -180,9 +180,18 @@ pip_install_dir $ROOT/tools/distrib/python/grpcio_tools # TODO(atash) figure out namespace packages and grpcio-tools and auditwheel # etc... pip_install_dir $ROOT + +# Build/install health checking $VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py preprocess $VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py build_package_protos pip_install_dir $ROOT/src/python/grpcio_health_checking + +# Build/install reflection +$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py preprocess +$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py build_package_protos +pip_install_dir $ROOT/src/python/grpcio_reflection + +# Build/install tests $VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py preprocess $VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py build_package_protos pip_install_dir $ROOT/src/python/grpcio_tests diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py index 2a99499843a..9b817b5896b 100755 --- a/tools/run_tests/performance/bq_upload_result.py +++ b/tools/run_tests/performance/bq_upload_result.py @@ -120,6 +120,7 @@ def _flatten_result_inplace(scenario_result): scenario_result['serverCores'] = json.dumps(scenario_result['serverCores']) scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess']) scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess']) + scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', [])) def _populate_metadata_inplace(scenario_result): diff --git a/tools/run_tests/performance/scenario_result_schema.json b/tools/run_tests/performance/scenario_result_schema.json index 6bec21df397..3285f212d77 100644 --- a/tools/run_tests/performance/scenario_result_schema.json +++ b/tools/run_tests/performance/scenario_result_schema.json @@ -208,5 +208,10 @@ "name": "serverSuccess", "type": "STRING", "mode": "NULLABLE" + }, + { + "name": "requestResults", + "type": "STRING", + "mode": "NULLABLE" } ] diff --git a/tools/run_tests/pre_build_node.bat b/tools/run_tests/pre_build_node.bat index a29456f9ed9..addb01a2a4d 100644 --- a/tools/run_tests/pre_build_node.bat +++ b/tools/run_tests/pre_build_node.bat @@ -29,6 +29,5 @@ set PATH=%PATH%;C:\Program Files\nodejs\;%APPDATA%\npm -@rem Expire cache after 1 week -call npm update --cache-min 604800 - +@rem Expire cache after 1 day +call npm update --cache-min 86400 diff --git a/tools/run_tests/pre_build_node.sh b/tools/run_tests/pre_build_node.sh index 4879e7ad9bd..e63be9da523 100755 --- 
a/tools/run_tests/pre_build_node.sh +++ b/tools/run_tests/pre_build_node.sh @@ -37,8 +37,8 @@ set -ex export GRPC_CONFIG=${CONFIG:-opt} -# Expire cache after 1 week -npm update --cache-min 604800 +# Expire cache after 1 day +npm update --cache-min 86400 npm install node-gyp-install ./node_modules/.bin/node-gyp-install diff --git a/tools/run_tests/report_utils.py b/tools/run_tests/report_utils.py index efe5dc999d1..3e18f365102 100644 --- a/tools/run_tests/report_utils.py +++ b/tools/run_tests/report_utils.py @@ -36,7 +36,7 @@ try: from mako.template import Template from mako import exceptions except (ImportError): - pass # Mako not installed but it is ok. + pass # Mako not installed but it is ok. import os import string import xml.etree.cElementTree as ET @@ -63,9 +63,9 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc', root = ET.Element('testsuites') testsuite = ET.SubElement(root, 'testsuite', id='1', package=suite_package, name=suite_name) - for shortname, results in resultset.items(): + for shortname, results in resultset.iteritems(): for result in results: - xml_test = ET.SubElement(testsuite, 'testcase', name=shortname) + xml_test = ET.SubElement(testsuite, 'testcase', name=shortname) if result.elapsed_time: xml_test.set('time', str(result.elapsed_time)) ET.SubElement(xml_test, 'system-out').text = _filter_msg(result.message, @@ -79,7 +79,7 @@ def render_junit_xml_report(resultset, xml_report, suite_package='grpc', def render_interop_html_report( - client_langs, server_langs, test_cases, auth_test_cases, http2_cases, + client_langs, server_langs, test_cases, auth_test_cases, http2_cases, resultset, num_failures, cloud_to_prod, prod_servers, http2_interop): """Generate HTML report for interop tests.""" template_file = 'tools/run_tests/interop_html_report.template' @@ -99,7 +99,7 @@ def render_interop_html_report( sorted_server_langs = sorted(server_langs) sorted_prod_servers = sorted(prod_servers) - args = {'client_langs': sorted_client_langs, + args = {'client_langs': sorted_client_langs, 'server_langs': sorted_server_langs, 'test_cases': sorted_test_cases, 'auth_test_cases': sorted_auth_test_cases, @@ -110,9 +110,9 @@ def render_interop_html_report( 'prod_servers': sorted_prod_servers, 'http2_interop': http2_interop} - html_report_out_dir = 'reports' + html_report_out_dir = 'reports' if not os.path.exists(html_report_out_dir): - os.mkdir(html_report_out_dir) + os.mkdir(html_report_out_dir) html_file_path = os.path.join(html_report_out_dir, 'index.html') try: with open(html_file_path, 'w') as output_file: @@ -120,4 +120,3 @@ def render_interop_html_report( except: print(exceptions.text_error_template().render()) raise - diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py index 0c6efda1f47..c73de6b7174 100755 --- a/tools/run_tests/run_interop_tests.py +++ b/tools/run_tests/run_interop_tests.py @@ -64,7 +64,9 @@ _SKIP_SERVER_COMPRESSION = ['server_compressed_unary', _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION -_SKIP_ADVANCED_GO = ['custom_metadata', 'unimplemented_method'] +_SKIP_ADVANCED_GO = ['custom_metadata', + 'unimplemented_method', + 'unimplemented_service'] _SKIP_ADVANCED = _SKIP_ADVANCED_GO + ['status_code_and_message'] @@ -416,7 +418,8 @@ _TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong', 'timeout_on_sleeping_server', 'custom_metadata', 'status_code_and_message', 'unimplemented_method', 'client_compressed_unary', 'server_compressed_unary', - 'client_compressed_streaming', 
'server_compressed_streaming'] + 'client_compressed_streaming', 'server_compressed_streaming', + 'unimplemented_service'] _AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token', 'per_rpc_creds'] diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py index 2886870d384..911843e9f36 100755 --- a/tools/run_tests/run_tests.py +++ b/tools/run_tests/run_tests.py @@ -363,7 +363,8 @@ class NodeLanguage(object): self.config = config self.args = args _check_compiler(self.args.compiler, ['default', 'node0.12', - 'node4', 'node5', 'node6']) + 'node4', 'node5', 'node6', + 'node7']) if self.args.compiler == 'default': self.node_version = '4' else: @@ -1064,6 +1065,7 @@ argp.add_argument('--compiler', 'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'vs2010', 'vs2013', 'vs2015', 'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', + 'node0.12', 'node4', 'node5', 'node6', 'node7', 'coreclr'], default='default', help='Selects compiler to use. Allowed values depend on the platform and language.') diff --git a/tools/run_tests/run_tests_in_workspace.sh b/tools/run_tests/run_tests_in_workspace.sh index 98ef3566db1..9c6c5b76e06 100755 --- a/tools/run_tests/run_tests_in_workspace.sh +++ b/tools/run_tests/run_tests_in_workspace.sh @@ -35,11 +35,13 @@ set -ex cd $(dirname $0)/../.. +export repo_root=$(pwd) rm -rf "${WORKSPACE_NAME}" -# TODO(jtattermusch): clone --recursive fetches the submodules from github. -# Try avoiding that to save time and network capacity. -git clone --recursive . "${WORKSPACE_NAME}" +git clone . "${WORKSPACE_NAME}" +# clone gRPC submodules, use data from locally cloned submodules where possible +git submodule foreach 'cd "${repo_root}/${WORKSPACE_NAME}" \ + && git submodule update --init --reference ${repo_root}/${name} ${name}' echo "Running run_tests.py in workspace ${WORKSPACE_NAME}" python "${WORKSPACE_NAME}/tools/run_tests/run_tests.py" $@ diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py index b65010ad8b4..2656f1ac5dc 100755 --- a/tools/run_tests/run_tests_matrix.py +++ b/tools/run_tests/run_tests_matrix.py @@ -46,16 +46,16 @@ os.chdir(_ROOT) _RUNTESTS_TIMEOUT = 4*60*60 # Number of jobs assigned to each run_tests.py instance -_INNER_JOBS = 2 +_DEFAULT_INNER_JOBS = 2 -def _docker_jobspec(name, runtests_args=[]): +def _docker_jobspec(name, runtests_args=[], inner_jobs=_DEFAULT_INNER_JOBS): """Run a single instance of run_tests.py in a docker container""" test_job = jobset.JobSpec( cmdline=['python', 'tools/run_tests/run_tests.py', '--use_docker', '-t', - '-j', str(_INNER_JOBS), + '-j', str(inner_jobs), '-x', 'report_%s.xml' % name, '--report_suite_name', '%s' % name] + runtests_args, shortname='run_tests_%s' % name, @@ -63,7 +63,7 @@ def _docker_jobspec(name, runtests_args=[]): return test_job -def _workspace_jobspec(name, runtests_args=[], workspace_name=None): +def _workspace_jobspec(name, runtests_args=[], workspace_name=None, inner_jobs=_DEFAULT_INNER_JOBS): """Run a single instance of run_tests.py in a separate workspace""" if not workspace_name: workspace_name = 'workspace_%s' % name @@ -71,7 +71,7 @@ def _workspace_jobspec(name, runtests_args=[], workspace_name=None): test_job = jobset.JobSpec( cmdline=['tools/run_tests/run_tests_in_workspace.sh', '-t', - '-j', str(_INNER_JOBS), + '-j', str(inner_jobs), '-x', '../report_%s.xml' % name, '--report_suite_name', '%s' % name] + runtests_args, environ=env, @@ -82,7 +82,8 @@ def _workspace_jobspec(name, runtests_args=[], 
workspace_name=None): def _generate_jobs(languages, configs, platforms, arch=None, compiler=None, - labels=[], extra_args=[]): + labels=[], extra_args=[], + inner_jobs=_DEFAULT_INNER_JOBS): result = [] for language in languages: for platform in platforms: @@ -97,68 +98,75 @@ def _generate_jobs(languages, configs, platforms, runtests_args += extra_args if platform == 'linux': - job = _docker_jobspec(name=name, runtests_args=runtests_args) + job = _docker_jobspec(name=name, runtests_args=runtests_args, inner_jobs=inner_jobs) else: - job = _workspace_jobspec(name=name, runtests_args=runtests_args) + job = _workspace_jobspec(name=name, runtests_args=runtests_args, inner_jobs=inner_jobs) job.labels = [platform, config, language] + labels result.append(job) return result -def _create_test_jobs(extra_args=[]): +def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS): test_jobs = [] # supported on linux only test_jobs += _generate_jobs(languages=['sanity', 'php7'], configs=['dbg', 'opt'], platforms=['linux'], labels=['basictests'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # supported on all platforms. test_jobs += _generate_jobs(languages=['c', 'csharp', 'node', 'python'], configs=['dbg', 'opt'], platforms=['linux', 'macos', 'windows'], labels=['basictests'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # supported on linux and mac. test_jobs += _generate_jobs(languages=['c++', 'ruby', 'php'], configs=['dbg', 'opt'], platforms=['linux', 'macos'], labels=['basictests'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # supported on mac only. test_jobs += _generate_jobs(languages=['objc'], configs=['dbg', 'opt'], platforms=['macos'], labels=['basictests'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # sanitizers test_jobs += _generate_jobs(languages=['c'], configs=['msan', 'asan', 'tsan'], platforms=['linux'], labels=['sanitizers'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) test_jobs += _generate_jobs(languages=['c++'], configs=['asan', 'tsan'], platforms=['linux'], labels=['sanitizers'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # libuv tests test_jobs += _generate_jobs(languages=['c'], configs=['dbg', 'opt'], platforms=['linux'], labels=['libuv'], - extra_args=extra_args + ['--iomgr_platform=uv']) + extra_args=extra_args + ['--iomgr_platform=uv'], + inner_jobs=inner_jobs) return test_jobs -def _create_portability_test_jobs(extra_args=[]): +def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS): test_jobs = [] # portability C x86 test_jobs += _generate_jobs(languages=['c'], @@ -167,7 +175,8 @@ def _create_portability_test_jobs(extra_args=[]): arch='x86', compiler='default', labels=['portability'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # portability C and C++ on x64 for compiler in ['gcc4.4', 'gcc4.6', 'gcc5.3', @@ -178,7 +187,8 @@ def _create_portability_test_jobs(extra_args=[]): arch='x64', compiler=compiler, labels=['portability'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) # portability C on Windows for arch in ['x86', 'x64']: @@ -189,7 +199,8 @@ def _create_portability_test_jobs(extra_args=[]): arch=arch, compiler=compiler, labels=['portability'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) test_jobs += _generate_jobs(languages=['python'], configs=['dbg'], @@ -197,7 +208,8 
@@ def _create_portability_test_jobs(extra_args=[]): arch='default', compiler='python3.4', labels=['portability'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) test_jobs += _generate_jobs(languages=['csharp'], configs=['dbg'], @@ -205,7 +217,8 @@ def _create_portability_test_jobs(extra_args=[]): arch='default', compiler='coreclr', labels=['portability'], - extra_args=extra_args) + extra_args=extra_args, + inner_jobs=inner_jobs) return test_jobs @@ -220,7 +233,7 @@ def _allowed_labels(): argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.') argp.add_argument('-j', '--jobs', - default=multiprocessing.cpu_count()/_INNER_JOBS, + default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS, type=int, help='Number of concurrent run_tests.py instances.') argp.add_argument('-f', '--filter', @@ -249,15 +262,21 @@ argp.add_argument('--base_branch', default='origin/master', type=str, help='Branch that pull request is requesting to merge into') +argp.add_argument('--inner_jobs', + default=_DEFAULT_INNER_JOBS, + type=int, + help='Number of jobs in each run_tests.py instance') args = argp.parse_args() + extra_args = [] if args.build_only: extra_args.append('--build_only') if args.force_default_poller: extra_args.append('--force_default_poller') -all_jobs = _create_test_jobs(extra_args=extra_args) + _create_portability_test_jobs(extra_args=extra_args) +all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \ + _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) jobs = [] for job in all_jobs: diff --git a/tools/run_tests/sources_and_headers.json b/tools/run_tests/sources_and_headers.json index 222af3ebf31..57d3b68c1cb 100644 --- a/tools/run_tests/sources_and_headers.json +++ b/tools/run_tests/sources_and_headers.json @@ -3223,6 +3223,23 @@ "third_party": false, "type": "target" }, + { + "deps": [ + "gpr", + "grpc", + "grpc++", + "grpc++_test_config" + ], + "headers": [], + "is_filegroup": false, + "language": "c++", + "name": "thread_manager_test", + "src": [ + "test/cpp/thread_manager/thread_manager_test.cc" + ], + "third_party": false, + "type": "target" + }, { "deps": [ "gpr", @@ -7477,7 +7494,8 @@ "src/cpp/client/create_channel_internal.h", "src/cpp/common/channel_filter.h", "src/cpp/server/dynamic_thread_pool.h", - "src/cpp/server/thread_pool_interface.h" + "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.h" ], "is_filegroup": true, "language": "c++", @@ -7556,6 +7574,8 @@ "src/cpp/server/server_credentials.cc", "src/cpp/server/server_posix.cc", "src/cpp/server/thread_pool_interface.h", + "src/cpp/thread_manager/thread_manager.cc", + "src/cpp/thread_manager/thread_manager.h", "src/cpp/util/byte_buffer_cc.cc", "src/cpp/util/slice_cc.cc", "src/cpp/util/status.cc", diff --git a/tools/run_tests/tests.json b/tools/run_tests/tests.json index c3715995d2e..8a0cb841818 100644 --- a/tools/run_tests/tests.json +++ b/tools/run_tests/tests.json @@ -3015,6 +3015,27 @@ "posix" ] }, + { + "args": [], + "ci_platforms": [ + "linux", + "mac", + "posix", + "windows" + ], + "cpu_cost": 1.0, + "exclude_configs": [], + "flaky": false, + "gtest": false, + "language": "c++", + "name": "thread_manager_test", + "platforms": [ + "linux", + "mac", + "posix", + "windows" + ] + }, { "args": [], "ci_platforms": [ diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj b/vsprojects/vcxproj/grpc++/grpc++.vcxproj index 43c5281a024..bf9c3a5c9d4 100644 --- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj 
+++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj @@ -364,6 +364,7 @@ + @@ -424,6 +425,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters index 6ad212a125c..b88a78ad92d 100644 --- a/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++/grpc++.vcxproj.filters @@ -88,6 +88,9 @@ src\cpp\server + + src\cpp\thread_manager + src\cpp\util @@ -422,6 +425,9 @@ src\cpp\server + + src\cpp\thread_manager + @@ -476,6 +482,9 @@ {321b0980-74ad-e8ca-f23b-deffa5d6bb8f} + + {23f9df56-8604-52a0-e6a2-f01b8e68d0e7} + {f842537a-2bf1-1ec3-b495-7d62c64a1c06} diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj index 9e6f2c0d0f1..5d0759790c8 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj @@ -360,6 +360,7 @@ + @@ -410,6 +411,8 @@ + + diff --git a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters index c73be4e63fe..bdb71340817 100644 --- a/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters +++ b/vsprojects/vcxproj/grpc++_unsecure/grpc++_unsecure.vcxproj.filters @@ -73,6 +73,9 @@ src\cpp\server + + src\cpp\thread_manager + src\cpp\util @@ -395,6 +398,9 @@ src\cpp\server + + src\cpp\thread_manager + @@ -449,6 +455,9 @@ {8a54a279-d14b-4237-0df3-1ffe1ef5a7af} + + {e5b55f25-d99f-b8e5-9981-7da7fa7ba628} + {fb5d9a64-20ca-5119-ed38-04a3cf94923d} diff --git a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj new file mode 100644 index 00000000000..2c35a03a021 --- /dev/null +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj @@ -0,0 +1,201 @@ + + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {08C611E4-7F87-73BE-76CE-C158A4CC05A3} + true + $(SolutionDir)IntDir\$(MSBuildProjectName)\ + + + + v100 + + + v110 + + + v120 + + + v140 + + + Application + true + Unicode + + + Application + false + true + Unicode + + + + + + + + + + + + + + + + thread_manager_test + static + Debug + static + Debug + + + thread_manager_test + static + Release + static + Release + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + Console + true + false + + + + + + NotUsing + Level3 + Disabled + WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions) + true + MultiThreadedDebug + true + None + false + + + Console + true + false + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + NotUsing + Level3 + MaxSpeed + WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions) + true + true + true + MultiThreaded + true + None + false + + + Console + true + false + true + true + + + + + + + + + + {C187A093-A0FE-489D-A40A-6E33DE0F9FEB} + + + {29D16885-7228-4C31-81ED-5F9187C7F2A9} + + + {B23D3D1A-9438-4EDA-BEB6-9A0A03D17792} + + + {3F7D093D-11F9-C4BC-BEB7-18EB28E3F290} + + + + + + + + + + + + + + + This project references NuGet package(s) that are missing on this computer. Enable NuGet Package Restore to download them. For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}. 
+ + + + + + + + + diff --git a/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters new file mode 100644 index 00000000000..e1741f8316c --- /dev/null +++ b/vsprojects/vcxproj/test/thread_manager_test/thread_manager_test.vcxproj.filters @@ -0,0 +1,21 @@ + + + + + test\cpp\thread_manager + + + + + + {e9e471cd-7f7e-9abc-af13-ec58851849ac} + + + {b350f72c-af76-7272-4342-1b0fc7a458ee} + + + {6b09ea8d-fbc6-e6fe-f884-b3d3dfcbfc12} + + + +
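
Note on the new thread_manager component referenced throughout the build, test and project files above: this diff contains only its test, not src/cpp/thread_manager/thread_manager.h itself. Judging from how thread_manager_test.cc drives the class, a grpc::ThreadManager subclass supplies PollForWork() and DoWork(), is started with Initialize(), asked to stop with Shutdown(), and drained with Wait(). The sketch below illustrates that contract under those assumptions only; WorkQueueManager, its queue member and its Add()/Stop() helpers are invented for illustration, and it uses final/override where the test uses the GRPC_FINAL/GRPC_OVERRIDE portability macros.

// Illustrative sketch only: signatures inferred from thread_manager_test.cc
// in this diff; WorkQueueManager and its members are hypothetical.
#include <mutex>
#include <queue>

#include "src/cpp/thread_manager/thread_manager.h"

class WorkQueueManager final : public grpc::ThreadManager {
 public:
  WorkQueueManager() : ThreadManager(/*min_pollers=*/2, /*max_pollers=*/10) {}

  // Worker threads call this to ask for the next piece of work. TIMEOUT lets
  // the manager re-evaluate how many poller threads it needs; SHUTDOWN tells
  // every worker to drain and exit.
  WorkStatus PollForWork(void** tag, bool* ok) override {
    std::lock_guard<std::mutex> lock(mu_);
    if (shut_down_) return SHUTDOWN;
    if (work_.empty()) return TIMEOUT;  // a real poller would block on a deadline
    *tag = work_.front();
    work_.pop();
    *ok = true;
    return WORK_FOUND;
  }

  // Called once for every tag returned with WORK_FOUND, possibly from several
  // threads concurrently.
  void DoWork(void* tag, bool ok) override { /* process tag here */ }

  void Add(void* tag) {
    std::lock_guard<std::mutex> lock(mu_);
    work_.push(tag);
  }

  void Stop() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      shut_down_ = true;
    }
    Shutdown();  // ask the ThreadManager to stop creating poller threads
  }

 private:
  std::mutex mu_;
  std::queue<void*> work_;
  bool shut_down_ = false;
};

// Typical lifecycle, mirroring PerformTest() in the new test:
//   WorkQueueManager mgr;
//   mgr.Initialize();   // spawns the initial poller threads
//   mgr.Add(tag);       // ... feed work from elsewhere ...
//   mgr.Stop();
//   mgr.Wait();         // blocks until all manager threads have exited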