Merge github.com:grpc/grpc into newlines

pull/8630/head
Craig Tiller 8 years ago
commit 3e2048dd9c
  1. .gitmodules (3)
  2. BUILD (35)
  3. CMakeLists.txt (30)
  4. Makefile (275)
  5. binding.gyp (1)
  6. build.yaml (45)
  7. config.m4 (1)
  8. doc/cpp-style-guide.md (88)
  9. gRPC-Core.podspec (3)
  10. grpc.gemspec (2)
  11. include/grpc++/alarm.h (2)
  12. include/grpc++/channel.h (14)
  13. include/grpc++/ext/proto_server_reflection_plugin.h (12)
  14. include/grpc++/generic/async_generic_service.h (4)
  15. include/grpc++/generic/generic_stub.h (2)
  16. include/grpc++/impl/codegen/async_stream.h (64)
  17. include/grpc++/impl/codegen/async_unary_call.h (9)
  18. include/grpc++/impl/codegen/call.h (14)
  19. include/grpc++/impl/codegen/client_context.h (6)
  20. include/grpc++/impl/codegen/config.h (83)
  21. include/grpc++/impl/codegen/core_codegen.h (67)
  22. include/grpc++/impl/codegen/method_handler_impl.h (10)
  23. include/grpc++/impl/codegen/proto_utils.h (22)
  24. include/grpc++/impl/codegen/server_context.h (2)
  25. include/grpc++/impl/codegen/server_interface.h (10)
  26. include/grpc++/impl/codegen/sync_no_cxx11.h (111)
  27. include/grpc++/impl/codegen/sync_stream.h (76)
  28. include/grpc++/impl/codegen/time.h (4)
  29. include/grpc++/impl/grpc_library.h (8)
  30. include/grpc++/impl/sync.h (39)
  31. include/grpc++/impl/thd.h (45)
  32. include/grpc++/impl/thd_no_cxx11.h (117)
  33. include/grpc++/resource_quota.h (2)
  34. include/grpc++/server.h (28)
  35. include/grpc++/support/byte_buffer.h (2)
  36. include/grpc++/support/channel_arguments.h (5)
  37. include/grpc++/support/slice.h (2)
  38. package.xml (2)
  39. setup.py (2)
  40. src/compiler/cpp_generator.cc (57)
  41. src/core/ext/client_channel/lb_policy.h (6)
  42. src/core/ext/lb_policy/grpclb/grpclb.c (649)
  43. src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h (4)
  44. src/core/ext/lb_policy/round_robin/round_robin.c (45)
  45. src/core/ext/resolver/dns/native/dns_resolver.c (2)
  46. src/core/ext/transport/chttp2/client/secure/secure_channel_create.c (2)
  47. src/core/ext/transport/chttp2/transport/hpack_parser.c (26)
  48. src/core/lib/iomgr/network_status_tracker.c (2)
  49. src/core/lib/iomgr/resource_quota.c (7)
  50. src/core/lib/iomgr/resource_quota.h (5)
  51. src/core/lib/iomgr/tcp_client_uv.c (36)
  52. src/core/lib/iomgr/tcp_server_posix.c (45)
  53. src/core/lib/iomgr/tcp_server_uv.c (24)
  54. src/core/lib/iomgr/tcp_uv.c (71)
  55. src/core/lib/iomgr/tcp_uv.h (4)
  56. src/core/lib/security/transport/security_connector.c (4)
  57. src/core/lib/transport/pid_controller.c (36)
  58. src/core/lib/transport/pid_controller.h (39)
  59. src/cpp/client/channel_cc.cc (6)
  60. src/cpp/client/client_context.cc (12)
  61. src/cpp/client/cronet_credentials.cc (8)
  62. src/cpp/client/insecure_credentials.cc (8)
  63. src/cpp/client/secure_credentials.h (14)
  64. src/cpp/common/channel_arguments.cc (5)
  65. src/cpp/common/channel_filter.h (2)
  66. src/cpp/common/secure_auth_context.h (21)
  67. src/cpp/ext/proto_server_reflection.h (4)
  68. src/cpp/server/dynamic_thread_pool.cc (16)
  69. src/cpp/server/dynamic_thread_pool.h (17)
  70. src/cpp/server/insecure_server_credentials.cc (7)
  71. src/cpp/server/secure_server_credentials.h (11)
  72. src/cpp/server/server_cc.cc (32)
  73. src/cpp/server/server_context.cc (17)
  74. src/cpp/thread_manager/thread_manager.cc (29)
  75. src/cpp/thread_manager/thread_manager.h (13)
  76. src/cpp/util/time_cc.cc (5)
  77. src/google_benchmark/gen_build_yaml.py (51)
  78. src/proto/grpc/lb/v1/load_balancer.options (3)
  79. src/proto/grpc/lb/v1/load_balancer.proto (8)
  80. src/python/grpcio/grpc_core_dependencies.py (1)
  81. src/ruby/ext/grpc/rb_compression_options.c (2)
  82. templates/tools/dockerfile/test/cxx_jessie_x64/Dockerfile.template (4)
  83. templates/tools/dockerfile/test/cxx_jessie_x86/Dockerfile.template (4)
  84. test/core/client_channel/lb_policies_test.c (38)
  85. test/core/client_channel/set_initial_connect_string_test.c (2)
  86. test/core/end2end/connection_refused_test.c (37)
  87. test/core/end2end/end2end_tests.h (4)
  88. test/core/end2end/fake_resolver.c (63)
  89. test/core/end2end/fixtures/h2_census.c (7)
  90. test/core/end2end/fixtures/h2_compress.c (7)
  91. test/core/end2end/fixtures/h2_fake_resolver.c (128)
  92. test/core/end2end/fixtures/h2_fakesec.c (7)
  93. test/core/end2end/fixtures/h2_fd.c (5)
  94. test/core/end2end/fixtures/h2_full+pipe.c (7)
  95. test/core/end2end/fixtures/h2_full+trace.c (7)
  96. test/core/end2end/fixtures/h2_full.c (7)
  97. test/core/end2end/fixtures/h2_http_proxy.c (7)
  98. test/core/end2end/fixtures/h2_load_reporting.c (7)
  99. test/core/end2end/fixtures/h2_oauth2.c (7)
  100. test/core/end2end/fixtures/h2_proxy.c (7)
  Some files were not shown because too many files have changed in this diff.

.gitmodules (3)

@ -17,3 +17,6 @@
[submodule "third_party/thrift"]
path = third_party/thrift
url = https://github.com/apache/thrift.git
[submodule "third_party/google_benchmark"]
path = third_party/google_benchmark
url = https://github.com/google/benchmark

BUILD (35)

@ -252,6 +252,7 @@ cc_library(
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/method_config.h",
"src/core/lib/transport/pid_controller.h",
"src/core/lib/transport/static_metadata.h",
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
@ -436,6 +437,7 @@ cc_library(
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/method_config.c",
"src/core/lib/transport/pid_controller.c",
"src/core/lib/transport/static_metadata.c",
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
@ -676,6 +678,7 @@ cc_library(
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/method_config.h",
"src/core/lib/transport/pid_controller.h",
"src/core/lib/transport/static_metadata.h",
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
@ -845,6 +848,7 @@ cc_library(
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/method_config.c",
"src/core/lib/transport/pid_controller.c",
"src/core/lib/transport/static_metadata.c",
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
@ -1055,6 +1059,7 @@ cc_library(
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/method_config.h",
"src/core/lib/transport/pid_controller.h",
"src/core/lib/transport/static_metadata.h",
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",
@ -1216,6 +1221,7 @@ cc_library(
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/method_config.c",
"src/core/lib/transport/pid_controller.c",
"src/core/lib/transport/static_metadata.c",
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
@ -1402,12 +1408,6 @@ cc_library(
"include/grpc++/impl/server_builder_plugin.h",
"include/grpc++/impl/server_initializer.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
"include/grpc++/impl/sync_no_cxx11.h",
"include/grpc++/impl/thd.h",
"include/grpc++/impl/thd_cxx11.h",
"include/grpc++/impl/thd_no_cxx11.h",
"include/grpc++/resource_quota.h",
"include/grpc++/security/auth_context.h",
"include/grpc++/security/auth_metadata_processor.h",
@ -1455,9 +1455,6 @@ cc_library(
"include/grpc++/impl/codegen/status_helper.h",
"include/grpc++/impl/codegen/string_ref.h",
"include/grpc++/impl/codegen/stub_options.h",
"include/grpc++/impl/codegen/sync.h",
"include/grpc++/impl/codegen/sync_cxx11.h",
"include/grpc++/impl/codegen/sync_no_cxx11.h",
"include/grpc++/impl/codegen/sync_stream.h",
"include/grpc++/impl/codegen/time.h",
"include/grpc/impl/codegen/byte_buffer_reader.h",
@ -1554,12 +1551,6 @@ cc_library(
"include/grpc++/impl/server_builder_plugin.h",
"include/grpc++/impl/server_initializer.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
"include/grpc++/impl/sync_no_cxx11.h",
"include/grpc++/impl/thd.h",
"include/grpc++/impl/thd_cxx11.h",
"include/grpc++/impl/thd_no_cxx11.h",
"include/grpc++/resource_quota.h",
"include/grpc++/security/auth_context.h",
"include/grpc++/security/auth_metadata_processor.h",
@ -1607,9 +1598,6 @@ cc_library(
"include/grpc++/impl/codegen/status_helper.h",
"include/grpc++/impl/codegen/string_ref.h",
"include/grpc++/impl/codegen/stub_options.h",
"include/grpc++/impl/codegen/sync.h",
"include/grpc++/impl/codegen/sync_cxx11.h",
"include/grpc++/impl/codegen/sync_no_cxx11.h",
"include/grpc++/impl/codegen/sync_stream.h",
"include/grpc++/impl/codegen/time.h",
"include/grpc/impl/codegen/byte_buffer_reader.h",
@ -1727,12 +1715,6 @@ cc_library(
"include/grpc++/impl/server_builder_plugin.h",
"include/grpc++/impl/server_initializer.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
"include/grpc++/impl/sync_no_cxx11.h",
"include/grpc++/impl/thd.h",
"include/grpc++/impl/thd_cxx11.h",
"include/grpc++/impl/thd_no_cxx11.h",
"include/grpc++/resource_quota.h",
"include/grpc++/security/auth_context.h",
"include/grpc++/security/auth_metadata_processor.h",
@ -1780,9 +1762,6 @@ cc_library(
"include/grpc++/impl/codegen/status_helper.h",
"include/grpc++/impl/codegen/string_ref.h",
"include/grpc++/impl/codegen/stub_options.h",
"include/grpc++/impl/codegen/sync.h",
"include/grpc++/impl/codegen/sync_cxx11.h",
"include/grpc++/impl/codegen/sync_no_cxx11.h",
"include/grpc++/impl/codegen/sync_stream.h",
"include/grpc++/impl/codegen/time.h",
"include/grpc/impl/codegen/byte_buffer_reader.h",
@ -2100,6 +2079,7 @@ objc_library(
"src/core/lib/transport/metadata.c",
"src/core/lib/transport/metadata_batch.c",
"src/core/lib/transport/method_config.c",
"src/core/lib/transport/pid_controller.c",
"src/core/lib/transport/static_metadata.c",
"src/core/lib/transport/timeout_encoding.c",
"src/core/lib/transport/transport.c",
@ -2319,6 +2299,7 @@ objc_library(
"src/core/lib/transport/metadata.h",
"src/core/lib/transport/metadata_batch.h",
"src/core/lib/transport/method_config.h",
"src/core/lib/transport/pid_controller.h",
"src/core/lib/transport/static_metadata.h",
"src/core/lib/transport/timeout_encoding.h",
"src/core/lib/transport/transport.h",

CMakeLists.txt (30)

@ -393,6 +393,7 @@ add_library(grpc
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/method_config.c
src/core/lib/transport/pid_controller.c
src/core/lib/transport/static_metadata.c
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
@ -665,6 +666,7 @@ add_library(grpc_cronet
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/method_config.c
src/core/lib/transport/pid_controller.c
src/core/lib/transport/static_metadata.c
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
@ -909,6 +911,7 @@ add_library(grpc_unsecure
src/core/lib/transport/metadata.c
src/core/lib/transport/metadata_batch.c
src/core/lib/transport/method_config.c
src/core/lib/transport/pid_controller.c
src/core/lib/transport/static_metadata.c
src/core/lib/transport/timeout_encoding.c
src/core/lib/transport/transport.c
@ -1121,12 +1124,6 @@ foreach(_hdr
include/grpc++/impl/server_builder_plugin.h
include/grpc++/impl/server_initializer.h
include/grpc++/impl/service_type.h
include/grpc++/impl/sync.h
include/grpc++/impl/sync_cxx11.h
include/grpc++/impl/sync_no_cxx11.h
include/grpc++/impl/thd.h
include/grpc++/impl/thd_cxx11.h
include/grpc++/impl/thd_no_cxx11.h
include/grpc++/resource_quota.h
include/grpc++/security/auth_context.h
include/grpc++/security/auth_metadata_processor.h
@ -1174,9 +1171,6 @@ foreach(_hdr
include/grpc++/impl/codegen/status_helper.h
include/grpc++/impl/codegen/string_ref.h
include/grpc++/impl/codegen/stub_options.h
include/grpc++/impl/codegen/sync.h
include/grpc++/impl/codegen/sync_cxx11.h
include/grpc++/impl/codegen/sync_no_cxx11.h
include/grpc++/impl/codegen/sync_stream.h
include/grpc++/impl/codegen/time.h
include/grpc/impl/codegen/byte_buffer_reader.h
@ -1287,12 +1281,6 @@ foreach(_hdr
include/grpc++/impl/server_builder_plugin.h
include/grpc++/impl/server_initializer.h
include/grpc++/impl/service_type.h
include/grpc++/impl/sync.h
include/grpc++/impl/sync_cxx11.h
include/grpc++/impl/sync_no_cxx11.h
include/grpc++/impl/thd.h
include/grpc++/impl/thd_cxx11.h
include/grpc++/impl/thd_no_cxx11.h
include/grpc++/resource_quota.h
include/grpc++/security/auth_context.h
include/grpc++/security/auth_metadata_processor.h
@ -1340,9 +1328,6 @@ foreach(_hdr
include/grpc++/impl/codegen/status_helper.h
include/grpc++/impl/codegen/string_ref.h
include/grpc++/impl/codegen/stub_options.h
include/grpc++/impl/codegen/sync.h
include/grpc++/impl/codegen/sync_cxx11.h
include/grpc++/impl/codegen/sync_no_cxx11.h
include/grpc++/impl/codegen/sync_stream.h
include/grpc++/impl/codegen/time.h
include/grpc/impl/codegen/byte_buffer_reader.h
@ -1491,12 +1476,6 @@ foreach(_hdr
include/grpc++/impl/server_builder_plugin.h
include/grpc++/impl/server_initializer.h
include/grpc++/impl/service_type.h
include/grpc++/impl/sync.h
include/grpc++/impl/sync_cxx11.h
include/grpc++/impl/sync_no_cxx11.h
include/grpc++/impl/thd.h
include/grpc++/impl/thd_cxx11.h
include/grpc++/impl/thd_no_cxx11.h
include/grpc++/resource_quota.h
include/grpc++/security/auth_context.h
include/grpc++/security/auth_metadata_processor.h
@ -1544,9 +1523,6 @@ foreach(_hdr
include/grpc++/impl/codegen/status_helper.h
include/grpc++/impl/codegen/string_ref.h
include/grpc++/impl/codegen/stub_options.h
include/grpc++/impl/codegen/sync.h
include/grpc++/impl/codegen/sync_cxx11.h
include/grpc++/impl/codegen/sync_no_cxx11.h
include/grpc++/impl/codegen/sync_stream.h
include/grpc++/impl/codegen/time.h
include/grpc/impl/codegen/byte_buffer_reader.h

Makefile (275)

@ -1028,6 +1028,7 @@ timer_heap_test: $(BINDIR)/$(CONFIG)/timer_heap_test
timer_list_test: $(BINDIR)/$(CONFIG)/timer_list_test
transport_connectivity_state_test: $(BINDIR)/$(CONFIG)/transport_connectivity_state_test
transport_metadata_test: $(BINDIR)/$(CONFIG)/transport_metadata_test
transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test
transport_security_test: $(BINDIR)/$(CONFIG)/transport_security_test
udp_server_test: $(BINDIR)/$(CONFIG)/udp_server_test
uri_fuzzer_test: $(BINDIR)/$(CONFIG)/uri_fuzzer_test
@ -1069,6 +1070,7 @@ interop_test: $(BINDIR)/$(CONFIG)/interop_test
json_run_localhost: $(BINDIR)/$(CONFIG)/json_run_localhost
metrics_client: $(BINDIR)/$(CONFIG)/metrics_client
mock_test: $(BINDIR)/$(CONFIG)/mock_test
noop-benchmark: $(BINDIR)/$(CONFIG)/noop-benchmark
proto_server_reflection_test: $(BINDIR)/$(CONFIG)/proto_server_reflection_test
qps_interarrival_test: $(BINDIR)/$(CONFIG)/qps_interarrival_test
qps_json_driver: $(BINDIR)/$(CONFIG)/qps_json_driver
@ -1076,6 +1078,7 @@ qps_openloop_test: $(BINDIR)/$(CONFIG)/qps_openloop_test
qps_worker: $(BINDIR)/$(CONFIG)/qps_worker
reconnect_interop_client: $(BINDIR)/$(CONFIG)/reconnect_interop_client
reconnect_interop_server: $(BINDIR)/$(CONFIG)/reconnect_interop_server
round_robin_end2end_test: $(BINDIR)/$(CONFIG)/round_robin_end2end_test
secure_auth_context_test: $(BINDIR)/$(CONFIG)/secure_auth_context_test
secure_sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test
server_builder_plugin_test: $(BINDIR)/$(CONFIG)/server_builder_plugin_test
@ -1141,7 +1144,6 @@ bad_ssl_cert_server: $(BINDIR)/$(CONFIG)/bad_ssl_cert_server
bad_ssl_cert_test: $(BINDIR)/$(CONFIG)/bad_ssl_cert_test
h2_census_test: $(BINDIR)/$(CONFIG)/h2_census_test
h2_compress_test: $(BINDIR)/$(CONFIG)/h2_compress_test
h2_fake_resolver_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
h2_fakesec_test: $(BINDIR)/$(CONFIG)/h2_fakesec_test
h2_fd_test: $(BINDIR)/$(CONFIG)/h2_fd_test
h2_full_test: $(BINDIR)/$(CONFIG)/h2_full_test
@ -1160,7 +1162,6 @@ h2_ssl_proxy_test: $(BINDIR)/$(CONFIG)/h2_ssl_proxy_test
h2_uds_test: $(BINDIR)/$(CONFIG)/h2_uds_test
h2_census_nosec_test: $(BINDIR)/$(CONFIG)/h2_census_nosec_test
h2_compress_nosec_test: $(BINDIR)/$(CONFIG)/h2_compress_nosec_test
h2_fake_resolver_nosec_test: $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
h2_fd_nosec_test: $(BINDIR)/$(CONFIG)/h2_fd_nosec_test
h2_full_nosec_test: $(BINDIR)/$(CONFIG)/h2_full_nosec_test
h2_full+pipe_nosec_test: $(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test
@ -1236,9 +1237,9 @@ pc_cxx: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc
pc_cxx_unsecure: $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc
ifeq ($(EMBED_OPENSSL),true)
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libboringssl_test_util.a $(LIBDIR)/$(CONFIG)/libboringssl_aes_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_asn1_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_base64_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bio_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bn_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_bytestring_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_aead_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cipher_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_cmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ed25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x25519_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_dh_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_digest_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ec_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ecdsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_err_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_extra_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_evp_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pbkdf_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_hmac_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs12_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_pkcs8_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_poly1305_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_rsa_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_x509_test_lib.a $(LIBDIR)/$(CONFIG)/libboringssl_ssl_test_lib.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
else
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a
privatelibs_cxx: $(LIBDIR)/$(CONFIG)/libgrpc++_proto_reflection_desc_db.a $(LIBDIR)/$(CONFIG)/libgrpc++_test.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_config.a $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_cli_libs.a $(LIBDIR)/$(CONFIG)/libinterop_client_helper.a $(LIBDIR)/$(CONFIG)/libinterop_client_main.a $(LIBDIR)/$(CONFIG)/libinterop_server_helper.a $(LIBDIR)/$(CONFIG)/libinterop_server_lib.a $(LIBDIR)/$(CONFIG)/libinterop_server_main.a $(LIBDIR)/$(CONFIG)/libqps.a $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
endif
@ -1352,6 +1353,7 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/timer_list_test \
$(BINDIR)/$(CONFIG)/transport_connectivity_state_test \
$(BINDIR)/$(CONFIG)/transport_metadata_test \
$(BINDIR)/$(CONFIG)/transport_pid_controller_test \
$(BINDIR)/$(CONFIG)/transport_security_test \
$(BINDIR)/$(CONFIG)/udp_server_test \
$(BINDIR)/$(CONFIG)/uri_parser_test \
@ -1371,7 +1373,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/bad_ssl_cert_test \
$(BINDIR)/$(CONFIG)/h2_census_test \
$(BINDIR)/$(CONFIG)/h2_compress_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test \
$(BINDIR)/$(CONFIG)/h2_fakesec_test \
$(BINDIR)/$(CONFIG)/h2_fd_test \
$(BINDIR)/$(CONFIG)/h2_full_test \
@ -1390,7 +1391,6 @@ buildtests_c: privatelibs_c \
$(BINDIR)/$(CONFIG)/h2_uds_test \
$(BINDIR)/$(CONFIG)/h2_census_nosec_test \
$(BINDIR)/$(CONFIG)/h2_compress_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test \
$(BINDIR)/$(CONFIG)/h2_fd_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full_nosec_test \
$(BINDIR)/$(CONFIG)/h2_full+pipe_nosec_test \
@ -1447,6 +1447,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/json_run_localhost \
$(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \
$(BINDIR)/$(CONFIG)/noop-benchmark \
$(BINDIR)/$(CONFIG)/proto_server_reflection_test \
$(BINDIR)/$(CONFIG)/qps_interarrival_test \
$(BINDIR)/$(CONFIG)/qps_json_driver \
@ -1454,6 +1455,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/qps_worker \
$(BINDIR)/$(CONFIG)/reconnect_interop_client \
$(BINDIR)/$(CONFIG)/reconnect_interop_server \
$(BINDIR)/$(CONFIG)/round_robin_end2end_test \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
@ -1536,6 +1538,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/json_run_localhost \
$(BINDIR)/$(CONFIG)/metrics_client \
$(BINDIR)/$(CONFIG)/mock_test \
$(BINDIR)/$(CONFIG)/noop-benchmark \
$(BINDIR)/$(CONFIG)/proto_server_reflection_test \
$(BINDIR)/$(CONFIG)/qps_interarrival_test \
$(BINDIR)/$(CONFIG)/qps_json_driver \
@ -1543,6 +1546,7 @@ buildtests_cxx: privatelibs_cxx \
$(BINDIR)/$(CONFIG)/qps_worker \
$(BINDIR)/$(CONFIG)/reconnect_interop_client \
$(BINDIR)/$(CONFIG)/reconnect_interop_server \
$(BINDIR)/$(CONFIG)/round_robin_end2end_test \
$(BINDIR)/$(CONFIG)/secure_auth_context_test \
$(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
$(BINDIR)/$(CONFIG)/server_builder_plugin_test \
@ -1760,6 +1764,8 @@ test_c: buildtests_c
$(Q) $(BINDIR)/$(CONFIG)/transport_connectivity_state_test || ( echo test transport_connectivity_state_test failed ; exit 1 )
$(E) "[RUN] Testing transport_metadata_test"
$(Q) $(BINDIR)/$(CONFIG)/transport_metadata_test || ( echo test transport_metadata_test failed ; exit 1 )
$(E) "[RUN] Testing transport_pid_controller_test"
$(Q) $(BINDIR)/$(CONFIG)/transport_pid_controller_test || ( echo test transport_pid_controller_test failed ; exit 1 )
$(E) "[RUN] Testing transport_security_test"
$(Q) $(BINDIR)/$(CONFIG)/transport_security_test || ( echo test transport_security_test failed ; exit 1 )
$(E) "[RUN] Testing udp_server_test"
@ -1848,10 +1854,14 @@ test_cxx: buildtests_cxx
$(Q) $(BINDIR)/$(CONFIG)/interop_test || ( echo test interop_test failed ; exit 1 )
$(E) "[RUN] Testing mock_test"
$(Q) $(BINDIR)/$(CONFIG)/mock_test || ( echo test mock_test failed ; exit 1 )
$(E) "[RUN] Testing noop-benchmark"
$(Q) $(BINDIR)/$(CONFIG)/noop-benchmark || ( echo test noop-benchmark failed ; exit 1 )
$(E) "[RUN] Testing proto_server_reflection_test"
$(Q) $(BINDIR)/$(CONFIG)/proto_server_reflection_test || ( echo test proto_server_reflection_test failed ; exit 1 )
$(E) "[RUN] Testing qps_openloop_test"
$(Q) $(BINDIR)/$(CONFIG)/qps_openloop_test || ( echo test qps_openloop_test failed ; exit 1 )
$(E) "[RUN] Testing round_robin_end2end_test"
$(Q) $(BINDIR)/$(CONFIG)/round_robin_end2end_test || ( echo test round_robin_end2end_test failed ; exit 1 )
$(E) "[RUN] Testing secure_auth_context_test"
$(Q) $(BINDIR)/$(CONFIG)/secure_auth_context_test || ( echo test secure_auth_context_test failed ; exit 1 )
$(E) "[RUN] Testing secure_sync_unary_ping_pong_test"
@ -2687,6 +2697,7 @@ LIBGRPC_SRC = \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/method_config.c \
src/core/lib/transport/pid_controller.c \
src/core/lib/transport/static_metadata.c \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
@ -2977,6 +2988,7 @@ LIBGRPC_CRONET_SRC = \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/method_config.c \
src/core/lib/transport/pid_controller.c \
src/core/lib/transport/static_metadata.c \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
@ -3258,6 +3270,7 @@ LIBGRPC_TEST_UTIL_SRC = \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/method_config.c \
src/core/lib/transport/pid_controller.c \
src/core/lib/transport/static_metadata.c \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
@ -3468,6 +3481,7 @@ LIBGRPC_UNSECURE_SRC = \
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/method_config.c \
src/core/lib/transport/pid_controller.c \
src/core/lib/transport/static_metadata.c \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \
@ -3746,12 +3760,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/server_builder_plugin.h \
include/grpc++/impl/server_initializer.h \
include/grpc++/impl/service_type.h \
include/grpc++/impl/sync.h \
include/grpc++/impl/sync_cxx11.h \
include/grpc++/impl/sync_no_cxx11.h \
include/grpc++/impl/thd.h \
include/grpc++/impl/thd_cxx11.h \
include/grpc++/impl/thd_no_cxx11.h \
include/grpc++/resource_quota.h \
include/grpc++/security/auth_context.h \
include/grpc++/security/auth_metadata_processor.h \
@ -3799,9 +3807,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/status_helper.h \
include/grpc++/impl/codegen/string_ref.h \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync.h \
include/grpc++/impl/codegen/sync_cxx11.h \
include/grpc++/impl/codegen/sync_no_cxx11.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
@ -3941,12 +3946,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/server_builder_plugin.h \
include/grpc++/impl/server_initializer.h \
include/grpc++/impl/service_type.h \
include/grpc++/impl/sync.h \
include/grpc++/impl/sync_cxx11.h \
include/grpc++/impl/sync_no_cxx11.h \
include/grpc++/impl/thd.h \
include/grpc++/impl/thd_cxx11.h \
include/grpc++/impl/thd_no_cxx11.h \
include/grpc++/resource_quota.h \
include/grpc++/security/auth_context.h \
include/grpc++/security/auth_metadata_processor.h \
@ -3994,9 +3993,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/status_helper.h \
include/grpc++/impl/codegen/string_ref.h \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync.h \
include/grpc++/impl/codegen/sync_cxx11.h \
include/grpc++/impl/codegen/sync_no_cxx11.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
@ -4342,9 +4338,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/status_helper.h \
include/grpc++/impl/codegen/string_ref.h \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync.h \
include/grpc++/impl/codegen/sync_cxx11.h \
include/grpc++/impl/codegen/sync_no_cxx11.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
@ -4475,12 +4468,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/server_builder_plugin.h \
include/grpc++/impl/server_initializer.h \
include/grpc++/impl/service_type.h \
include/grpc++/impl/sync.h \
include/grpc++/impl/sync_cxx11.h \
include/grpc++/impl/sync_no_cxx11.h \
include/grpc++/impl/thd.h \
include/grpc++/impl/thd_cxx11.h \
include/grpc++/impl/thd_no_cxx11.h \
include/grpc++/resource_quota.h \
include/grpc++/security/auth_context.h \
include/grpc++/security/auth_metadata_processor.h \
@ -4528,9 +4515,6 @@ PUBLIC_HEADERS_CXX += \
include/grpc++/impl/codegen/status_helper.h \
include/grpc++/impl/codegen/string_ref.h \
include/grpc++/impl/codegen/stub_options.h \
include/grpc++/impl/codegen/sync.h \
include/grpc++/impl/codegen/sync_cxx11.h \
include/grpc++/impl/codegen/sync_no_cxx11.h \
include/grpc++/impl/codegen/sync_stream.h \
include/grpc++/impl/codegen/time.h \
include/grpc/impl/codegen/byte_buffer_reader.h \
@ -6767,6 +6751,55 @@ ifneq ($(NO_DEPS),true)
endif
LIBGOOGLE_BENCHMARK_SRC = \
third_party/google_benchmark/src/benchmark.cc \
third_party/google_benchmark/src/benchmark_register.cc \
third_party/google_benchmark/src/colorprint.cc \
third_party/google_benchmark/src/commandlineflags.cc \
third_party/google_benchmark/src/complexity.cc \
third_party/google_benchmark/src/console_reporter.cc \
third_party/google_benchmark/src/csv_reporter.cc \
third_party/google_benchmark/src/json_reporter.cc \
third_party/google_benchmark/src/reporter.cc \
third_party/google_benchmark/src/sleep.cc \
third_party/google_benchmark/src/string_util.cc \
third_party/google_benchmark/src/sysinfo.cc \
third_party/google_benchmark/src/timers.cc \
PUBLIC_HEADERS_CXX += \
LIBGOOGLE_BENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBGOOGLE_BENCHMARK_SRC))))
$(LIBGOOGLE_BENCHMARK_OBJS): CPPFLAGS += -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX
ifeq ($(NO_PROTOBUF),true)
# You can't build a C++ library if you don't have protobuf - a bit overreached, but still okay.
$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: protobuf_dep_error
else
$(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a: $(ZLIB_DEP) $(PROTOBUF_DEP) $(LIBGOOGLE_BENCHMARK_OBJS)
$(E) "[AR] Creating $@"
$(Q) mkdir -p `dirname $@`
$(Q) rm -f $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
$(Q) $(AR) $(AROPTS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LIBGOOGLE_BENCHMARK_OBJS)
ifeq ($(SYSTEM),Darwin)
$(Q) ranlib -no_warning_for_no_symbols $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
endif
endif
ifneq ($(NO_DEPS),true)
-include $(LIBGOOGLE_BENCHMARK_OBJS:.o=.dep)
endif
LIBZ_SRC = \
third_party/zlib/adler32.c \
third_party/zlib/compress.c \
@ -11042,6 +11075,38 @@ endif
endif
TRANSPORT_PID_CONTROLLER_TEST_SRC = \
test/core/transport/pid_controller_test.c \
TRANSPORT_PID_CONTROLLER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(TRANSPORT_PID_CONTROLLER_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/transport_pid_controller_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/transport_pid_controller_test: $(TRANSPORT_PID_CONTROLLER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(TRANSPORT_PID_CONTROLLER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/transport_pid_controller_test
endif
$(OBJDIR)/$(CONFIG)/test/core/transport/pid_controller_test.o: $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_transport_pid_controller_test: $(TRANSPORT_PID_CONTROLLER_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(TRANSPORT_PID_CONTROLLER_TEST_OBJS:.o=.dep)
endif
endif
TRANSPORT_SECURITY_TEST_SRC = \
test/core/tsi/transport_security_test.c \
@ -12701,6 +12766,49 @@ endif
endif
NOOP-BENCHMARK_SRC = \
test/cpp/microbenchmarks/noop-benchmark.cc \
NOOP-BENCHMARK_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(NOOP-BENCHMARK_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/noop-benchmark: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/noop-benchmark: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/noop-benchmark: $(PROTOBUF_DEP) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(NOOP-BENCHMARK_OBJS) $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/noop-benchmark
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/microbenchmarks/noop-benchmark.o: $(LIBDIR)/$(CONFIG)/libgoogle_benchmark.a
deps_noop-benchmark: $(NOOP-BENCHMARK_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(NOOP-BENCHMARK_OBJS:.o=.dep)
endif
endif
PROTO_SERVER_REFLECTION_TEST_SRC = \
test/cpp/end2end/proto_server_reflection_test.cc \
@ -13022,6 +13130,49 @@ endif
$(OBJDIR)/$(CONFIG)/test/cpp/interop/reconnect_interop_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc
ROUND_ROBIN_END2END_TEST_SRC = \
test/cpp/end2end/round_robin_end2end_test.cc \
ROUND_ROBIN_END2END_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(ROUND_ROBIN_END2END_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/round_robin_end2end_test: openssl_dep_error
else
ifeq ($(NO_PROTOBUF),true)
# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
$(BINDIR)/$(CONFIG)/round_robin_end2end_test: protobuf_dep_error
else
$(BINDIR)/$(CONFIG)/round_robin_end2end_test: $(PROTOBUF_DEP) $(ROUND_ROBIN_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LDXX) $(LDFLAGS) $(ROUND_ROBIN_END2END_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/round_robin_end2end_test
endif
endif
$(OBJDIR)/$(CONFIG)/test/cpp/end2end/round_robin_end2end_test.o: $(LIBDIR)/$(CONFIG)/libgrpc++_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_round_robin_end2end_test: $(ROUND_ROBIN_END2END_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(ROUND_ROBIN_END2END_TEST_OBJS:.o=.dep)
endif
endif
SECURE_AUTH_CONTEXT_TEST_SRC = \
test/cpp/common/secure_auth_context_test.cc \
@ -14957,38 +15108,6 @@ endif
endif
H2_FAKE_RESOLVER_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_TEST_SRC))))
ifeq ($(NO_SECURE),true)
# You can't build secure targets if you don't have OpenSSL.
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: openssl_dep_error
else
$(BINDIR)/$(CONFIG)/h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_test
endif
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_test: $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
ifneq ($(NO_SECURE),true)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_TEST_OBJS:.o=.dep)
endif
endif
H2_FAKESEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fakesec.c \
@ -15541,26 +15660,6 @@ ifneq ($(NO_DEPS),true)
endif
H2_FAKE_RESOLVER_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fake_resolver.c \
H2_FAKE_RESOLVER_NOSEC_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(H2_FAKE_RESOLVER_NOSEC_TEST_SRC))))
$(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
$(E) "[LD] Linking $@"
$(Q) mkdir -p `dirname $@`
$(Q) $(LD) $(LDFLAGS) $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) -o $(BINDIR)/$(CONFIG)/h2_fake_resolver_nosec_test
$(OBJDIR)/$(CONFIG)/test/core/end2end/fixtures/h2_fake_resolver.o: $(LIBDIR)/$(CONFIG)/libend2end_nosec_tests.a $(LIBDIR)/$(CONFIG)/libgrpc_test_util_unsecure.a $(LIBDIR)/$(CONFIG)/libgrpc_unsecure.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
deps_h2_fake_resolver_nosec_test: $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
ifneq ($(NO_DEPS),true)
-include $(H2_FAKE_RESOLVER_NOSEC_TEST_OBJS:.o=.dep)
endif
H2_FD_NOSEC_TEST_SRC = \
test/core/end2end/fixtures/h2_fd.c \

binding.gyp (1)

@ -671,6 +671,7 @@
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/method_config.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',

build.yaml (45)

@ -256,6 +256,7 @@ filegroups:
- src/core/lib/transport/metadata.h
- src/core/lib/transport/metadata_batch.h
- src/core/lib/transport/method_config.h
- src/core/lib/transport/pid_controller.h
- src/core/lib/transport/static_metadata.h
- src/core/lib/transport/timeout_encoding.h
- src/core/lib/transport/transport.h
@ -364,6 +365,7 @@ filegroups:
- src/core/lib/transport/metadata.c
- src/core/lib/transport/metadata_batch.c
- src/core/lib/transport/method_config.c
- src/core/lib/transport/pid_controller.c
- src/core/lib/transport/static_metadata.c
- src/core/lib/transport/timeout_encoding.c
- src/core/lib/transport/transport.c
@ -684,8 +686,6 @@ filegroups:
deps:
- gpr
secure: true
uses:
- grpc_base
- name: grpc++_base
language: c++
public_headers:
@ -710,12 +710,6 @@ filegroups:
- include/grpc++/impl/server_builder_plugin.h
- include/grpc++/impl/server_initializer.h
- include/grpc++/impl/service_type.h
- include/grpc++/impl/sync.h
- include/grpc++/impl/sync_cxx11.h
- include/grpc++/impl/sync_no_cxx11.h
- include/grpc++/impl/thd.h
- include/grpc++/impl/thd_cxx11.h
- include/grpc++/impl/thd_no_cxx11.h
- include/grpc++/resource_quota.h
- include/grpc++/security/auth_context.h
- include/grpc++/security/auth_metadata_processor.h
@ -802,9 +796,6 @@ filegroups:
- include/grpc++/impl/codegen/status_helper.h
- include/grpc++/impl/codegen/string_ref.h
- include/grpc++/impl/codegen/stub_options.h
- include/grpc++/impl/codegen/sync.h
- include/grpc++/impl/codegen/sync_cxx11.h
- include/grpc++/impl/codegen/sync_no_cxx11.h
- include/grpc++/impl/codegen/sync_stream.h
- include/grpc++/impl/codegen/time.h
uses:
@ -2679,6 +2670,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: transport_pid_controller_test
build: test
language: c
src:
- test/core/transport/pid_controller_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: transport_security_test
build: test
language: c
@ -3214,6 +3215,13 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: noop-benchmark
build: test
language: c++
src:
- test/cpp/microbenchmarks/noop-benchmark.cc
deps:
- google_benchmark
- name: proto_server_reflection_test
gtest: true
build: test
@ -3335,6 +3343,19 @@ targets:
- gpr_test_util
- gpr
- grpc++_test_config
- name: round_robin_end2end_test
gtest: true
build: test
language: c++
src:
- test/cpp/end2end/round_robin_end2end_test.cc
deps:
- grpc++_test_util
- grpc_test_util
- grpc++
- grpc
- gpr_test_util
- gpr
- name: secure_auth_context_test
gtest: true
build: test
@ -3694,6 +3715,8 @@ defaults:
global:
CPPFLAGS: -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter
LDFLAGS: -g
google_benchmark:
CPPFLAGS: -Ithird_party/google_benchmark/include -DHAVE_POSIX_REGEX
zlib:
CFLAGS: -Wno-sign-conversion -Wno-conversion -Wno-unused-value -Wno-implicit-function-declaration
$(W_NO_SHIFT_NEGATIVE_VALUE) -fvisibility=hidden

config.m4 (1)

@ -187,6 +187,7 @@ if test "$PHP_GRPC" != "no"; then
src/core/lib/transport/metadata.c \
src/core/lib/transport/metadata_batch.c \
src/core/lib/transport/method_config.c \
src/core/lib/transport/pid_controller.c \
src/core/lib/transport/static_metadata.c \
src/core/lib/transport/timeout_encoding.c \
src/core/lib/transport/transport.c \

doc/cpp-style-guide.md (88)

@ -1,91 +1,9 @@
GRPC C++ STYLE GUIDE
=====================
Background
----------
Here we document style rules for C++ usage in the gRPC C++ bindings
and tests.
General
-------
- The majority of gRPC's C++ requirements are drawn from the [Google C++ style
guide] (https://google.github.io/styleguide/cppguide.html)
- However, gRPC has some additional requirements to maintain
[portability] (#portability)
- As in C, layout rules are defined by clang-format, and all code
The majority of gRPC's C++ requirements are drawn from the [Google C++ style
guide] (https://google.github.io/styleguide/cppguide.html). Additionally,
as in C, layout rules are defined by clang-format, and all code
should be passed through clang-format. A (docker-based) script to do
so is included in [tools/distrib/clang\_format\_code.sh]
(../tools/distrib/clang_format_code.sh).
<a name="portability"></a>
Portability Restrictions
-------------------
gRPC supports a large number of compilers, ranging from those that are
missing many key C++11 features to those that have quite detailed
analysis. As a result, gRPC compiles with a high level of warnings and
treats all warnings as errors. gRPC also forbids the use of some common
C++11 constructs. Here are some guidelines, to be extended as needed:
- Do not use range-based for. Expressions of the form
```c
for (auto& i: vec) {
// code
}
```
are not allowed and should be replaced with code such as
```c
for (auto it = vec.begin(); it != vec.end(); it++) {
auto& i = *it;
// code
}
```
- Do not use lambda of any kind (no capture, explicit capture, or
default capture). Other C++ functional features such as
`std::function` or `std::bind` are allowed
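For example, a simple callback can be written without a lambda (illustrative sketch only; `Add` is a hypothetical helper):
```c
#include <functional>

int Add(int a, int b) { return a + b; }

// Instead of: auto add_five = [](int x) { return Add(x, 5); };
std::function<int(int)> add_five = std::bind(Add, std::placeholders::_1, 5);
// add_five(3) evaluates to 8.
```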
- Do not use brace-list initializers.
- Do not compare a pointer to `nullptr`. This is because gcc 4.4
does not support `nullptr` directly and gRPC implements a subset of
its features in [include/grpc++/impl/codegen/config.h]
(../include/grpc++/impl/codegen/config.h). Instead, pointers should
be checked for validity using their implicit conversion to `bool`.
In other words, use `if (p)` rather than `if (p != nullptr)`
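For example (illustrative sketch; `Reader` is a hypothetical class):
```c
class Reader {
 public:
  void Read();
};

void MaybeRead(Reader* r) {
  if (r) {  // preferred: rely on the implicit conversion to bool
    r->Read();
  }
  // Avoid: if (r != nullptr) { ... }
}
```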
- Do not initialize global/static pointer variables to `nullptr`. Just let
the compiler implicitly initialize them to `nullptr` (which it will
definitely do). The reason is that `nullptr` is an actual object in
our implementation rather than just a constant pointer value, so
static/global constructors will be called in a potentially
undesirable sequence.
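For example (illustrative sketch; `Tracker` is a hypothetical type):
```c
class Tracker;

// Preferred: the pointer is zero-initialized before any constructor runs.
static Tracker* g_tracker;

// Avoid: because nullptr is an object in this implementation, the line below
// would register a static constructor with unpredictable ordering.
// static Tracker* g_tracker = nullptr;
```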
- Do not use `final` or `override` as these are not supported by some
compilers. Instead use `GRPC_FINAL` and `GRPC_OVERRIDE`. These
compile down to the traditional C++ forms for compilers that support
them but are just elided if the compiler does not support those features.
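For example (illustrative sketch; `BaseReactor` and `CountingReactor` are hypothetical classes, and the macros come from the config header mentioned above):
```c
#include <grpc++/impl/codegen/config.h>  // defines GRPC_FINAL / GRPC_OVERRIDE

class BaseReactor {
 public:
  virtual ~BaseReactor() {}
  virtual void OnDone() = 0;
};

class CountingReactor GRPC_FINAL : public BaseReactor {
 public:
  CountingReactor() : done_count_(0) {}
  // Expands to `override` (and `final` above) where supported, else to nothing.
  void OnDone() GRPC_OVERRIDE { ++done_count_; }

 private:
  int done_count_;
};
```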
- In the [include] (../../../tree/master/include/grpc++) and [src]
(../../../tree/master/src/cpp) directory trees, you should also not
use certain STL objects like `std::mutex`, `std::lock_guard`,
`std::unique_lock`, `std::nullptr`, `std::thread`. Instead, use
`grpc::mutex`, `grpc::lock_guard`, etc., which are gRPC
implementations of the prominent features of these objects that are
not always available. You can use the `std` versions of those in [test]
(../../../tree/master/test/cpp)
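For example (illustrative sketch; `Counter` is a hypothetical class, and `grpc::mutex`/`grpc::lock_guard` are the wrappers from `<grpc++/impl/sync.h>` referenced above):
```c
#include <grpc++/impl/sync.h>

class Counter {
 public:
  Counter() : value_(0) {}
  void Increment() {
    grpc::lock_guard<grpc::mutex> lock(mu_);  // not std::lock_guard<std::mutex>
    ++value_;
  }

 private:
  grpc::mutex mu_;
  int value_;
};
```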
- Similarly, in the same directories, do not use `std::chrono` unless
it is guarded by `#ifndef GRPC_CXX0X_NO_CHRONO`. For platforms that
lack `std::chrono`, there is a C-language timer called `gpr_timespec` that can
be used instead.
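For example, a 100 ms deadline computed both ways (illustrative sketch):
```c
#include <grpc/support/time.h>
#ifndef GRPC_CXX0X_NO_CHRONO
#include <chrono>
#endif

void DeadlineExample() {
#ifndef GRPC_CXX0X_NO_CHRONO  // provided by include/grpc++/impl/codegen/config.h
  std::chrono::system_clock::time_point deadline =
      std::chrono::system_clock::now() + std::chrono::milliseconds(100);
  (void)deadline;
#else
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
  (void)deadline;
#endif
}
```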
- `std::unique_ptr` must be used with extreme care in any kind of
collection. For example `vector<std::unique_ptr>` does not work in
gcc 4.4 if the vector is constructed to its full size at
initialization but does work if elements are added to the vector
using functions like `push_back`. `map` and other pair-based
collections do not work with `unique_ptr` under gcc 4.4. The issue
is that many of these collection implementations assume a copy constructor to be available.
- Don't use `std::this_thread`. Use `gpr_sleep_until` for sleeping a thread.
- [Some adjacent character combinations cause problems]
(https://en.wikipedia.org/wiki/Digraphs_and_trigraphs#C). If declaring a
template against some class relative to the global namespace,
`<::name` will be non-portable. Separate the `<` from the `:` and use `< ::name`.
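For example (illustrative sketch; `Wrapper` and `detail::Impl` are hypothetical):
```c
namespace detail { class Impl {}; }
template <class T> class Wrapper {};

// Non-portable: "<:" is an alternative token (digraph) for "[", so
//   Wrapper<::detail::Impl> w;
// can fail to parse on older conforming compilers.

// Portable: keep a space between "<" and "::".
Wrapper< ::detail::Impl> w;
```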

gRPC-Core.podspec (3)

@ -339,6 +339,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/method_config.h',
'src/core/lib/transport/pid_controller.h',
'src/core/lib/transport/static_metadata.h',
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',
@ -527,6 +528,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/method_config.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',
@ -735,6 +737,7 @@ Pod::Spec.new do |s|
'src/core/lib/transport/metadata.h',
'src/core/lib/transport/metadata_batch.h',
'src/core/lib/transport/method_config.h',
'src/core/lib/transport/pid_controller.h',
'src/core/lib/transport/static_metadata.h',
'src/core/lib/transport/timeout_encoding.h',
'src/core/lib/transport/transport.h',

grpc.gemspec (2)

@ -259,6 +259,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/metadata.h )
s.files += %w( src/core/lib/transport/metadata_batch.h )
s.files += %w( src/core/lib/transport/method_config.h )
s.files += %w( src/core/lib/transport/pid_controller.h )
s.files += %w( src/core/lib/transport/static_metadata.h )
s.files += %w( src/core/lib/transport/timeout_encoding.h )
s.files += %w( src/core/lib/transport/transport.h )
@ -447,6 +448,7 @@ Gem::Specification.new do |s|
s.files += %w( src/core/lib/transport/metadata.c )
s.files += %w( src/core/lib/transport/metadata_batch.c )
s.files += %w( src/core/lib/transport/method_config.c )
s.files += %w( src/core/lib/transport/pid_controller.c )
s.files += %w( src/core/lib/transport/static_metadata.c )
s.files += %w( src/core/lib/transport/timeout_encoding.c )
s.files += %w( src/core/lib/transport/transport.c )

include/grpc++/alarm.h (2)

@ -78,7 +78,7 @@ class Alarm : private GrpcLibraryCodegen {
class AlarmEntry : public CompletionQueueTag {
public:
AlarmEntry(void* tag) : tag_(tag) {}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
*tag = tag_;
return true;
}

include/grpc++/channel.h (14)

@ -46,7 +46,7 @@ struct grpc_channel;
namespace grpc {
/// Channels represent a connection to an endpoint. Created by \a CreateChannel.
class Channel GRPC_FINAL : public ChannelInterface,
class Channel final : public ChannelInterface,
public CallHook,
public std::enable_shared_from_this<Channel>,
private GrpcLibraryCodegen {
@ -55,7 +55,7 @@ class Channel GRPC_FINAL : public ChannelInterface,
/// Get the current channel state. If the channel is in IDLE and
/// \a try_to_connect is set to true, try to connect.
grpc_connectivity_state GetState(bool try_to_connect) GRPC_OVERRIDE;
grpc_connectivity_state GetState(bool try_to_connect) override;
private:
template <class InputMessage, class OutputMessage>
@ -69,15 +69,15 @@ class Channel GRPC_FINAL : public ChannelInterface,
Channel(const grpc::string& host, grpc_channel* c_channel);
Call CreateCall(const RpcMethod& method, ClientContext* context,
CompletionQueue* cq) GRPC_OVERRIDE;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE;
void* RegisterMethod(const char* method) GRPC_OVERRIDE;
CompletionQueue* cq) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
void* RegisterMethod(const char* method) override;
void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline, CompletionQueue* cq,
void* tag) GRPC_OVERRIDE;
void* tag) override;
bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
gpr_timespec deadline) GRPC_OVERRIDE;
gpr_timespec deadline) override;
const grpc::string host_;
grpc_channel* const c_channel_; // owned

include/grpc++/ext/proto_server_reflection_plugin.h (12)

@ -48,12 +48,12 @@ namespace reflection {
class ProtoServerReflectionPlugin : public ::grpc::ServerBuilderPlugin {
public:
ProtoServerReflectionPlugin();
::grpc::string name() GRPC_OVERRIDE;
void InitServer(::grpc::ServerInitializer* si) GRPC_OVERRIDE;
void Finish(::grpc::ServerInitializer* si) GRPC_OVERRIDE;
void ChangeArguments(const ::grpc::string& name, void* value) GRPC_OVERRIDE;
bool has_async_methods() const GRPC_OVERRIDE;
bool has_sync_methods() const GRPC_OVERRIDE;
::grpc::string name() override;
void InitServer(::grpc::ServerInitializer* si) override;
void Finish(::grpc::ServerInitializer* si) override;
void ChangeArguments(const ::grpc::string& name, void* value) override;
bool has_async_methods() const override;
bool has_sync_methods() const override;
private:
std::shared_ptr<grpc::ProtoServerReflection> reflection_service_;

include/grpc++/generic/async_generic_service.h (4)

@ -44,7 +44,7 @@ namespace grpc {
typedef ServerAsyncReaderWriter<ByteBuffer, ByteBuffer>
GenericServerAsyncReaderWriter;
class GenericServerContext GRPC_FINAL : public ServerContext {
class GenericServerContext final : public ServerContext {
public:
const grpc::string& method() const { return method_; }
const grpc::string& host() const { return host_; }
@ -57,7 +57,7 @@ class GenericServerContext GRPC_FINAL : public ServerContext {
grpc::string host_;
};
class AsyncGenericService GRPC_FINAL {
class AsyncGenericService final {
public:
AsyncGenericService() : server_(nullptr) {}

include/grpc++/generic/generic_stub.h (2)

@ -45,7 +45,7 @@ typedef ClientAsyncReaderWriter<ByteBuffer, ByteBuffer>
// Generic stubs provide a type-unsafe interface to call gRPC methods
// by name.
class GenericStub GRPC_FINAL {
class GenericStub final {
public:
explicit GenericStub(std::shared_ptr<ChannelInterface> channel)
: channel_(channel) {}

include/grpc++/impl/codegen/async_stream.h (64)

@ -108,7 +108,7 @@ class ClientAsyncReaderInterface : public ClientAsyncStreamingInterface,
public AsyncReaderInterface<R> {};
template <class R>
class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
class ClientAsyncReader final : public ClientAsyncReaderInterface<R> {
public:
/// Create a stream and write the first request out.
template <class W>
@ -125,7 +125,7 @@ class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
void ReadInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@ -133,7 +133,7 @@ class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
void Read(R* msg, void* tag) override {
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@ -142,7 +142,7 @@ class ClientAsyncReader GRPC_FINAL : public ClientAsyncReaderInterface<R> {
call_.PerformOps(&read_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
void Finish(Status* status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -174,7 +174,7 @@ class ClientAsyncWriterInterface : public ClientAsyncStreamingInterface,
};
template <class W>
class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
class ClientAsyncWriter final : public ClientAsyncWriterInterface<W> {
public:
template <class R>
ClientAsyncWriter(ChannelInterface* channel, CompletionQueue* cq,
@ -190,7 +190,7 @@ class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
void ReadInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@ -198,20 +198,20 @@ class ClientAsyncWriter GRPC_FINAL : public ClientAsyncWriterInterface<W> {
call_.PerformOps(&meta_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
void Write(const W& msg, void* tag) override {
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) GRPC_OVERRIDE {
void WritesDone(void* tag) override {
writes_done_ops_.set_output_tag(tag);
writes_done_ops_.ClientSendClose();
call_.PerformOps(&writes_done_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
void Finish(Status* status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -246,7 +246,7 @@ class ClientAsyncReaderWriterInterface : public ClientAsyncStreamingInterface,
};
template <class W, class R>
class ClientAsyncReaderWriter GRPC_FINAL
class ClientAsyncReaderWriter final
: public ClientAsyncReaderWriterInterface<W, R> {
public:
ClientAsyncReaderWriter(ChannelInterface* channel, CompletionQueue* cq,
@ -259,7 +259,7 @@ class ClientAsyncReaderWriter GRPC_FINAL
call_.PerformOps(&init_ops_);
}
void ReadInitialMetadata(void* tag) GRPC_OVERRIDE {
void ReadInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
meta_ops_.set_output_tag(tag);
@ -267,7 +267,7 @@ class ClientAsyncReaderWriter GRPC_FINAL
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
void Read(R* msg, void* tag) override {
read_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
read_ops_.RecvInitialMetadata(context_);
@ -276,20 +276,20 @@ class ClientAsyncReaderWriter GRPC_FINAL
call_.PerformOps(&read_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
void Write(const W& msg, void* tag) override {
write_ops_.set_output_tag(tag);
// TODO(ctiller): don't assert
GPR_CODEGEN_ASSERT(write_ops_.SendMessage(msg).ok());
call_.PerformOps(&write_ops_);
}
void WritesDone(void* tag) GRPC_OVERRIDE {
void WritesDone(void* tag) override {
writes_done_ops_.set_output_tag(tag);
writes_done_ops_.ClientSendClose();
call_.PerformOps(&writes_done_ops_);
}
void Finish(Status* status, void* tag) GRPC_OVERRIDE {
void Finish(Status* status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -319,12 +319,12 @@ class ServerAsyncReaderInterface : public ServerAsyncStreamingInterface,
};
template <class W, class R>
class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface<W, R> {
class ServerAsyncReader final : public ServerAsyncReaderInterface<W, R> {
public:
explicit ServerAsyncReader(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
void SendInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
@ -337,13 +337,13 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface<W, R> {
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
void Read(R* msg, void* tag) override {
read_ops_.set_output_tag(tag);
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Finish(const W& msg, const Status& status, void* tag) GRPC_OVERRIDE {
void Finish(const W& msg, const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_,
@ -363,7 +363,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface<W, R> {
call_.PerformOps(&finish_ops_);
}
void FinishWithError(const Status& status, void* tag) GRPC_OVERRIDE {
void FinishWithError(const Status& status, void* tag) override {
GPR_CODEGEN_ASSERT(!status.ok());
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
@ -379,7 +379,7 @@ class ServerAsyncReader GRPC_FINAL : public ServerAsyncReaderInterface<W, R> {
}
private:
void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
void BindCall(Call* call) override { call_ = *call; }
Call call_;
ServerContext* ctx_;
@ -398,12 +398,12 @@ class ServerAsyncWriterInterface : public ServerAsyncStreamingInterface,
};
template <class W>
class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface<W> {
class ServerAsyncWriter final : public ServerAsyncWriterInterface<W> {
public:
explicit ServerAsyncWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
void SendInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
@ -416,7 +416,7 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface<W> {
call_.PerformOps(&meta_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
void Write(const W& msg, void* tag) override {
write_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
write_ops_.SendInitialMetadata(ctx_->initial_metadata_,
@ -431,7 +431,7 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface<W> {
call_.PerformOps(&write_ops_);
}
void Finish(const Status& status, void* tag) GRPC_OVERRIDE {
void Finish(const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_,
@ -446,7 +446,7 @@ class ServerAsyncWriter GRPC_FINAL : public ServerAsyncWriterInterface<W> {
}
private:
void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
void BindCall(Call* call) override { call_ = *call; }
Call call_;
ServerContext* ctx_;
@ -465,13 +465,13 @@ class ServerAsyncReaderWriterInterface : public ServerAsyncStreamingInterface,
};
template <class W, class R>
class ServerAsyncReaderWriter GRPC_FINAL
class ServerAsyncReaderWriter final
: public ServerAsyncReaderWriterInterface<W, R> {
public:
explicit ServerAsyncReaderWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
void SendInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
meta_ops_.set_output_tag(tag);
@ -484,13 +484,13 @@ class ServerAsyncReaderWriter GRPC_FINAL
call_.PerformOps(&meta_ops_);
}
void Read(R* msg, void* tag) GRPC_OVERRIDE {
void Read(R* msg, void* tag) override {
read_ops_.set_output_tag(tag);
read_ops_.RecvMessage(msg);
call_.PerformOps(&read_ops_);
}
void Write(const W& msg, void* tag) GRPC_OVERRIDE {
void Write(const W& msg, void* tag) override {
write_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
write_ops_.SendInitialMetadata(ctx_->initial_metadata_,
@ -505,7 +505,7 @@ class ServerAsyncReaderWriter GRPC_FINAL
call_.PerformOps(&write_ops_);
}
void Finish(const Status& status, void* tag) GRPC_OVERRIDE {
void Finish(const Status& status, void* tag) override {
finish_ops_.set_output_tag(tag);
if (!ctx_->sent_initial_metadata_) {
finish_ops_.SendInitialMetadata(ctx_->initial_metadata_,
@ -522,7 +522,7 @@ class ServerAsyncReaderWriter GRPC_FINAL
private:
friend class ::grpc::Server;
void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
void BindCall(Call* call) override { call_ = *call; }
Call call_;
ServerContext* ctx_;

@ -55,7 +55,7 @@ class ClientAsyncResponseReaderInterface {
};
template <class R>
class ClientAsyncResponseReader GRPC_FINAL
class ClientAsyncResponseReader final
: public ClientAsyncResponseReaderInterface<R> {
public:
template <class W>
@ -113,13 +113,12 @@ class ClientAsyncResponseReader GRPC_FINAL
};
template <class W>
class ServerAsyncResponseWriter GRPC_FINAL
: public ServerAsyncStreamingInterface {
class ServerAsyncResponseWriter final : public ServerAsyncStreamingInterface {
public:
explicit ServerAsyncResponseWriter(ServerContext* ctx)
: call_(nullptr, nullptr, nullptr), ctx_(ctx) {}
void SendInitialMetadata(void* tag) GRPC_OVERRIDE {
void SendInitialMetadata(void* tag) override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
meta_buf_.set_output_tag(tag);
@ -168,7 +167,7 @@ class ServerAsyncResponseWriter GRPC_FINAL
}
private:
void BindCall(Call* call) GRPC_OVERRIDE { call_ = *call; }
void BindCall(Call* call) override { call_ = *call; }
Call call_;
ServerContext* ctx_;

@ -337,16 +337,16 @@ class DeserializeFunc {
};
template <class R>
class DeserializeFuncType GRPC_FINAL : public DeserializeFunc {
class DeserializeFuncType final : public DeserializeFunc {
public:
DeserializeFuncType(R* message) : message_(message) {}
Status Deserialize(grpc_byte_buffer* buf,
int max_receive_message_size) GRPC_OVERRIDE {
int max_receive_message_size) override {
return SerializationTraits<R>::Deserialize(buf, message_,
max_receive_message_size);
}
~DeserializeFuncType() GRPC_OVERRIDE {}
~DeserializeFuncType() override {}
private:
R* message_; // Not a managed pointer because management is external to this
@ -603,7 +603,7 @@ class CallOpSet : public CallOpSetInterface,
public Op6 {
public:
CallOpSet() : return_tag_(this) {}
void FillOps(grpc_op* ops, size_t* nops) GRPC_OVERRIDE {
void FillOps(grpc_op* ops, size_t* nops) override {
this->Op1::AddOp(ops, nops);
this->Op2::AddOp(ops, nops);
this->Op3::AddOp(ops, nops);
@ -612,7 +612,7 @@ class CallOpSet : public CallOpSetInterface,
this->Op6::AddOp(ops, nops);
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
this->Op1::FinishOp(status, max_receive_message_size_);
this->Op2::FinishOp(status, max_receive_message_size_);
this->Op3::FinishOp(status, max_receive_message_size_);
@ -639,14 +639,14 @@ template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class SneakyCallOpSet : public CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> {
public:
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
typedef CallOpSet<Op1, Op2, Op3, Op4, Op5, Op6> Base;
return Base::FinalizeResult(tag, status) && false;
}
};
// Straightforward wrapping of the C call object
class Call GRPC_FINAL {
class Call final {
public:
/* call is owned by the caller */
Call(grpc_call* call, CallHook* call_hook, CompletionQueue* cq)

@ -51,6 +51,7 @@
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <grpc++/impl/codegen/config.h>
@ -59,7 +60,6 @@
#include <grpc++/impl/codegen/security/auth_context.h>
#include <grpc++/impl/codegen/status.h>
#include <grpc++/impl/codegen/string_ref.h>
#include <grpc++/impl/codegen/sync.h>
#include <grpc++/impl/codegen/time.h>
#include <grpc/impl/codegen/compression_types.h>
#include <grpc/impl/codegen/propagation_bits.h>
@ -235,12 +235,10 @@ class ClientContext {
/// DEPRECATED: Use set_wait_for_ready() instead.
void set_fail_fast(bool fail_fast) { set_wait_for_ready(!fail_fast); }
#ifndef GRPC_CXX0X_NO_CHRONO
/// Return the deadline for the client call.
std::chrono::system_clock::time_point deadline() const {
return Timespec2Timepoint(deadline_);
}
#endif // !GRPC_CXX0X_NO_CHRONO
/// Return a \a gpr_timespec representation of the client call's deadline.
gpr_timespec raw_deadline() const { return deadline_; }
@ -368,7 +366,7 @@ class ClientContext {
bool idempotent_;
bool cacheable_;
std::shared_ptr<Channel> channel_;
grpc::mutex mu_;
std::mutex mu_;
grpc_call* call_;
bool call_canceled_;
gpr_timespec deadline_;
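With the GRPC_CXX0X_NO_CHRONO guard removed, deadline() is unconditionally a std::chrono time point. A minimal sketch of the round trip (the 500 ms budget is an arbitrary example):

#include <chrono>
#include <grpc++/client_context.h>

void ArmDeadline(grpc::ClientContext* ctx) {
  ctx->set_deadline(std::chrono::system_clock::now() +
                    std::chrono::milliseconds(500));
  // Read it back as the same time point type; raw_deadline() still exposes
  // the underlying gpr_timespec when core-level code needs it.
  std::chrono::system_clock::time_point dl = ctx->deadline();
  (void)dl;
}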

@ -34,80 +34,6 @@
#ifndef GRPCXX_IMPL_CODEGEN_CONFIG_H
#define GRPCXX_IMPL_CODEGEN_CONFIG_H
#if !defined(GRPC_NO_AUTODETECT_PLATFORM)
#ifdef _MSC_VER
// Visual Studio 2010 is 1600.
#if _MSC_VER < 1600
#error "gRPC is only supported with Visual Studio starting at 2010"
// Visual Studio 2013 is 1800.
#elif _MSC_VER < 1800
#define GRPC_CXX0X_NO_FINAL 1
#define GRPC_CXX0X_NO_OVERRIDE 1
#define GRPC_CXX0X_NO_CHRONO 1
#define GRPC_CXX0X_NO_THREAD 1
#endif
#endif // Visual Studio
#ifndef __clang__
#ifdef __GNUC__
// nullptr was added in gcc 4.6
#if (__GNUC__ * 100 + __GNUC_MINOR__ < 406)
#define GRPC_CXX0X_NO_NULLPTR 1
#define GRPC_CXX0X_LIMITED_TOSTRING 1
#endif
// final and override were added in gcc 4.7
#if (__GNUC__ * 100 + __GNUC_MINOR__ < 407)
#define GRPC_CXX0X_NO_FINAL 1
#define GRPC_CXX0X_NO_OVERRIDE 1
#endif
#endif
#endif
#endif
#ifdef GRPC_CXX0X_NO_FINAL
#define GRPC_FINAL
#else
#define GRPC_FINAL final
#endif
#ifdef GRPC_CXX0X_NO_OVERRIDE
#define GRPC_OVERRIDE
#else
#define GRPC_OVERRIDE override
#endif
#ifdef GRPC_CXX0X_NO_NULLPTR
#include <functional>
#include <memory>
namespace grpc {
const class {
public:
template <class T>
operator T *() const {
return static_cast<T *>(0);
}
template <class T>
operator std::unique_ptr<T>() const {
return std::unique_ptr<T>(static_cast<T *>(0));
}
template <class T>
operator std::shared_ptr<T>() const {
return std::shared_ptr<T>(static_cast<T *>(0));
}
operator bool() const { return false; }
template <class F>
operator std::function<F>() const {
return std::function<F>();
}
private:
void operator&() const = delete;
} nullptr = {};
}
#endif
#ifndef GRPC_CUSTOM_STRING
#include <string>
#define GRPC_CUSTOM_STRING std::string
@ -117,16 +43,7 @@ namespace grpc {
typedef GRPC_CUSTOM_STRING string;
#ifdef GRPC_CXX0X_LIMITED_TOSTRING
inline grpc::string to_string(const int x) {
return std::to_string(static_cast<const long long int>(x));
}
inline grpc::string to_string(const unsigned int x) {
return std::to_string(static_cast<const long long unsigned int>(x));
}
#else
using std::to_string;
#endif
} // namespace grpc
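After this cleanup the aliases are unconditional: grpc::string is std::string (unless GRPC_CUSTOM_STRING overrides it) and grpc::to_string is simply std::to_string. A tiny sketch:

#include <grpc++/support/config.h>

grpc::string Describe(int code) {
  return "status code " + grpc::to_string(code);  // std::to_string underneath
}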

@ -45,56 +45,53 @@ namespace grpc {
/// Implementation of the core codegen interface.
class CoreCodegen : public CoreCodegenInterface {
private:
grpc_completion_queue* grpc_completion_queue_create(void* reserved)
GRPC_OVERRIDE;
void grpc_completion_queue_destroy(grpc_completion_queue* cq) GRPC_OVERRIDE;
grpc_completion_queue* grpc_completion_queue_create(void* reserved) override;
void grpc_completion_queue_destroy(grpc_completion_queue* cq) override;
grpc_event grpc_completion_queue_pluck(grpc_completion_queue* cq, void* tag,
gpr_timespec deadline,
void* reserved) GRPC_OVERRIDE;
void* reserved) override;
void* gpr_malloc(size_t size) GRPC_OVERRIDE;
void gpr_free(void* p) GRPC_OVERRIDE;
void* gpr_malloc(size_t size) override;
void gpr_free(void* p) override;
void gpr_mu_init(gpr_mu* mu) GRPC_OVERRIDE;
void gpr_mu_destroy(gpr_mu* mu) GRPC_OVERRIDE;
void gpr_mu_lock(gpr_mu* mu) GRPC_OVERRIDE;
void gpr_mu_unlock(gpr_mu* mu) GRPC_OVERRIDE;
void gpr_cv_init(gpr_cv* cv) GRPC_OVERRIDE;
void gpr_cv_destroy(gpr_cv* cv) GRPC_OVERRIDE;
int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu,
gpr_timespec abs_deadline) GRPC_OVERRIDE;
void gpr_cv_signal(gpr_cv* cv) GRPC_OVERRIDE;
void gpr_cv_broadcast(gpr_cv* cv) GRPC_OVERRIDE;
void gpr_mu_init(gpr_mu* mu) override;
void gpr_mu_destroy(gpr_mu* mu) override;
void gpr_mu_lock(gpr_mu* mu) override;
void gpr_mu_unlock(gpr_mu* mu) override;
void gpr_cv_init(gpr_cv* cv) override;
void gpr_cv_destroy(gpr_cv* cv) override;
int gpr_cv_wait(gpr_cv* cv, gpr_mu* mu, gpr_timespec abs_deadline) override;
void gpr_cv_signal(gpr_cv* cv) override;
void gpr_cv_broadcast(gpr_cv* cv) override;
void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) GRPC_OVERRIDE;
void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) override;
int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
grpc_byte_buffer* buffer) GRPC_OVERRIDE;
void grpc_byte_buffer_reader_destroy(grpc_byte_buffer_reader* reader)
GRPC_OVERRIDE;
grpc_byte_buffer* buffer) override;
void grpc_byte_buffer_reader_destroy(
grpc_byte_buffer_reader* reader) override;
int grpc_byte_buffer_reader_next(grpc_byte_buffer_reader* reader,
gpr_slice* slice) GRPC_OVERRIDE;
gpr_slice* slice) override;
grpc_byte_buffer* grpc_raw_byte_buffer_create(gpr_slice* slice,
size_t nslices) GRPC_OVERRIDE;
size_t nslices) override;
gpr_slice gpr_slice_malloc(size_t length) GRPC_OVERRIDE;
void gpr_slice_unref(gpr_slice slice) GRPC_OVERRIDE;
gpr_slice gpr_slice_split_tail(gpr_slice* s, size_t split) GRPC_OVERRIDE;
void gpr_slice_buffer_add(gpr_slice_buffer* sb,
gpr_slice slice) GRPC_OVERRIDE;
void gpr_slice_buffer_pop(gpr_slice_buffer* sb) GRPC_OVERRIDE;
gpr_slice gpr_slice_malloc(size_t length) override;
void gpr_slice_unref(gpr_slice slice) override;
gpr_slice gpr_slice_split_tail(gpr_slice* s, size_t split) override;
void gpr_slice_buffer_add(gpr_slice_buffer* sb, gpr_slice slice) override;
void gpr_slice_buffer_pop(gpr_slice_buffer* sb) override;
void grpc_metadata_array_init(grpc_metadata_array* array) GRPC_OVERRIDE;
void grpc_metadata_array_destroy(grpc_metadata_array* array) GRPC_OVERRIDE;
void grpc_metadata_array_init(grpc_metadata_array* array) override;
void grpc_metadata_array_destroy(grpc_metadata_array* array) override;
gpr_timespec gpr_inf_future(gpr_clock_type type) GRPC_OVERRIDE;
gpr_timespec gpr_time_0(gpr_clock_type type) GRPC_OVERRIDE;
gpr_timespec gpr_inf_future(gpr_clock_type type) override;
gpr_timespec gpr_time_0(gpr_clock_type type) override;
virtual const Status& ok() GRPC_OVERRIDE;
virtual const Status& cancelled() GRPC_OVERRIDE;
virtual const Status& ok() override;
virtual const Status& cancelled() override;
void assert_fail(const char* failed_assertion) GRPC_OVERRIDE;
void assert_fail(const char* failed_assertion) override;
};
} // namespace grpc

@ -50,7 +50,7 @@ class RpcMethodHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) final {
RequestType req;
Status status = SerializationTraits<RequestType>::Deserialize(
param.request, &req, param.max_receive_message_size);
@ -96,7 +96,7 @@ class ClientStreamingHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) final {
ServerReader<RequestType> reader(param.call, param.server_context);
ResponseType rsp;
Status status = func_(service_, param.server_context, &reader, &rsp);
@ -136,7 +136,7 @@ class ServerStreamingHandler : public MethodHandler {
ServiceType* service)
: func_(func), service_(service) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) final {
RequestType req;
Status status = SerializationTraits<RequestType>::Deserialize(
param.request, &req, param.max_receive_message_size);
@ -180,7 +180,7 @@ class TemplatedBidiStreamingHandler : public MethodHandler {
std::function<Status(ServerContext*, Streamer*)> func)
: func_(func), write_needed_(WriteNeeded) {}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) final {
Streamer stream(param.call, param.server_context);
Status status = func_(param.server_context, &stream);
@ -266,7 +266,7 @@ class UnknownMethodHandler : public MethodHandler {
ops->ServerSendStatus(context->trailing_metadata_, status);
}
void RunHandler(const HandlerParameter& param) GRPC_FINAL {
void RunHandler(const HandlerParameter& param) final {
CallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus> ops;
FillOps(param.server_context, &ops);
param.call->PerformOps(&ops);

@ -52,7 +52,7 @@ namespace internal {
const int kGrpcBufferWriterMaxBufferLength = 8192;
class GrpcBufferWriter GRPC_FINAL
class GrpcBufferWriter final
: public ::grpc::protobuf::io::ZeroCopyOutputStream {
public:
explicit GrpcBufferWriter(grpc_byte_buffer** bp, int block_size)
@ -61,13 +61,13 @@ class GrpcBufferWriter GRPC_FINAL
slice_buffer_ = &(*bp)->data.raw.slice_buffer;
}
~GrpcBufferWriter() GRPC_OVERRIDE {
~GrpcBufferWriter() override {
if (have_backup_) {
g_core_codegen_interface->gpr_slice_unref(backup_slice_);
}
}
bool Next(void** data, int* size) GRPC_OVERRIDE {
bool Next(void** data, int* size) override {
if (have_backup_) {
slice_ = backup_slice_;
have_backup_ = false;
@ -82,7 +82,7 @@ class GrpcBufferWriter GRPC_FINAL
return true;
}
void BackUp(int count) GRPC_OVERRIDE {
void BackUp(int count) override {
g_core_codegen_interface->gpr_slice_buffer_pop(slice_buffer_);
if (count == block_size_) {
backup_slice_ = slice_;
@ -95,7 +95,7 @@ class GrpcBufferWriter GRPC_FINAL
byte_count_ -= count;
}
grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE { return byte_count_; }
grpc::protobuf::int64 ByteCount() const override { return byte_count_; }
private:
const int block_size_;
@ -106,7 +106,7 @@ class GrpcBufferWriter GRPC_FINAL
gpr_slice slice_;
};
class GrpcBufferReader GRPC_FINAL
class GrpcBufferReader final
: public ::grpc::protobuf::io::ZeroCopyInputStream {
public:
explicit GrpcBufferReader(grpc_byte_buffer* buffer)
@ -117,11 +117,11 @@ class GrpcBufferReader GRPC_FINAL
"Couldn't initialize byte buffer reader");
}
}
~GrpcBufferReader() GRPC_OVERRIDE {
~GrpcBufferReader() override {
g_core_codegen_interface->grpc_byte_buffer_reader_destroy(&reader_);
}
bool Next(const void** data, int* size) GRPC_OVERRIDE {
bool Next(const void** data, int* size) override {
if (!status_.ok()) {
return false;
}
@ -147,9 +147,9 @@ class GrpcBufferReader GRPC_FINAL
Status status() const { return status_; }
void BackUp(int count) GRPC_OVERRIDE { backup_count_ = count; }
void BackUp(int count) override { backup_count_ = count; }
bool Skip(int count) GRPC_OVERRIDE {
bool Skip(int count) override {
const void* data;
int size;
while (Next(&data, &size)) {
@ -164,7 +164,7 @@ class GrpcBufferReader GRPC_FINAL
return false;
}
grpc::protobuf::int64 ByteCount() const GRPC_OVERRIDE {
grpc::protobuf::int64 ByteCount() const override {
return byte_count_ - backup_count_;
}

@ -94,11 +94,9 @@ class ServerContext {
ServerContext(); // for async calls
~ServerContext();
#ifndef GRPC_CXX0X_NO_CHRONO
std::chrono::system_clock::time_point deadline() const {
return Timespec2Timepoint(deadline_);
}
#endif // !GRPC_CXX0X_NO_CHRONO
gpr_timespec raw_deadline() const { return deadline_; }

@ -142,7 +142,7 @@ class ServerInterface : public CallHook {
bool delete_on_finalize);
virtual ~BaseAsyncRequest() {}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
bool FinalizeResult(void** tag, bool* status) override;
protected:
ServerInterface* const server_;
@ -168,7 +168,7 @@ class ServerInterface : public CallHook {
ServerCompletionQueue* notification_cq);
};
class NoPayloadAsyncRequest GRPC_FINAL : public RegisteredAsyncRequest {
class NoPayloadAsyncRequest final : public RegisteredAsyncRequest {
public:
NoPayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
@ -183,7 +183,7 @@ class ServerInterface : public CallHook {
};
template <class Message>
class PayloadAsyncRequest GRPC_FINAL : public RegisteredAsyncRequest {
class PayloadAsyncRequest final : public RegisteredAsyncRequest {
public:
PayloadAsyncRequest(void* registered_method, ServerInterface* server,
ServerContext* context,
@ -196,7 +196,7 @@ class ServerInterface : public CallHook {
IssueRequest(registered_method, &payload_, notification_cq);
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
bool serialization_status =
*status && payload_ &&
SerializationTraits<Message>::Deserialize(
@ -220,7 +220,7 @@ class ServerInterface : public CallHook {
ServerCompletionQueue* notification_cq, void* tag,
bool delete_on_finalize);
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
bool FinalizeResult(void** tag, bool* status) override;
private:
grpc_call_details call_details_;

@ -1,111 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_IMPL_CODEGEN_SYNC_NO_CXX11_H
#define GRPCXX_IMPL_CODEGEN_SYNC_NO_CXX11_H
#include <grpc++/impl/codegen/core_codegen_interface.h>
namespace grpc {
extern CoreCodegenInterface *g_core_codegen_interface;
template <class mutex>
class lock_guard;
class condition_variable;
class mutex {
public:
mutex() { g_core_codegen_interface->gpr_mu_init(&mu_); }
~mutex() { g_core_codegen_interface->gpr_mu_destroy(&mu_); }
private:
::gpr_mu mu_;
template <class mutex>
friend class lock_guard;
friend class condition_variable;
};
template <class mutex>
class lock_guard {
public:
lock_guard(mutex &mu) : mu_(mu), locked(true) {
g_core_codegen_interface->gpr_mu_lock(&mu.mu_);
}
~lock_guard() { unlock_internal(); }
protected:
void lock_internal() {
if (!locked) g_core_codegen_interface->gpr_mu_lock(&mu_.mu_);
locked = true;
}
void unlock_internal() {
if (locked) g_core_codegen_interface->gpr_mu_unlock(&mu_.mu_);
locked = false;
}
private:
mutex &mu_;
bool locked;
friend class condition_variable;
};
template <class mutex>
class unique_lock : public lock_guard<mutex> {
public:
unique_lock(mutex &mu) : lock_guard<mutex>(mu) {}
void lock() { this->lock_internal(); }
void unlock() { this->unlock_internal(); }
};
class condition_variable {
public:
condition_variable() { g_core_codegen_interface->gpr_cv_init(&cv_); }
~condition_variable() { g_core_codegen_interface->gpr_cv_destroy(&cv_); }
void wait(lock_guard<mutex> &mu) {
mu.locked = false;
g_core_codegen_interface->gpr_cv_wait(
&cv_, &mu.mu_.mu_,
g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME));
mu.locked = true;
}
void notify_one() { g_core_codegen_interface->gpr_cv_signal(&cv_); }
void notify_all() { g_core_codegen_interface->gpr_cv_broadcast(&cv_); }
private:
gpr_cv cv_;
};
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_SYNC_NO_CXX11_H
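With this header deleted, the hand-rolled mutex/lock_guard/condition_variable wrappers give way to the standard library types, matching the grpc::mutex to std::mutex member changes elsewhere in this change. A minimal sketch of the replacement pattern:

#include <condition_variable>
#include <mutex>

class ShutdownFlag {
 public:
  void Notify() {
    std::lock_guard<std::mutex> lock(mu_);  // was grpc::lock_guard<grpc::mutex>
    done_ = true;
    cv_.notify_all();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mu_);
    while (!done_) cv_.wait(lock);
  }

 private:
  std::mutex mu_;                // was grpc::mutex
  std::condition_variable cv_;   // was grpc::condition_variable
  bool done_ = false;
};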

@ -131,7 +131,7 @@ class ClientReaderInterface : public ClientStreamingInterface,
};
template <class R>
class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
class ClientReader final : public ClientReaderInterface<R> {
public:
/// Blocking call: create a stream and write the first request out.
template <class W>
@ -150,7 +150,7 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
cq_.Pluck(&ops);
}
void WaitForInitialMetadata() GRPC_OVERRIDE {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
@ -159,12 +159,12 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
cq_.Pluck(&ops); /// status ignored
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
*sz = call_.max_receive_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
@ -174,7 +174,7 @@ class ClientReader GRPC_FINAL : public ClientReaderInterface<R> {
return cq_.Pluck(&ops) && ops.got_message;
}
Status Finish() GRPC_OVERRIDE {
Status Finish() override {
CallOpSet<CallOpClientRecvStatus> ops;
Status status;
ops.ClientRecvStatus(context_, &status);
@ -230,7 +230,7 @@ class ClientWriter : public ClientWriterInterface<W> {
}
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
bool Write(const W& msg, const WriteOptions& options) override {
CallOpSet<CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
return false;
@ -239,7 +239,7 @@ class ClientWriter : public ClientWriterInterface<W> {
return cq_.Pluck(&ops);
}
bool WritesDone() GRPC_OVERRIDE {
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
@ -247,7 +247,7 @@ class ClientWriter : public ClientWriterInterface<W> {
}
/// Read the final response and wait for the final status.
Status Finish() GRPC_OVERRIDE {
Status Finish() override {
Status status;
if (!context_->initial_metadata_received_) {
finish_ops_.RecvInitialMetadata(context_);
@ -287,7 +287,7 @@ class ClientReaderWriterInterface : public ClientStreamingInterface,
};
template <class W, class R>
class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
class ClientReaderWriter final : public ClientReaderWriterInterface<W, R> {
public:
/// Blocking call: create a stream.
ClientReaderWriter(ChannelInterface* channel, const RpcMethod& method,
@ -300,7 +300,7 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
cq_.Pluck(&ops);
}
void WaitForInitialMetadata() GRPC_OVERRIDE {
void WaitForInitialMetadata() override {
GPR_CODEGEN_ASSERT(!context_->initial_metadata_received_);
CallOpSet<CallOpRecvInitialMetadata> ops;
@ -309,12 +309,12 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
cq_.Pluck(&ops); // status ignored
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
*sz = call_.max_receive_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
bool Read(R* msg) override {
CallOpSet<CallOpRecvInitialMetadata, CallOpRecvMessage<R>> ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
@ -325,21 +325,21 @@ class ClientReaderWriter GRPC_FINAL : public ClientReaderWriterInterface<W, R> {
}
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
bool Write(const W& msg, const WriteOptions& options) override {
CallOpSet<CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) return false;
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
bool WritesDone() GRPC_OVERRIDE {
bool WritesDone() override {
CallOpSet<CallOpClientSendClose> ops;
ops.ClientSendClose();
call_.PerformOps(&ops);
return cq_.Pluck(&ops);
}
Status Finish() GRPC_OVERRIDE {
Status Finish() override {
CallOpSet<CallOpRecvInitialMetadata, CallOpClientRecvStatus> ops;
if (!context_->initial_metadata_received_) {
ops.RecvInitialMetadata(context_);
@ -363,11 +363,11 @@ class ServerReaderInterface : public ServerStreamingInterface,
public ReaderInterface<R> {};
template <class R>
class ServerReader GRPC_FINAL : public ServerReaderInterface<R> {
class ServerReader final : public ServerReaderInterface<R> {
public:
ServerReader(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
void SendInitialMetadata() GRPC_OVERRIDE {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
@ -381,12 +381,12 @@ class ServerReader GRPC_FINAL : public ServerReaderInterface<R> {
call_->cq()->Pluck(&ops);
}
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
*sz = call_->max_receive_message_size();
return true;
}
bool Read(R* msg) GRPC_OVERRIDE {
bool Read(R* msg) override {
CallOpSet<CallOpRecvMessage<R>> ops;
ops.RecvMessage(msg);
call_->PerformOps(&ops);
@ -404,11 +404,11 @@ class ServerWriterInterface : public ServerStreamingInterface,
public WriterInterface<W> {};
template <class W>
class ServerWriter GRPC_FINAL : public ServerWriterInterface<W> {
class ServerWriter final : public ServerWriterInterface<W> {
public:
ServerWriter(Call* call, ServerContext* ctx) : call_(call), ctx_(ctx) {}
void SendInitialMetadata() GRPC_OVERRIDE {
void SendInitialMetadata() override {
GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);
CallOpSet<CallOpSendInitialMetadata> ops;
@ -423,7 +423,7 @@ class ServerWriter GRPC_FINAL : public ServerWriterInterface<W> {
}
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
bool Write(const W& msg, const WriteOptions& options) override {
CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage> ops;
if (!ops.SendMessage(msg, options).ok()) {
return false;
@ -454,7 +454,7 @@ class ServerReaderWriterInterface : public ServerStreamingInterface,
// Actual implementation of bi-directional streaming
namespace internal {
template <class W, class R>
class ServerReaderWriterBody GRPC_FINAL {
class ServerReaderWriterBody final {
public:
ServerReaderWriterBody(Call* call, ServerContext* ctx)
: call_(call), ctx_(ctx) {}
@ -510,20 +510,20 @@ class ServerReaderWriterBody GRPC_FINAL {
// class to represent the user API for a bidirectional streaming call
template <class W, class R>
class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
class ServerReaderWriter final : public ServerReaderWriterInterface<W, R> {
public:
ServerReaderWriter(Call* call, ServerContext* ctx) : body_(call, ctx) {}
void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
void SendInitialMetadata() override { body_.SendInitialMetadata(); }
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
return body_.NextMessageSize(sz);
}
bool Read(R* msg) GRPC_OVERRIDE { return body_.Read(msg); }
bool Read(R* msg) override { return body_.Read(msg); }
using WriterInterface<W>::Write;
bool Write(const W& msg, const WriteOptions& options) GRPC_OVERRIDE {
bool Write(const W& msg, const WriteOptions& options) override {
return body_.Write(msg, options);
}
@ -541,19 +541,19 @@ class ServerReaderWriter GRPC_FINAL : public ServerReaderWriterInterface<W, R> {
/// must have exactly 1 Read and exactly 1 Write, in that order, to function
/// correctly. Otherwise, the RPC is in error.
template <class RequestType, class ResponseType>
class ServerUnaryStreamer GRPC_FINAL
class ServerUnaryStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerUnaryStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false), write_done_(false) {}
void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
void SendInitialMetadata() override { body_.SendInitialMetadata(); }
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
return body_.NextMessageSize(sz);
}
bool Read(RequestType* request) GRPC_OVERRIDE {
bool Read(RequestType* request) override {
if (read_done_) {
return false;
}
@ -563,7 +563,7 @@ class ServerUnaryStreamer GRPC_FINAL
using WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response,
const WriteOptions& options) GRPC_OVERRIDE {
const WriteOptions& options) override {
if (write_done_ || !read_done_) {
return false;
}
@ -583,19 +583,19 @@ class ServerUnaryStreamer GRPC_FINAL
/// but the server responds to it as though it were a bidi streaming call that
/// must first have exactly 1 Read and then any number of Writes.
template <class RequestType, class ResponseType>
class ServerSplitStreamer GRPC_FINAL
class ServerSplitStreamer final
: public ServerReaderWriterInterface<ResponseType, RequestType> {
public:
ServerSplitStreamer(Call* call, ServerContext* ctx)
: body_(call, ctx), read_done_(false) {}
void SendInitialMetadata() GRPC_OVERRIDE { body_.SendInitialMetadata(); }
void SendInitialMetadata() override { body_.SendInitialMetadata(); }
bool NextMessageSize(uint32_t* sz) GRPC_OVERRIDE {
bool NextMessageSize(uint32_t* sz) override {
return body_.NextMessageSize(sz);
}
bool Read(RequestType* request) GRPC_OVERRIDE {
bool Read(RequestType* request) override {
if (read_done_) {
return false;
}
@ -605,7 +605,7 @@ class ServerSplitStreamer GRPC_FINAL
using WriterInterface<ResponseType>::Write;
bool Write(const ResponseType& response,
const WriteOptions& options) GRPC_OVERRIDE {
const WriteOptions& options) override {
return read_done_ && body_.Write(response, options);
}
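The blocking stream wrappers above keep their calling pattern; only the macros change. A minimal client-streaming sketch, where Stub, PointRequest, RouteSummary, and RecordRoute are hypothetical generated names standing in for a real service:

#include <memory>
#include <vector>
#include <grpc++/grpc++.h>
#include <grpc++/support/sync_stream.h>

grpc::Status RecordAll(Stub* stub, const std::vector<PointRequest>& points,
                       RouteSummary* summary) {
  grpc::ClientContext ctx;
  std::unique_ptr<grpc::ClientWriter<PointRequest>> writer(
      stub->RecordRoute(&ctx, summary));
  for (const PointRequest& p : points) {
    if (!writer->Write(p)) break;  // stream broken by the peer
  }
  writer->WritesDone();            // half-close: no more client messages
  return writer->Finish();         // block for the server's final status
}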

@ -75,8 +75,6 @@ class TimePoint<gpr_timespec> {
} // namespace grpc
#ifndef GRPC_CXX0X_NO_CHRONO
#include <chrono>
#include <grpc/impl/codegen/grpc_types.h>
@ -106,6 +104,4 @@ class TimePoint<std::chrono::system_clock::time_point> {
} // namespace grpc
#endif // !GRPC_CXX0X_NO_CHRONO
#endif // GRPCXX_IMPL_CODEGEN_TIME_H

@ -44,17 +44,17 @@
namespace grpc {
namespace internal {
class GrpcLibrary GRPC_FINAL : public GrpcLibraryInterface {
class GrpcLibrary final : public GrpcLibraryInterface {
public:
void init() GRPC_OVERRIDE { grpc_init(); }
void shutdown() GRPC_OVERRIDE { grpc_shutdown(); }
void init() override { grpc_init(); }
void shutdown() override { grpc_shutdown(); }
};
static GrpcLibrary g_gli;
static CoreCodegen g_core_codegen;
/// Instantiating this class ensures the proper initialization of gRPC.
class GrpcLibraryInitializer GRPC_FINAL {
class GrpcLibraryInitializer final {
public:
GrpcLibraryInitializer() {
if (grpc::g_glip == nullptr) {

@ -1,39 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_IMPL_SYNC_H
#define GRPCXX_IMPL_SYNC_H
#include <grpc++/impl/codegen/sync.h>
#endif // GRPCXX_IMPL_SYNC_H

@ -1,45 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_IMPL_THD_H
#define GRPCXX_IMPL_THD_H
#include <grpc++/support/config.h>
#ifdef GRPC_CXX0X_NO_THREAD
#include <grpc++/impl/thd_no_cxx11.h>
#else
#include <grpc++/impl/thd_cxx11.h>
#endif
#endif // GRPCXX_IMPL_THD_H

@ -1,117 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPCXX_IMPL_THD_NO_CXX11_H
#define GRPCXX_IMPL_THD_NO_CXX11_H
#include <grpc/support/thd.h>
namespace grpc {
class thread {
public:
template <class T>
thread(void (T::*fptr)(), T *obj) {
func_ = new thread_function<T>(fptr, obj);
joined_ = false;
start();
}
template <class T, class U>
thread(void (T::*fptr)(U arg), T *obj, U arg) {
func_ = new thread_function_arg<T, U>(fptr, obj, arg);
joined_ = false;
start();
}
~thread() {
if (!joined_) std::terminate();
delete func_;
}
thread(thread &&other)
: func_(other.func_), thd_(other.thd_), joined_(other.joined_) {
other.joined_ = true;
other.func_ = NULL;
}
void join() {
gpr_thd_join(thd_);
joined_ = true;
}
private:
void start() {
gpr_thd_options options = gpr_thd_options_default();
gpr_thd_options_set_joinable(&options);
gpr_thd_new(&thd_, thread_func, (void *)func_, &options);
}
static void thread_func(void *arg) {
thread_function_base *func = (thread_function_base *)arg;
func->call();
}
class thread_function_base {
public:
virtual ~thread_function_base() {}
virtual void call() = 0;
};
template <class T>
class thread_function : public thread_function_base {
public:
thread_function(void (T::*fptr)(), T *obj) : fptr_(fptr), obj_(obj) {}
virtual void call() { (obj_->*fptr_)(); }
private:
void (T::*fptr_)();
T *obj_;
};
template <class T, class U>
class thread_function_arg : public thread_function_base {
public:
thread_function_arg(void (T::*fptr)(U arg), T *obj, U arg)
: fptr_(fptr), obj_(obj), arg_(arg) {}
virtual void call() { (obj_->*fptr_)(arg_); }
private:
void (T::*fptr_)(U arg);
T *obj_;
U arg_;
};
thread_function_base *func_;
gpr_thd_id thd_;
bool joined_;
// Disallow copy and assign.
thread(const thread &);
void operator=(const thread &);
};
} // namespace grpc
#endif // GRPCXX_IMPL_THD_NO_CXX11_H
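With thd_no_cxx11.h (and the GRPC_CXX0X_NO_THREAD branch in thd.h) gone, the member-function-pointer wrapper above is replaced by std::thread, which accepts the same (method pointer, object) form. A minimal sketch with a hypothetical Worker type:

#include <thread>

class Worker {
 public:
  void Run() { /* ... */ }
};

void SpawnAndJoin() {
  Worker w;
  std::thread t(&Worker::Run, &w);  // was: grpc::thread t(&Worker::Run, &w);
  t.join();                         // must join before destruction, as before
}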

@ -44,7 +44,7 @@ namespace grpc {
/// A ResourceQuota can be attached to a server (via ServerBuilder), or a client
/// channel (via ChannelArguments). gRPC will attempt to keep memory used by
/// all attached entities below the ResourceQuota bound.
class ResourceQuota GRPC_FINAL {
class ResourceQuota final {
public:
explicit ResourceQuota(const grpc::string& name);
ResourceQuota();
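A minimal sketch of attaching a quota to a client channel via ChannelArguments::SetResourceQuota from this change; Resize() and the 64 MiB figure are assumptions about typical usage rather than part of this diff.

#include <grpc++/grpc++.h>
#include <grpc++/resource_quota.h>
#include <grpc++/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeBoundedChannel(const grpc::string& target) {
  grpc::ResourceQuota quota("client_quota");
  quota.Resize(64 * 1024 * 1024);  // cap buffer memory at roughly 64 MiB
  grpc::ChannelArguments args;
  args.SetResourceQuota(quota);    // channel memory is charged to this quota
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}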

@ -34,8 +34,10 @@
#ifndef GRPCXX_SERVER_H
#define GRPCXX_SERVER_H
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <vector>
#include <grpc++/completion_queue.h>
@ -43,7 +45,6 @@
#include <grpc++/impl/codegen/grpc_library.h>
#include <grpc++/impl/codegen/server_interface.h>
#include <grpc++/impl/rpc_service_method.h>
#include <grpc++/impl/sync.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc++/support/config.h>
@ -64,7 +65,7 @@ class ThreadPoolInterface;
/// Models a gRPC server.
///
/// Servers are configured and started via \a grpc::ServerBuilder.
class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
class Server final : public ServerInterface, private GrpcLibraryCodegen {
public:
~Server();
@ -72,7 +73,7 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
///
/// \warning The server must be either shutting down or some other thread must
/// call \a Shutdown for this function to ever return.
void Wait() GRPC_OVERRIDE;
void Wait() override;
/// Global Callbacks
///
@ -143,12 +144,11 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.
bool RegisterService(const grpc::string* host,
Service* service) GRPC_OVERRIDE;
bool RegisterService(const grpc::string* host, Service* service) override;
/// Register a generic service. This call does not take ownership of the
/// service. The service must exist for the lifetime of the Server instance.
void RegisterAsyncGenericService(AsyncGenericService* service) GRPC_OVERRIDE;
void RegisterAsyncGenericService(AsyncGenericService* service) override;
/// Tries to bind \a server to the given \a addr.
///
@ -162,7 +162,7 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
///
/// \warning It's an error to call this method on an already started server.
int AddListeningPort(const grpc::string& addr,
ServerCredentials* creds) GRPC_OVERRIDE;
ServerCredentials* creds) override;
/// Start the server.
///
@ -172,17 +172,17 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
/// \param num_cqs How many completion queues does \a cqs hold.
///
/// \return true on a successful start.
bool Start(ServerCompletionQueue** cqs, size_t num_cqs) GRPC_OVERRIDE;
bool Start(ServerCompletionQueue** cqs, size_t num_cqs) override;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) GRPC_OVERRIDE;
void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override;
void ShutdownInternal(gpr_timespec deadline) GRPC_OVERRIDE;
void ShutdownInternal(gpr_timespec deadline) override;
int max_receive_message_size() const GRPC_OVERRIDE {
int max_receive_message_size() const override {
return max_receive_message_size_;
};
grpc_server* server() GRPC_OVERRIDE { return server_; };
grpc_server* server() override { return server_; };
ServerInitializer* initializer();
@ -198,12 +198,12 @@ class Server GRPC_FINAL : public ServerInterface, private GrpcLibraryCodegen {
std::vector<std::unique_ptr<SyncRequestThreadManager>> sync_req_mgrs_;
// Server status
grpc::mutex mu_;
std::mutex mu_;
bool started_;
bool shutdown_;
bool shutdown_notified_; // Was notify called on the shutdown_cv_
grpc::condition_variable shutdown_cv_;
std::condition_variable shutdown_cv_;
std::shared_ptr<GlobalCallbacks> global_callbacks_;
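A minimal sketch of the ServerBuilder flow the class comment refers to; MyServiceImpl stands in for a hypothetical generated-service implementation, and the address/port is arbitrary.

#include <memory>
#include <grpc++/grpc++.h>

void RunServer(MyServiceImpl* service) {
  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051",
                           grpc::InsecureServerCredentials());
  builder.RegisterService(service);  // server does not take ownership
  std::unique_ptr<grpc::Server> server = builder.BuildAndStart();
  server->Wait();  // returns only once Shutdown() is called elsewhere
}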

@ -47,7 +47,7 @@
namespace grpc {
/// A sequence of bytes.
class ByteBuffer GRPC_FINAL {
class ByteBuffer final {
public:
/// Construct an empty buffer.
ByteBuffer() : buffer_(nullptr) {}

@ -85,6 +85,11 @@ class ChannelArguments {
/// The given buffer pool will be attached to the constructed channel
void SetResourceQuota(const ResourceQuota& resource_quota);
/// Set LB policy name.
/// Note that if the name resolver returns only balancer addresses, the
/// grpclb LB policy will be used, regardless of what is specified here.
void SetLoadBalancingPolicyName(const grpc::string& lb_policy_name);
// Generic channel argument setters. Only for advanced use cases.
/// Set an integer argument \a value under \a key.
void SetInt(const grpc::string& key, int value);
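A minimal sketch of the new setter alongside the existing channel-creation helpers; "round_robin" is one of the built-in policy names, and per the note above, balancer-only resolver results still force grpclb regardless of this value.

#include <grpc++/grpc++.h>
#include <grpc++/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeRoundRobinChannel(
    const grpc::string& target) {
  grpc::ChannelArguments args;
  args.SetLoadBalancingPolicyName("round_robin");
  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}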

@ -44,7 +44,7 @@ namespace grpc {
/// A slice represents a contiguous reference counted array of bytes.
/// It is cheap to take references to a slice, and it is cheap to create a
/// slice pointing to a subset of another slice.
class Slice GRPC_FINAL {
class Slice final {
public:
/// Construct an empty slice.
Slice();

@ -266,6 +266,7 @@
<file baseinstalldir="/" name="src/core/lib/transport/metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/method_config.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/pid_controller.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/timeout_encoding.h" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport.h" role="src" />
@ -454,6 +455,7 @@
<file baseinstalldir="/" name="src/core/lib/transport/metadata.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/metadata_batch.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/method_config.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/pid_controller.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/static_metadata.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/timeout_encoding.c" role="src" />
<file baseinstalldir="/" name="src/core/lib/transport/transport.c" role="src" />

@ -52,6 +52,7 @@ PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
CORE_INCLUDE = ('include', '.',)
BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
README = os.path.join(PYTHON_STEM, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
@ -259,6 +260,7 @@ setuptools.setup(
name='grpcio',
version=grpc_version.VERSION,
license=LICENSE,
long_description=open(README).read(),
ext_modules=CYTHON_EXTENSION_MODULES,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,

@ -322,7 +322,7 @@ void PrintHeaderClientMethod(Printer *printer, const Method *method,
printer->Print(
*vars,
"::grpc::Status $Method$(::grpc::ClientContext* context, "
"const $Request$& request, $Response$* response) GRPC_OVERRIDE;\n");
"const $Request$& request, $Response$* response) override;\n");
printer->Print(
*vars,
"std::unique_ptr< ::grpc::ClientAsyncResponseReader< $Response$>> "
@ -417,37 +417,34 @@ void PrintHeaderClientMethod(Printer *printer, const Method *method,
"::grpc::ClientAsyncResponseReader< $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"const $Request$& request, "
"::grpc::CompletionQueue* cq) GRPC_OVERRIDE;\n");
"::grpc::CompletionQueue* cq) override;\n");
} else if (method->ClientOnlyStreaming()) {
printer->Print(*vars,
"::grpc::ClientWriter< $Request$>* $Method$Raw("
"::grpc::ClientContext* context, $Response$* response) "
"GRPC_OVERRIDE;\n");
printer->Print(
*vars,
"override;\n");
printer->Print(*vars,
"::grpc::ClientAsyncWriter< $Request$>* Async$Method$Raw("
"::grpc::ClientContext* context, $Response$* response, "
"::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
"::grpc::CompletionQueue* cq, void* tag) override;\n");
} else if (method->ServerOnlyStreaming()) {
printer->Print(*vars,
"::grpc::ClientReader< $Response$>* $Method$Raw("
"::grpc::ClientContext* context, const $Request$& request)"
" GRPC_OVERRIDE;\n");
" override;\n");
printer->Print(
*vars,
"::grpc::ClientAsyncReader< $Response$>* Async$Method$Raw("
"::grpc::ClientContext* context, const $Request$& request, "
"::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
"::grpc::CompletionQueue* cq, void* tag) override;\n");
} else if (method->BidiStreaming()) {
printer->Print(
*vars,
printer->Print(*vars,
"::grpc::ClientReaderWriter< $Request$, $Response$>* "
"$Method$Raw(::grpc::ClientContext* context) GRPC_OVERRIDE;\n");
printer->Print(
*vars,
"$Method$Raw(::grpc::ClientContext* context) override;\n");
printer->Print(*vars,
"::grpc::ClientAsyncReaderWriter< $Request$, $Response$>* "
"Async$Method$Raw(::grpc::ClientContext* context, "
"::grpc::CompletionQueue* cq, void* tag) GRPC_OVERRIDE;\n");
"::grpc::CompletionQueue* cq, void* tag) override;\n");
}
}
}
@ -509,7 +506,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
" ::grpc::Service::MarkMethodAsync($Idx$);\n"
"}\n");
printer->Print(*vars,
"~WithAsyncMethod_$Method$() GRPC_OVERRIDE {\n"
"~WithAsyncMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
if (method->NoStreaming()) {
@ -518,7 +515,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
"$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -540,7 +537,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReader< $Request$>* reader, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
"$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -561,7 +558,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
"::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@ -585,7 +582,7 @@ void PrintHeaderServerMethodAsync(Printer *printer, const Method *method,
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReaderWriter< $Response$, $Request$>* stream) "
"GRPC_FINAL GRPC_OVERRIDE {\n"
"final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -632,7 +629,7 @@ void PrintHeaderServerMethodStreamedUnary(
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"~WithStreamedUnaryMethod_$Method$() GRPC_OVERRIDE {\n"
"~WithStreamedUnaryMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
printer->Print(
@ -640,7 +637,7 @@ void PrintHeaderServerMethodStreamedUnary(
"// disable regular version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
"$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -683,7 +680,7 @@ void PrintHeaderServerMethodSplitStreaming(
"std::placeholders::_2)));\n"
"}\n");
printer->Print(*vars,
"~WithSplitStreamingMethod_$Method$() GRPC_OVERRIDE {\n"
"~WithSplitStreamingMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
printer->Print(
@ -691,7 +688,7 @@ void PrintHeaderServerMethodSplitStreaming(
"// disable regular version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
"::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@ -727,7 +724,7 @@ void PrintHeaderServerMethodGeneric(
" ::grpc::Service::MarkMethodGeneric($Idx$);\n"
"}\n");
printer->Print(*vars,
"~WithGenericMethod_$Method$() GRPC_OVERRIDE {\n"
"~WithGenericMethod_$Method$() override {\n"
" BaseClassMustBeDerivedFromService(this);\n"
"}\n");
if (method->NoStreaming()) {
@ -736,7 +733,7 @@ void PrintHeaderServerMethodGeneric(
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
"$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -747,7 +744,7 @@ void PrintHeaderServerMethodGeneric(
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReader< $Request$>* reader, "
"$Response$* response) GRPC_FINAL GRPC_OVERRIDE {\n"
"$Response$* response) final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -757,7 +754,7 @@ void PrintHeaderServerMethodGeneric(
"// disable synchronous version of this method\n"
"::grpc::Status $Method$("
"::grpc::ServerContext* context, const $Request$* request, "
"::grpc::ServerWriter< $Response$>* writer) GRPC_FINAL GRPC_OVERRIDE "
"::grpc::ServerWriter< $Response$>* writer) final override "
"{\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
@ -769,7 +766,7 @@ void PrintHeaderServerMethodGeneric(
"::grpc::Status $Method$("
"::grpc::ServerContext* context, "
"::grpc::ServerReaderWriter< $Response$, $Request$>* stream) "
"GRPC_FINAL GRPC_OVERRIDE {\n"
"final override {\n"
" abort();\n"
" return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, \"\");\n"
"}\n");
@ -784,7 +781,7 @@ void PrintHeaderService(Printer *printer, const Service *service,
printer->Print(service->GetLeadingComments().c_str());
printer->Print(*vars,
"class $Service$ GRPC_FINAL {\n"
"class $Service$ final {\n"
" public:\n");
printer->Indent();
@ -810,7 +807,7 @@ void PrintHeaderService(Printer *printer, const Service *service,
printer->Outdent();
printer->Print("};\n");
printer->Print(
"class Stub GRPC_FINAL : public StubInterface"
"class Stub final : public StubInterface"
" {\n public:\n");
printer->Indent();
printer->Print(

@ -109,10 +109,16 @@ struct grpc_lb_policy_vtable {
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
/* Strong references: the policy will shutdown when they reach zero */
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
/* Weak references: they don't prevent the shutdown of the LB policy. When no
* strong references are left but there are still weak ones, shutdown is called.
* Once the weak reference also reaches zero, the LB policy is destroyed. */
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
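For reference, the strong/weak pair declared here is used as follows elsewhere in this change (an illustrative fragment only; exec_ctx, glb_policy and rr_connectivity come from the grpclb code further down, not from this header):

/* take a weak ref before subscribing to RR connectivity updates, so the
 * pending callback cannot outlive the policy */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
                                      &rr_connectivity->state,
                                      &rr_connectivity->on_change);
/* ... later, from the connectivity callback, drop the same weak ref ... */
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "rr_connectivity_cb");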

@ -43,30 +43,23 @@
* policy to select from this list of LB server backends.
*
* The first time the policy gets a request for a pick, a ping, or to exit the
* idle state, \a query_for_backends() is called. It creates an instance of \a
* lb_client_data, an internal struct meant to contain the data associated with
* the internal communication with the LB server. This instance is created via
* \a lb_client_data_create(). There, the call over lb_channel to pick-first
* from {a1..an} is created, the \a LoadBalancingRequest message is assembled
* and all necessary callbacks for the progress of the internal call configured.
* idle state, \a query_for_backends_locked() is called. This function sets up
* and initiates the internal communication with the LB server. In particular,
* it's responsible for instantiating the internal *streaming* call to the LB
* server (whichever address from {a1..an} pick-first chose). This call is
* serviced by two callbacks, \a lb_on_server_status_received and \a
* lb_on_response_received. The former will be called when the call to the LB
* server completes. This can happen if the LB server closes the connection or
* if this policy itself cancels the call (for example because it's shutting
* down). If the internal call times out, the usual behavior of pick-first
* applies, continuing to pick from the list {a1..an}.
*
* Back in \a query_for_backends(), the internal *streaming* call to the LB
* server (whichever address from {a1..an} pick-first chose) is kicked off.
* It'll progress over the callbacks configured in \a lb_client_data_create()
* (see the field docstrings of \a lb_client_data for more details).
*
* If the call fails with UNIMPLEMENTED, the original call will also fail.
* There's a misconfiguration somewhere: at least one of {a1..an} isn't a LB
* server, which contradicts the LB bit being set. If the internal call times
* out, the usual behavior of pick-first applies, continuing to pick from the
* list {a1..an}.
*
* Upon success, a \a LoadBalancingResponse is expected in \a res_recv_cb. An
* invalid one results in the termination of the streaming call. A new streaming
* call should be created if possible, failing the original call otherwise.
* For a valid \a LoadBalancingResponse, the server list of actual backends is
* extracted. A Round Robin policy will be created from this list. There are two
* possible scenarios:
* Upon success, the incoming \a LoadBalancingResponse is processed by \a
* res_recv. An invalid one results in the termination of the streaming call. A
* new streaming call should be created if possible, failing the original call
* otherwise. For a valid \a LoadBalancingResponse, the server list of actual
* backends is extracted. A Round Robin policy will be created from this list.
* There are two possible scenarios:
*
* 1. This is the first server list received. There was no previous instance of
* the Round Robin policy. \a rr_handover_locked() will instantiate the RR
@ -84,10 +77,10 @@
* Once a RR policy instance is in place (and getting updated as described),
* calls for a pick, a ping, or a cancellation will be serviced right away by
* forwarding them to the RR instance. Any time there's no RR policy available
* (ie, right after the creation of the gRPCLB policy, if an empty serverlist
* is received, etc), pick/ping requests are added to a list of pending
* picks/pings to be flushed and serviced as part of \a rr_handover_locked() the
* moment the RR policy instance becomes available.
* (ie, right after the creation of the gRPCLB policy, if an empty serverlist is
* received, etc), pick/ping requests are added to a list of pending picks/pings
* to be flushed and serviced as part of \a rr_handover_locked() the moment the
* RR policy instance becomes available.
*
* \see https://github.com/grpc/grpc/blob/master/doc/load-balancing.md for the
* high level design and details. */
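The flow described above, condensed to the function names used in this file (an illustrative outline, not code introduced by this change):

/*
 * glb_pick() / glb_ping_one() / glb_exit_idle()
 *   -> start_picking_locked()
 *        -> query_for_backends_locked()      // creates the streaming lb_call
 * lb_on_response_received()                  // valid serverlist received
 *   -> rr_handover_locked()                  // (re)create RR, flush pending picks/pings
 * lb_on_server_status_received()             // LB call terminated
 *   -> lb_call_destroy() + gpr_backoff_step() + grpc_timer_init()
 *        -> lb_call_on_retry_timer() -> query_for_backends_locked()  // retry
 */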
@ -120,12 +113,20 @@
#include "src/core/ext/lb_policy/grpclb/grpclb.h"
#include "src/core/ext/lb_policy/grpclb/load_balancer_api.h"
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/iomgr/sockaddr.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/support/backoff.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/surface/call.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/transport/static_metadata.h"
#define BACKOFF_MULTIPLIER 1.6
#define BACKOFF_JITTER 0.2
#define BACKOFF_MIN_SECONDS 10
#define BACKOFF_MAX_SECONDS 60
int grpc_lb_glb_trace = 0;
/* add lb_token of selected subchannel (address) to the call's initial
@ -174,13 +175,12 @@ typedef struct wrapped_rr_closure_arg {
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
wrapped_rr_closure_arg *wc_arg = arg;
if (wc_arg->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
NULL);
if (wc_arg->rr_policy != NULL) {
/* if target is NULL, no pick has been made by the RR policy (eg, all
* addresses failed to connect). There won't be any user_data/token
* available */
@ -189,10 +189,12 @@ static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
wc_arg->lb_token_mdelem_storage,
GRPC_MDELEM_REF(wc_arg->lb_token));
}
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
}
GPR_ASSERT(wc_arg->wrapped_closure != NULL);
grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
NULL);
GPR_ASSERT(wc_arg->free_when_done != NULL);
gpr_free(wc_arg->free_when_done);
}
@ -264,7 +266,6 @@ static void add_pending_ping(pending_ping **root, grpc_closure *notify) {
* glb_lb_policy
*/
typedef struct rr_connectivity_data rr_connectivity_data;
struct lb_client_data;
static const grpc_lb_policy_vtable glb_lb_policy_vtable;
typedef struct glb_lb_policy {
/** base policy: must be first */
@ -296,20 +297,47 @@ typedef struct glb_lb_policy {
* response has arrived. */
grpc_grpclb_serverlist *serverlist;
/** addresses from \a serverlist */
grpc_lb_addresses *addresses;
/** list of picks that are waiting on RR's policy connectivity */
pending_pick *pending_picks;
/** list of pings that are waiting on RR's policy connectivity */
pending_ping *pending_pings;
/** client data associated with the LB server communication */
struct lb_client_data *lb_client;
bool shutting_down;
/************************************************************/
/* client data associated with the LB server communication */
/************************************************************/
/* Status from the LB server has been received. This signals the end of the LB
* call. */
grpc_closure lb_on_server_status_received;
/* A response from the LB server has been received. Process it */
grpc_closure lb_on_response_received;
grpc_call *lb_call; /* streaming call to the LB server, */
grpc_metadata_array lb_initial_metadata_recv; /* initial MD from LB server */
grpc_metadata_array
lb_trailing_metadata_recv; /* trailing MD from LB server */
/* what's being sent to the LB server. Note that its value may vary if the LB
* server indicates a redirect. */
grpc_byte_buffer *lb_request_payload;
/* response from the LB server, if any. Processed in lb_on_response_received() */
grpc_byte_buffer *lb_response_payload;
/* call status code and details, set in lb_on_server_status_received() */
grpc_status_code lb_call_status;
char *lb_call_status_details;
size_t lb_call_status_details_capacity;
/** for tracking of the RR connectivity */
rr_connectivity_data *rr_connectivity;
/** LB call retry backoff state */
gpr_backoff lb_call_backoff_state;
/** LB call retry timer */
grpc_timer lb_call_retry_timer;
} glb_lb_policy;
/* Keeps track and reacts to changes in connectivity of the RR instance */
@ -358,6 +386,28 @@ static int lb_token_cmp(void *token1, void *token2) {
static const grpc_lb_user_data_vtable lb_token_vtable = {
lb_token_copy, lb_token_destroy, lb_token_cmp};
static void parse_server(const grpc_grpclb_server *server,
grpc_resolved_address *addr) {
const uint16_t netorder_port = htons((uint16_t)server->port);
/* the addresses are given in binary format (an in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address *ip = &server->ip_address;
memset(addr, 0, sizeof(*addr));
if (ip->size == 4) {
addr->len = sizeof(struct sockaddr_in);
struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr->addr;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr->len = sizeof(struct sockaddr_in6);
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr->addr;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
}
}
/* Returns addresses extracted from \a serverlist. */
static grpc_lb_addresses *process_serverlist(
const grpc_grpclb_serverlist *serverlist) {
@ -384,33 +434,18 @@ static grpc_lb_addresses *process_serverlist(
if (!is_server_valid(serverlist->servers[sl_idx], sl_idx, false)) continue;
/* address processing */
const uint16_t netorder_port = htons((uint16_t)server->port);
/* the addresses are given in binary format (a in(6)_addr struct) in
* server->ip_address.bytes. */
const grpc_grpclb_ip_address *ip = &server->ip_address;
grpc_resolved_address addr;
memset(&addr, 0, sizeof(addr));
if (ip->size == 4) {
addr.len = sizeof(struct sockaddr_in);
struct sockaddr_in *addr4 = (struct sockaddr_in *)&addr.addr;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, ip->bytes, ip->size);
addr4->sin_port = netorder_port;
} else if (ip->size == 16) {
addr.len = sizeof(struct sockaddr_in6);
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&addr.addr;
addr6->sin6_family = AF_INET;
memcpy(&addr6->sin6_addr, ip->bytes, ip->size);
addr6->sin6_port = netorder_port;
}
parse_server(server, &addr);
/* lb token processing */
void *user_data;
if (server->has_load_balance_token) {
const size_t lb_token_size =
GPR_ARRAY_SIZE(server->load_balance_token) - 1;
const size_t lb_token_max_length =
GPR_ARRAY_SIZE(server->load_balance_token);
const size_t lb_token_length =
strnlen(server->load_balance_token, lb_token_max_length);
grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
(uint8_t *)server->load_balance_token, lb_token_size);
(uint8_t *)server->load_balance_token, lb_token_length);
user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
lb_token_mdstr);
} else {
@ -427,7 +462,6 @@ static grpc_lb_addresses *process_serverlist(
++addr_idx;
}
GPR_ASSERT(addr_idx == num_valid);
return lb_addresses;
}
@ -448,7 +482,7 @@ static bool pick_from_internal_rr_locked(
gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
(intptr_t)wc_arg->rr_policy);
}
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick");
GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
/* add the load reporting initial metadata */
initial_metadata_add_lb_token(pick_args->initial_metadata,
@ -461,7 +495,6 @@ static bool pick_from_internal_rr_locked(
* pending pick list inside the RR policy (glb_policy->rr_policy).
* Eventually, wrapped_on_complete will be called, which will -among other
* things- add the LB token to the call's initial metadata */
return pick_done;
}
@ -470,54 +503,70 @@ static grpc_lb_policy *create_rr_locked(
glb_lb_policy *glb_policy) {
GPR_ASSERT(serverlist != NULL && serverlist->num_servers > 0);
if (glb_policy->addresses != NULL) {
/* dispose of the previous version */
grpc_lb_addresses_destroy(glb_policy->addresses);
}
glb_policy->addresses = process_serverlist(serverlist);
grpc_lb_policy_args args;
memset(&args, 0, sizeof(args));
args.client_channel_factory = glb_policy->cc_factory;
grpc_lb_addresses *addresses = process_serverlist(serverlist);
// Replace the LB addresses in the channel args that we pass down to
// the subchannel.
static const char *keys_to_remove[] = {GRPC_ARG_LB_ADDRESSES};
const grpc_arg arg =
grpc_lb_addresses_create_channel_arg(glb_policy->addresses);
const grpc_arg arg = grpc_lb_addresses_create_channel_arg(addresses);
args.args = grpc_channel_args_copy_and_add_and_remove(
glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
1);
grpc_lb_policy *rr = grpc_lb_policy_create(exec_ctx, "round_robin", &args);
GPR_ASSERT(rr != NULL);
grpc_lb_addresses_destroy(addresses);
grpc_channel_args_destroy(args.args);
return rr;
}
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
/* glb_policy->rr_policy may be NULL (initial handover) */
static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy, grpc_error *error) {
GPR_ASSERT(glb_policy->serverlist != NULL &&
glb_policy->serverlist->num_servers > 0);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "RR handover. Old RR: %p", (void *)glb_policy->rr_policy);
}
if (glb_policy->rr_policy != NULL) {
/* if we are phasing out an existing RR instance, unref it. */
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "rr_handover");
}
glb_policy->rr_policy =
create_rr_locked(exec_ctx, glb_policy->serverlist, glb_policy);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Created RR policy (0x%" PRIxPTR ")",
(intptr_t)glb_policy->rr_policy);
gpr_log(GPR_INFO, "Created RR policy (%p)", (void *)glb_policy->rr_policy);
}
GPR_ASSERT(glb_policy->rr_policy != NULL);
grpc_pollset_set_add_pollset_set(exec_ctx,
glb_policy->rr_policy->interested_parties,
glb_policy->base.interested_parties);
glb_policy->rr_connectivity->state = grpc_lb_policy_check_connectivity(
rr_connectivity_data *rr_connectivity =
gpr_malloc(sizeof(rr_connectivity_data));
memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
rr_connectivity);
rr_connectivity->glb_policy = glb_policy;
rr_connectivity->state = grpc_lb_policy_check_connectivity(
exec_ctx, glb_policy->rr_policy, &error);
grpc_lb_policy_notify_on_state_change(
exec_ctx, glb_policy->rr_policy, &glb_policy->rr_connectivity->state,
&glb_policy->rr_connectivity->on_change);
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
glb_policy->rr_connectivity->state,
GRPC_ERROR_REF(error), "rr_handover");
rr_connectivity->state, GRPC_ERROR_REF(error),
"rr_handover");
/* subscribe */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "rr_connectivity_cb");
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_connectivity->state,
&rr_connectivity->on_change);
grpc_lb_policy_exit_idle(exec_ctx, glb_policy->rr_policy);
/* flush pending ops */
@ -551,36 +600,28 @@ static void rr_handover_locked(grpc_exec_ctx *exec_ctx,
static void glb_rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
/* If shutdown or error free the arg. Rely on the rest of the code to set the
* right grpclb status. */
rr_connectivity_data *rr_conn_data = arg;
glb_lb_policy *glb_policy = rr_conn_data->glb_policy;
if (rr_conn_data->state == GRPC_CHANNEL_SHUTDOWN) {
if (glb_policy->serverlist != NULL) {
/* a RR policy is shutting down but there's a serverlist available ->
* perform a handover */
gpr_mu_lock(&glb_policy->mu);
rr_handover_locked(exec_ctx, glb_policy, error);
gpr_mu_unlock(&glb_policy->mu);
} else {
/* shutting down and no new serverlist available. Bail out. */
gpr_free(rr_conn_data);
}
} else {
if (error == GRPC_ERROR_NONE) {
if (rr_conn_data->state != GRPC_CHANNEL_SHUTDOWN &&
!glb_policy->shutting_down) {
gpr_mu_lock(&glb_policy->mu);
/* RR not shutting down. Mimic the RR's policy state */
grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
rr_conn_data->state, GRPC_ERROR_REF(error),
"glb_rr_connectivity_changed");
/* resubscribe */
"rr_connectivity_cb");
/* resubscribe. Reuse the "rr_connectivity_cb" weak ref. */
grpc_lb_policy_notify_on_state_change(exec_ctx, glb_policy->rr_policy,
&rr_conn_data->state,
&rr_conn_data->on_change);
gpr_mu_unlock(&glb_policy->mu);
} else { /* error */
} else {
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"rr_connectivity_cb");
gpr_free(rr_conn_data);
}
}
}
static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
@ -682,18 +723,11 @@ static grpc_lb_policy *glb_create(grpc_exec_ctx *exec_ctx,
return NULL;
}
rr_connectivity_data *rr_connectivity =
gpr_malloc(sizeof(rr_connectivity_data));
memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
rr_connectivity);
rr_connectivity->glb_policy = glb_policy;
glb_policy->rr_connectivity = rr_connectivity;
grpc_lb_policy_init(&glb_policy->base, &glb_lb_policy_vtable);
gpr_mu_init(&glb_policy->mu);
grpc_connectivity_state_init(&glb_policy->state_tracker, GRPC_CHANNEL_IDLE,
"grpclb");
return &glb_policy->base;
}
@ -710,14 +744,13 @@ static void glb_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
gpr_mu_destroy(&glb_policy->mu);
grpc_lb_addresses_destroy(glb_policy->addresses);
gpr_free(glb_policy);
}
static void lb_client_data_destroy(struct lb_client_data *lb_client);
static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
glb_policy->shutting_down = true;
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
@ -741,15 +774,16 @@ static void glb_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
if (glb_policy->rr_policy) {
/* unsubscribe */
grpc_lb_policy_notify_on_state_change(
exec_ctx, glb_policy->rr_policy, NULL,
&glb_policy->rr_connectivity->on_change);
GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
}
lb_client_data_destroy(glb_policy->lb_client);
glb_policy->lb_client = NULL;
if (glb_policy->started_picking) {
if (glb_policy->lb_call != NULL) {
grpc_call_cancel(glb_policy->lb_call, NULL);
/* lb_on_server_status_received will pick up the cancellation and clean up
*/
}
}
grpc_connectivity_state_set(
exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
@ -780,17 +814,12 @@ static void glb_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static grpc_call *lb_client_data_get_call(struct lb_client_data *lb_client);
static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
uint32_t initial_metadata_flags_mask,
uint32_t initial_metadata_flags_eq,
grpc_error *error) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (glb_policy->lb_client != NULL) {
/* cancel the call to the load balancer service, if any */
grpc_call_cancel(lb_client_data_get_call(glb_policy->lb_client), NULL);
}
pending_pick *pp = glb_policy->pending_picks;
glb_policy->pending_picks = NULL;
while (pp != NULL) {
@ -810,18 +839,20 @@ static void glb_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
GRPC_ERROR_UNREF(error);
}
static void query_for_backends(grpc_exec_ctx *exec_ctx,
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy);
static void start_picking(grpc_exec_ctx *exec_ctx, glb_lb_policy *glb_policy) {
static void start_picking_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
glb_policy->started_picking = true;
query_for_backends(exec_ctx, glb_policy);
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
query_for_backends_locked(exec_ctx, glb_policy);
}
static void glb_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
glb_lb_policy *glb_policy = (glb_lb_policy *)pol;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->started_picking) {
start_picking(exec_ctx, glb_policy);
start_picking_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
}
@ -847,8 +878,8 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
if (glb_policy->rr_policy != NULL) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "about to PICK from 0x%" PRIxPTR "",
(intptr_t)glb_policy->rr_policy);
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
(void *)glb_policy, (void *)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
@ -865,11 +896,17 @@ static int glb_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done = pick_from_internal_rr_locked(exec_ctx, glb_policy->rr_policy,
pick_args, target, wc_arg);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
"picks",
(void *)(glb_policy));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target,
on_complete);
if (!glb_policy->started_picking) {
start_picking(exec_ctx, glb_policy);
start_picking_locked(exec_ctx, glb_policy);
}
pick_done = false;
}
@ -898,7 +935,7 @@ static void glb_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
} else {
add_pending_ping(&glb_policy->pending_pings, closure);
if (!glb_policy->started_picking) {
start_picking(exec_ctx, glb_policy);
start_picking_locked(exec_ctx, glb_policy);
}
}
gpr_mu_unlock(&glb_policy->mu);
@ -916,250 +953,182 @@ static void glb_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&glb_policy->mu);
}
/*
* lb_client_data
*
* Used internally for the client call to the LB */
typedef struct lb_client_data {
gpr_mu mu;
/* called once initial metadata's been sent */
grpc_closure md_sent;
/* called once the LoadBalanceRequest has been sent to the LB server. See
* src/proto/grpc/.../load_balancer.proto */
grpc_closure req_sent;
/* A response from the LB server has been received (or error). Process it */
grpc_closure res_rcvd;
/* After the client has sent a close to the LB server */
grpc_closure close_sent;
/* ... and the status from the LB server has been received */
grpc_closure srv_status_rcvd;
grpc_call *lb_call; /* streaming call to the LB server, */
gpr_timespec deadline; /* for the streaming call to the LB server */
grpc_metadata_array initial_metadata_recv; /* initial MD from LB server */
grpc_metadata_array trailing_metadata_recv; /* trailing MD from LB server */
/* what's being sent to the LB server. Note that its value may vary if the LB
* server indicates a redirect. */
grpc_byte_buffer *request_payload;
/* response from the LB server, if any. Processed in res_recv_cb() */
grpc_byte_buffer *response_payload;
/* the call's status and status detailset in srv_status_rcvd_cb() */
grpc_status_code status;
char *status_details;
size_t status_details_capacity;
/* pointer back to the enclosing policy */
glb_lb_policy *glb_policy;
} lb_client_data;
static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error);
static lb_client_data *lb_client_data_create(glb_lb_policy *glb_policy) {
static void lb_call_init(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->server_name != NULL);
GPR_ASSERT(glb_policy->server_name[0] != '\0');
lb_client_data *lb_client = gpr_malloc(sizeof(lb_client_data));
memset(lb_client, 0, sizeof(lb_client_data));
gpr_mu_init(&lb_client->mu);
grpc_closure_init(&lb_client->md_sent, md_sent_cb, lb_client);
grpc_closure_init(&lb_client->req_sent, req_sent_cb, lb_client);
grpc_closure_init(&lb_client->res_rcvd, res_recv_cb, lb_client);
grpc_closure_init(&lb_client->close_sent, close_sent_cb, lb_client);
grpc_closure_init(&lb_client->srv_status_rcvd, srv_status_rcvd_cb, lb_client);
lb_client->deadline = glb_policy->deadline;
/* Note the following LB call progresses every time there's activity in \a
* glb_policy->base.interested_parties, which is comprised of the polling
* entities from \a client_channel. */
lb_client->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_call = grpc_channel_create_pollset_set_call(
glb_policy->lb_channel, NULL, GRPC_PROPAGATE_DEFAULTS,
glb_policy->base.interested_parties,
"/grpc.lb.v1.LoadBalancer/BalanceLoad", glb_policy->server_name,
lb_client->deadline, NULL);
glb_policy->deadline, NULL);
grpc_metadata_array_init(&lb_client->initial_metadata_recv);
grpc_metadata_array_init(&lb_client->trailing_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_init(&glb_policy->lb_trailing_metadata_recv);
grpc_grpclb_request *request =
grpc_grpclb_request_create(glb_policy->server_name);
gpr_slice request_payload_slice = grpc_grpclb_request_encode(request);
lb_client->request_payload =
glb_policy->lb_request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
gpr_slice_unref(request_payload_slice);
grpc_grpclb_request_destroy(request);
lb_client->status_details = NULL;
lb_client->status_details_capacity = 0;
lb_client->glb_policy = glb_policy;
return lb_client;
glb_policy->lb_call_status_details = NULL;
glb_policy->lb_call_status_details_capacity = 0;
grpc_closure_init(&glb_policy->lb_on_server_status_received,
lb_on_server_status_received, glb_policy);
grpc_closure_init(&glb_policy->lb_on_response_received,
lb_on_response_received, glb_policy);
gpr_backoff_init(&glb_policy->lb_call_backoff_state, BACKOFF_MULTIPLIER,
BACKOFF_JITTER, BACKOFF_MIN_SECONDS * 1000,
BACKOFF_MAX_SECONDS * 1000);
}
static void lb_client_data_destroy(lb_client_data *lb_client) {
grpc_call_destroy(lb_client->lb_call);
grpc_metadata_array_destroy(&lb_client->initial_metadata_recv);
grpc_metadata_array_destroy(&lb_client->trailing_metadata_recv);
static void lb_call_destroy(glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_call != NULL);
grpc_call_destroy(glb_policy->lb_call);
glb_policy->lb_call = NULL;
grpc_byte_buffer_destroy(lb_client->request_payload);
grpc_metadata_array_destroy(&glb_policy->lb_initial_metadata_recv);
grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
gpr_free(lb_client->status_details);
gpr_mu_destroy(&lb_client->mu);
gpr_free(lb_client);
}
static grpc_call *lb_client_data_get_call(lb_client_data *lb_client) {
return lb_client->lb_call;
grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
gpr_free(glb_policy->lb_call_status_details);
}
/*
* Auxiliary functions and LB client callbacks.
*/
static void query_for_backends(grpc_exec_ctx *exec_ctx,
static void query_for_backends_locked(grpc_exec_ctx *exec_ctx,
glb_lb_policy *glb_policy) {
GPR_ASSERT(glb_policy->lb_channel != NULL);
lb_call_init(glb_policy);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Query for backends (grpclb: %p, lb_call: %p)",
(void *)glb_policy, (void *)glb_policy->lb_call);
}
GPR_ASSERT(glb_policy->lb_call != NULL);
glb_policy->lb_client = lb_client_data_create(glb_policy);
grpc_call_error call_error;
grpc_op ops[1];
grpc_op ops[4];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_client->md_sent);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&glb_policy->lb_client->trailing_metadata_recv;
op->data.recv_status_on_client.status = &glb_policy->lb_client->status;
op->data.recv_status_on_client.status_details =
&glb_policy->lb_client->status_details;
op->data.recv_status_on_client.status_details_capacity =
&glb_policy->lb_client->status_details_capacity;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &glb_policy->lb_initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_client->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_client->srv_status_rcvd);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void md_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
lb_client_data *lb_client = arg;
GPR_ASSERT(lb_client->lb_call);
grpc_op ops[1];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
GPR_ASSERT(glb_policy->lb_request_payload != NULL);
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = lb_client->request_payload;
op->data.send_message = glb_policy->lb_request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
&lb_client->req_sent);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void req_sent_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
lb_client_data *lb_client = arg;
GPR_ASSERT(lb_client->lb_call);
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &lb_client->initial_metadata_recv;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata =
&glb_policy->lb_trailing_metadata_recv;
op->data.recv_status_on_client.status = &glb_policy->lb_call_status;
op->data.recv_status_on_client.status_details =
&glb_policy->lb_call_status_details;
op->data.recv_status_on_client.status_details_capacity =
&glb_policy->lb_call_status_details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
/* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
* count goes to zero) to be unref'd in lb_on_server_status_received */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_server_status_received");
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_server_status_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
op = ops;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &lb_client->response_payload;
op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
&lb_client->res_rcvd);
/* take another weak ref to be unref'd in lb_on_response_received */
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received");
call_error = grpc_call_start_batch_and_execute(
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received);
GPR_ASSERT(GRPC_CALL_OK == call_error);
}
static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
lb_client_data *lb_client = arg;
static void lb_on_response_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
grpc_op ops[2];
memset(ops, 0, sizeof(ops));
grpc_op *op = ops;
if (lb_client->response_payload != NULL) {
if (glb_policy->lb_response_payload != NULL) {
gpr_backoff_reset(&glb_policy->lb_call_backoff_state);
/* Received data from the LB server. Look inside
* lb_client->response_payload, for a serverlist. */
* glb_policy->lb_response_payload, for a serverlist. */
grpc_byte_buffer_reader bbr;
grpc_byte_buffer_reader_init(&bbr, lb_client->response_payload);
grpc_byte_buffer_reader_init(&bbr, glb_policy->lb_response_payload);
gpr_slice response_slice = grpc_byte_buffer_reader_readall(&bbr);
grpc_byte_buffer_destroy(lb_client->response_payload);
grpc_byte_buffer_destroy(glb_policy->lb_response_payload);
grpc_grpclb_serverlist *serverlist =
grpc_grpclb_response_parse_serverlist(response_slice);
if (serverlist != NULL) {
GPR_ASSERT(glb_policy->lb_call != NULL);
gpr_slice_unref(response_slice);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO, "Serverlist with %lu servers received",
(unsigned long)serverlist->num_servers);
for (size_t i = 0; i < serverlist->num_servers; ++i) {
grpc_resolved_address addr;
parse_server(serverlist->servers[i], &addr);
char *ipport;
grpc_sockaddr_to_string(&ipport, &addr, false);
gpr_log(GPR_INFO, "Serverlist[%lu]: %s", (unsigned long)i, ipport);
gpr_free(ipport);
}
}
/* update serverlist */
if (serverlist->num_servers > 0) {
gpr_mu_lock(&lb_client->glb_policy->mu);
if (grpc_grpclb_serverlist_equals(lb_client->glb_policy->serverlist,
serverlist)) {
gpr_mu_lock(&glb_policy->mu);
if (grpc_grpclb_serverlist_equals(glb_policy->serverlist, serverlist)) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Incoming server list identical to current, ignoring.");
}
} else { /* new serverlist */
if (lb_client->glb_policy->serverlist != NULL) {
if (glb_policy->serverlist != NULL) {
/* dispose of the old serverlist */
grpc_grpclb_destroy_serverlist(lb_client->glb_policy->serverlist);
grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
}
/* and update the copy in the glb_lb_policy instance */
lb_client->glb_policy->serverlist = serverlist;
}
if (lb_client->glb_policy->rr_policy == NULL) {
/* initial "handover", in this case from a null RR policy, meaning
* it'll just create the first RR policy instance */
rr_handover_locked(exec_ctx, lb_client->glb_policy, error);
} else {
/* unref the RR policy, eventually leading to its substitution with a
* new one constructed from the received serverlist (see
* glb_rr_connectivity_changed) */
GRPC_LB_POLICY_UNREF(exec_ctx, lb_client->glb_policy->rr_policy,
"serverlist_received");
glb_policy->serverlist = serverlist;
rr_handover_locked(exec_ctx, glb_policy, error);
}
gpr_mu_unlock(&lb_client->glb_policy->mu);
gpr_mu_unlock(&glb_policy->mu);
} else {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
@ -1167,60 +1136,94 @@ static void res_recv_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
"response with > 0 servers is received");
}
}
} else { /* serverlist == NULL */
gpr_log(GPR_ERROR, "Invalid LB response received: '%s'. Ignoring.",
gpr_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
gpr_slice_unref(response_slice);
}
if (!glb_policy->shutting_down) {
/* keep listening for serverlist updates */
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &lb_client->response_payload;
op->data.recv_message = &glb_policy->lb_response_payload;
op->flags = 0;
op->reserved = NULL;
op++;
/* reuse the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() */
const grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
&lb_client->res_rcvd); /* loop */
exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
&glb_policy->lb_on_response_received); /* loop */
GPR_ASSERT(GRPC_CALL_OK == call_error);
return;
}
GPR_ASSERT(serverlist == NULL);
gpr_log(GPR_ERROR, "Invalid LB response received: '%s'",
gpr_dump_slice(response_slice, GPR_DUMP_ASCII));
gpr_slice_unref(response_slice);
/* Disconnect from server returning invalid response. */
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op->flags = 0;
op->reserved = NULL;
op++;
grpc_call_error call_error = grpc_call_start_batch_and_execute(
exec_ctx, lb_client->lb_call, ops, (size_t)(op - ops),
&lb_client->close_sent);
GPR_ASSERT(GRPC_CALL_OK == call_error);
} else { /* empty payload: call cancelled. */
/* dispose of the "lb_on_response_received" weak ref taken in
* query_for_backends_locked() and reused in every reception loop */
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_response_received_empty_payload");
}
/* empty payload: call cancelled by server. Cleanups happening in
* srv_status_rcvd_cb */
}
static void close_sent_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_call_on_retry_timer(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
glb_lb_policy *glb_policy = arg;
gpr_mu_lock(&glb_policy->mu);
if (!glb_policy->shutting_down) {
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"Close from LB client sent. Waiting from server status now");
gpr_log(GPR_INFO, "Restaring call to LB server (grpclb %p)",
(void *)glb_policy);
}
GPR_ASSERT(glb_policy->lb_call == NULL);
query_for_backends_locked(exec_ctx, glb_policy);
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"grpclb_on_retry_timer");
}
static void srv_status_rcvd_cb(grpc_exec_ctx *exec_ctx, void *arg,
static void lb_on_server_status_received(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
lb_client_data *lb_client = arg;
glb_lb_policy *glb_policy = arg;
gpr_mu_lock(&glb_policy->mu);
GPR_ASSERT(glb_policy->lb_call != NULL);
if (grpc_lb_glb_trace) {
gpr_log(GPR_INFO,
"status from lb server received. Status = %d, Details = '%s', "
"Capacity "
"= %lu",
lb_client->status, lb_client->status_details,
(unsigned long)lb_client->status_details_capacity);
gpr_log(GPR_DEBUG,
"Status from LB server received. Status = %d, Details = '%s', "
"(call: %p)",
glb_policy->lb_call_status, glb_policy->lb_call_status_details,
(void *)glb_policy->lb_call);
}
/* TODO(dgq): deal with stream termination properly (fire up another one?
* fail the original call?) */
/* We need to perform cleanups no matter what. */
lb_call_destroy(glb_policy);
if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
gpr_timespec next_try =
gpr_backoff_step(&glb_policy->lb_call_backoff_state, now);
if (grpc_lb_glb_trace) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void *)glb_policy);
gpr_timespec timeout = gpr_time_sub(next_try, now);
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG, "... retrying in %" PRId64 ".%09d seconds.",
timeout.tv_sec, timeout.tv_nsec);
} else {
gpr_log(GPR_DEBUG, "... retrying immediately.");
}
}
GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_retry_timer");
grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
lb_call_on_retry_timer, glb_policy, now);
}
gpr_mu_unlock(&glb_policy->mu);
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
"lb_on_server_status_received");
}
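To illustrate the retry cadence implemented above, here is a minimal standalone sketch that steps the same backoff configuration a few times; it assumes the gpr_backoff API exactly as it is called in this file and is not part of the change:

#include <inttypes.h>
#include <stdio.h>
#include <grpc/support/time.h>
#include "src/core/lib/support/backoff.h"

int main(void) {
  gpr_backoff b;
  gpr_backoff_init(&b, 1.6 /* multiplier */, 0.2 /* jitter */,
                   10 * 1000 /* min ms */, 60 * 1000 /* max ms */);
  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
  for (int i = 0; i < 5; i++) {
    gpr_timespec next_try = gpr_backoff_step(&b, now);
    gpr_timespec timeout = gpr_time_sub(next_try, now);
    printf("retry #%d in ~%" PRId64 ".%09d s\n", i + 1, timeout.tv_sec,
           timeout.tv_nsec);
    now = next_try; /* pretend the LB call failed again right at next_try */
  }
  return 0;
}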
/* Code wiring the policy with the rest of the core */

@ -77,7 +77,7 @@ typedef struct _grpc_lb_v1_Server {
bool has_port;
int32_t port;
bool has_load_balance_token;
char load_balance_token[65];
char load_balance_token[50];
bool has_drop_request;
bool drop_request;
/* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
@ -172,7 +172,7 @@ extern const pb_field_t grpc_lb_v1_Server_fields[5];
#define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
#define grpc_lb_v1_InitialLoadBalanceResponse_size 90
/* grpc_lb_v1_ServerList_size depends on runtime parameters */
#define grpc_lb_v1_Server_size 98
#define grpc_lb_v1_Server_size 83
/* Message IDs (where set with "msgid" option) */
#ifdef PB_MSGID

@ -120,6 +120,8 @@ typedef struct {
grpc_connectivity_state connectivity_state;
/** the subchannel's target user data */
void *user_data;
/** vtable to operate over \a user_data */
const grpc_lb_user_data_vtable *user_data_vtable;
} subchannel_data;
struct round_robin_lb_policy {
@ -186,9 +188,13 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
}
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "[READYLIST] ADVANCED LAST PICK. NOW AT NODE %p (SC %p)",
(void *)p->ready_list_last_pick,
(void *)p->ready_list_last_pick->subchannel);
gpr_log(GPR_DEBUG,
"[READYLIST, RR: %p] ADVANCED LAST PICK. NOW AT NODE %p (SC %p, "
"CSC %p)",
(void *)p, (void *)p->ready_list_last_pick,
(void *)p->ready_list_last_pick->subchannel,
(void *)grpc_subchannel_get_connected_subchannel(
p->ready_list_last_pick->subchannel));
}
}
@ -255,9 +261,18 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
static void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *elem;
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Destroying Round Robin policy at %p", (void *)pol);
}
for (size_t i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin_destroy");
if (sd->user_data != NULL) {
GPR_ASSERT(sd->user_data_vtable != NULL);
sd->user_data_vtable->destroy(sd->user_data);
}
gpr_free(sd);
}
@ -285,6 +300,9 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
}
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@ -296,7 +314,7 @@ static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(
exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE("Channel Shutdown"), "shutdown");
GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
@ -395,6 +413,11 @@ static int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_INFO, "Round Robin %p trying to pick", (void *)pol);
}
if ((selected = peek_next_connected_locked(p))) {
/* readily available, report right away */
*target = GRPC_CONNECTED_SUBCHANNEL_REF(
@ -435,7 +458,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
ready_list *selected;
int unref = 0;
@ -456,12 +478,14 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemtively replicates rr_pick()'s actions. */
selected = peek_next_connected_locked(p);
ready_list *selected = peek_next_connected_locked(p);
GPR_ASSERT(selected != NULL);
if (p->pending_picks != NULL) {
/* if the selected subchannel is going to be used for the pending
* picks, update the last picked pointer */
advance_last_picked_locked(p);
}
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
@ -585,6 +609,7 @@ static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_subchannel_get_connected_subchannel(selected->subchannel),
"picked");
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "picked");
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_sched(exec_ctx, closure,
@ -653,7 +678,11 @@ static grpc_lb_policy *round_robin_create(grpc_exec_ctx *exec_ctx,
sd->policy = p;
sd->index = subchannel_idx;
sd->subchannel = subchannel;
sd->user_data = addresses->addresses[i].user_data;
sd->user_data_vtable = addresses->user_data_vtable;
if (sd->user_data_vtable != NULL) {
sd->user_data =
sd->user_data_vtable->copy(addresses->addresses[i].user_data);
}
++subchannel_idx;
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);

@ -190,7 +190,7 @@ static void dns_on_resolved(grpc_exec_ctx *exec_ctx, void *arg,
GPR_ASSERT(!r->have_retry_timer);
r->have_retry_timer = true;
GRPC_RESOLVER_REF(&r->base, "retry-timer");
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) <= 0) {
if (gpr_time_cmp(timeout, gpr_time_0(timeout.clock_type)) > 0) {
gpr_log(GPR_DEBUG, "retrying in %" PRId64 ".%09d seconds", timeout.tv_sec,
timeout.tv_nsec);
} else {

@ -347,7 +347,7 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
&exec_ctx, &f->base, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
// Clean up.
GRPC_SECURITY_CONNECTOR_UNREF(&f->security_connector->base,
"client_channel_factory_create_channel");
"secure_client_channel_factory_create_channel");
grpc_channel_args_destroy(new_args);
grpc_client_channel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);

@ -50,6 +50,7 @@
#include <grpc/support/useful.h>
#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/ext/transport/chttp2/transport/http2_errors.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"
@ -1578,6 +1579,20 @@ static const maybe_complete_func_type maybe_complete_funcs[] = {
grpc_chttp2_maybe_complete_recv_initial_metadata,
grpc_chttp2_maybe_complete_recv_trailing_metadata};
static void force_client_rst_stream(grpc_exec_ctx *exec_ctx, void *sp,
grpc_error *error) {
grpc_chttp2_stream *s = sp;
grpc_chttp2_transport *t = s->t;
if (!s->write_closed) {
gpr_slice_buffer_add(
&t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_CHTTP2_NO_ERROR,
&s->stats.outgoing));
grpc_chttp2_initiate_write(exec_ctx, t, false, "force_rst_stream");
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
}
GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
}
grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
void *hpack_parser,
grpc_chttp2_transport *t,
@ -1613,6 +1628,17 @@ grpc_error *grpc_chttp2_header_parser_parse(grpc_exec_ctx *exec_ctx,
s->header_frames_received++;
}
if (parser->is_eof) {
if (t->is_client && !s->write_closed) {
/* server eof ==> complete closure; we may need to forcefully close
the stream. Wait, however, until the combiner lock is ready to be
released -- it might be that we receive a RST_STREAM following this
and can avoid the extra write */
GRPC_CHTTP2_STREAM_REF(s, "final_rst");
grpc_combiner_execute_finally(
exec_ctx, t->combiner,
grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
false);
}
grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
GRPC_ERROR_NONE);
}

@ -46,7 +46,7 @@ static gpr_mu g_endpoint_mutex;
void grpc_network_status_shutdown(void) {
if (head != NULL) {
gpr_log(GPR_ERROR,
"Memory leaked as all network endpoints were not shut down");
"Memory leaked as not all network endpoints were shut down");
}
gpr_mu_destroy(&g_endpoint_mutex);
}

@ -715,3 +715,10 @@ void grpc_resource_user_alloc_slices(
grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
count * length, &slice_allocator->on_allocated);
}
gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
size_t size) {
grpc_resource_user_alloc(exec_ctx, resource_user, size, NULL);
return ru_slice_create(resource_user, size);
}

@ -221,4 +221,9 @@ void grpc_resource_user_alloc_slices(
grpc_resource_user_slice_allocator *slice_allocator, size_t length,
size_t count, gpr_slice_buffer *dest);
/* Allocate one slice of length \a size synchronously. */
gpr_slice grpc_resource_user_slice_malloc(grpc_exec_ctx *exec_ctx,
grpc_resource_user *resource_user,
size_t size);
#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
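A usage sketch of the new helper, mirroring how alloc_uv_buf in tcp_uv.c (further down in this change) calls it; the enclosing tcp endpoint and its resource_user are assumed context, not part of this header:

grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
/* synchronously charge the slice's size against the resource user's quota and
 * return a slice backed by that allocation */
gpr_slice read_slice = grpc_resource_user_slice_malloc(
    &exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
/* ... hand the slice to the reader; unreffing it is expected to release the
 * memory back to the resource user (see ru_slice_create in resource_quota.c) ... */
grpc_exec_ctx_finish(&exec_ctx);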

@ -54,9 +54,12 @@ typedef struct grpc_uv_tcp_connect {
grpc_endpoint **endpoint;
int refs;
char *addr_name;
grpc_resource_quota *resource_quota;
} grpc_uv_tcp_connect;
static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect *connect) {
static void uv_tcp_connect_cleanup(grpc_exec_ctx *exec_ctx,
grpc_uv_tcp_connect *connect) {
grpc_resource_quota_internal_unref(exec_ctx, connect->resource_quota);
gpr_free(connect);
}
@ -74,7 +77,7 @@ static void uv_tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp,
}
done = (--connect->refs == 0);
if (done) {
uv_tcp_connect_cleanup(connect);
uv_tcp_connect_cleanup(exec_ctx, connect);
}
}
@ -86,8 +89,8 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
grpc_closure *closure = connect->closure;
grpc_timer_cancel(&exec_ctx, &connect->alarm);
if (status == 0) {
*connect->endpoint =
grpc_tcp_create(connect->tcp_handle, connect->addr_name);
*connect->endpoint = grpc_tcp_create(
connect->tcp_handle, connect->resource_quota, connect->addr_name);
} else {
error = GRPC_ERROR_CREATE("Failed to connect to remote host");
error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
@ -105,7 +108,7 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
}
done = (--connect->refs == 0);
if (done) {
uv_tcp_connect_cleanup(connect);
uv_tcp_connect_cleanup(&exec_ctx, connect);
}
grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
@ -114,16 +117,31 @@ static void uv_tc_on_connect(uv_connect_t *req, int status) {
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *resolved_addr,
gpr_timespec deadline) {
grpc_uv_tcp_connect *connect;
grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
(void)channel_args;
(void)interested_parties;
if (channel_args != NULL) {
for (size_t i = 0; i < channel_args->num_args; i++) {
if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
resource_quota = grpc_resource_quota_internal_ref(
channel_args->args[i].value.pointer.p);
}
}
}
connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
memset(connect, 0, sizeof(grpc_uv_tcp_connect));
connect->closure = closure;
connect->endpoint = ep;
connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
connect->resource_quota = resource_quota;
uv_tcp_init(uv_default_loop(), connect->tcp_handle);
connect->connect_req.data = connect;
// TODO(murgatroid99): figure out what the return value here means
@ -138,16 +156,18 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// overridden by api_fuzzer.c
void (*grpc_tcp_client_connect_impl)(
grpc_exec_ctx *exec_ctx, grpc_closure *closure, grpc_endpoint **ep,
grpc_pollset_set *interested_parties, const grpc_resolved_address *addr,
grpc_pollset_set *interested_parties, const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) = tcp_client_connect_impl;
void grpc_tcp_client_connect(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
grpc_endpoint **ep,
grpc_pollset_set *interested_parties,
const grpc_channel_args *channel_args,
const grpc_resolved_address *addr,
gpr_timespec deadline) {
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties, addr,
deadline);
grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
channel_args, addr, deadline);
}
#endif /* GRPC_UV */

@ -657,41 +657,46 @@ done:
}
}
unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
/* Return listener at port_index or NULL. Should only be called with s->mu
locked. */
static grpc_tcp_listener *get_port_index(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
unsigned num_ports = 0;
grpc_tcp_listener *sp;
gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
for (sp = s->head; sp; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
if (++num_ports > port_index) {
return sp;
}
}
}
for (; sp; sp = sp->sibling, ++num_fds)
;
return NULL;
}
unsigned grpc_tcp_server_port_fd_count(grpc_tcp_server *s,
unsigned port_index) {
unsigned num_fds = 0;
gpr_mu_lock(&s->mu);
grpc_tcp_listener *sp = get_port_index(s, port_index);
for (; sp; sp = sp->sibling) {
++num_fds;
}
gpr_mu_unlock(&s->mu);
return num_fds;
}
int grpc_tcp_server_port_fd(grpc_tcp_server *s, unsigned port_index,
unsigned fd_index) {
grpc_tcp_listener *sp;
int fd;
gpr_mu_lock(&s->mu);
for (sp = s->head; sp && port_index != 0; sp = sp->next) {
if (!sp->is_sibling) {
--port_index;
}
grpc_tcp_listener *sp = get_port_index(s, port_index);
for (; sp; sp = sp->sibling, --fd_index) {
if (fd_index == 0) {
gpr_mu_unlock(&s->mu);
return sp->fd;
}
for (; sp && fd_index != 0; sp = sp->sibling, --fd_index)
;
if (sp) {
fd = sp->fd;
} else {
fd = -1;
}
gpr_mu_unlock(&s->mu);
return fd;
return -1;
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,

@ -76,13 +76,30 @@ struct grpc_tcp_server {
/* shutdown callback */
grpc_closure *shutdown_complete;
grpc_resource_quota *resource_quota;
};
grpc_error *grpc_tcp_server_create(grpc_closure *shutdown_complete,
grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
grpc_closure *shutdown_complete,
const grpc_channel_args *args,
grpc_tcp_server **server) {
grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
(void)args;
s->resource_quota = grpc_resource_quota_create(NULL);
for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
if (args->args[i].type == GRPC_ARG_POINTER) {
grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
s->resource_quota =
grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
} else {
grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
" must be a pointer to a buffer pool");
}
}
}
gpr_ref_init(&s->refs, 1);
s->on_accept_cb = NULL;
s->on_accept_cb_arg = NULL;
@ -119,6 +136,7 @@ static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
gpr_free(sp->handle);
gpr_free(sp);
}
grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
gpr_free(s);
}
@ -201,7 +219,7 @@ static void on_connect(uv_stream_t *server, int status) {
} else {
gpr_log(GPR_INFO, "uv_tcp_getpeername error: %s", uv_strerror(status));
}
ep = grpc_tcp_create(client, peer_name_string);
ep = grpc_tcp_create(client, sp->server->resource_quota, peer_name_string);
sp->server->on_accept_cb(&exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
&acceptor);
grpc_exec_ctx_finish(&exec_ctx);

@ -54,6 +54,9 @@ typedef struct {
grpc_endpoint base;
gpr_refcount refcount;
uv_write_t write_req;
uv_shutdown_t shutdown_req;
uv_tcp_t *handle;
grpc_closure *read_cb;
@ -64,14 +67,23 @@ typedef struct {
gpr_slice_buffer *write_slices;
uv_buf_t *write_buffers;
grpc_resource_user resource_user;
bool shutting_down;
bool resource_user_shutting_down;
char *peer_string;
grpc_pollset *pollset;
} grpc_tcp;
static void uv_close_callback(uv_handle_t *handle) { gpr_free(handle); }
static void tcp_free(grpc_tcp *tcp) { gpr_free(tcp); }
static void tcp_free(grpc_tcp *tcp) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_resource_user_destroy(&exec_ctx, &tcp->resource_user);
gpr_free(tcp);
grpc_exec_ctx_finish(&exec_ctx);
}
/*#define GRPC_TCP_REFCOUNT_DEBUG*/
#ifdef GRPC_TCP_REFCOUNT_DEBUG
@ -106,11 +118,14 @@ static void tcp_ref(grpc_tcp *tcp) { gpr_ref(&tcp->refcount); }
static void alloc_uv_buf(uv_handle_t *handle, size_t suggested_size,
uv_buf_t *buf) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
grpc_tcp *tcp = handle->data;
(void)suggested_size;
tcp->read_slice = gpr_slice_malloc(GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
tcp->read_slice = grpc_resource_user_slice_malloc(
&exec_ctx, &tcp->resource_user, GRPC_TCP_DEFAULT_READ_SLICE_SIZE);
buf->base = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
buf->len = GPR_SLICE_LENGTH(tcp->read_slice);
grpc_exec_ctx_finish(&exec_ctx);
}
static void read_callback(uv_stream_t *stream, ssize_t nread,
@ -198,7 +213,8 @@ static void write_callback(uv_write_t *req, int status) {
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
}
gpr_free(tcp->write_buffers);
gpr_free(req);
grpc_resource_user_free(&exec_ctx, &tcp->resource_user,
sizeof(uv_buf_t) * tcp->write_slices->count);
grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
grpc_exec_ctx_finish(&exec_ctx);
}
@ -243,12 +259,15 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
buffer_count = (unsigned int)tcp->write_slices->count;
buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
grpc_resource_user_alloc(exec_ctx, &tcp->resource_user,
sizeof(uv_buf_t) * buffer_count, NULL);
for (i = 0; i < buffer_count; i++) {
slice = &tcp->write_slices->slices[i];
buffers[i].base = (char *)GPR_SLICE_START_PTR(*slice);
buffers[i].len = GPR_SLICE_LENGTH(*slice);
}
write_req = gpr_malloc(sizeof(uv_write_t));
tcp->write_buffers = buffers;
write_req = &tcp->write_req;
write_req->data = tcp;
TCP_REF(tcp, "write");
// TODO(murgatroid99): figure out what the return value here means
@ -274,13 +293,29 @@ static void uv_add_to_pollset_set(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
(void)pollset;
}
static void shutdown_callback(uv_shutdown_t *req, int status) { gpr_free(req); }
static void shutdown_callback(uv_shutdown_t *req, int status) {}
static void resource_user_shutdown_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_error *error) {
TCP_UNREF(arg, "resource_user");
}
static void uv_resource_user_maybe_shutdown(grpc_exec_ctx *exec_ctx,
grpc_tcp *tcp) {
if (!tcp->resource_user_shutting_down) {
tcp->resource_user_shutting_down = true;
TCP_REF(tcp, "resource_user");
grpc_resource_user_shutdown(
exec_ctx, &tcp->resource_user,
grpc_closure_create(resource_user_shutdown_done, tcp));
}
}
static void uv_endpoint_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
if (!tcp->shutting_down) {
tcp->shutting_down = true;
uv_shutdown_t *req = gpr_malloc(sizeof(uv_shutdown_t));
uv_shutdown_t *req = &tcp->shutdown_req;
uv_shutdown(req, (uv_stream_t *)tcp->handle, shutdown_callback);
}
}
@ -289,6 +324,7 @@ static void uv_destroy(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
grpc_network_status_unregister_endpoint(ep);
grpc_tcp *tcp = (grpc_tcp *)ep;
uv_close((uv_handle_t *)tcp->handle, uv_close_callback);
uv_resource_user_maybe_shutdown(exec_ctx, tcp);
TCP_UNREF(tcp, "destroy");
}
@ -297,18 +333,21 @@ static char *uv_get_peer(grpc_endpoint *ep) {
return gpr_strdup(tcp->peer_string);
}
static grpc_resource_user *uv_get_resource_user(grpc_endpoint *ep) {
grpc_tcp *tcp = (grpc_tcp *)ep;
return &tcp->resource_user;
}
static grpc_workqueue *uv_get_workqueue(grpc_endpoint *ep) { return NULL; }
static grpc_endpoint_vtable vtable = {uv_endpoint_read,
uv_endpoint_write,
uv_get_workqueue,
uv_add_to_pollset,
uv_add_to_pollset_set,
uv_endpoint_shutdown,
uv_destroy,
uv_get_peer};
static grpc_endpoint_vtable vtable = {
uv_endpoint_read, uv_endpoint_write, uv_get_workqueue,
uv_add_to_pollset, uv_add_to_pollset_set, uv_endpoint_shutdown,
uv_destroy, uv_get_resource_user, uv_get_peer};
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
char *peer_string) {
grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
if (grpc_tcp_trace) {
@ -325,6 +364,8 @@ grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string) {
gpr_ref_init(&tcp->refcount, 1);
tcp->peer_string = gpr_strdup(peer_string);
tcp->shutting_down = false;
tcp->resource_user_shutting_down = false;
grpc_resource_user_init(&tcp->resource_user, resource_quota, peer_string);
/* Tell network status tracking code about the new endpoint */
grpc_network_status_register_endpoint(&tcp->base);

@ -52,6 +52,8 @@ extern int grpc_tcp_trace;
#define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle, char *peer_string);
grpc_endpoint *grpc_tcp_create(uv_tcp_t *handle,
grpc_resource_quota *resource_quota,
char *peer_string);
#endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */

@ -210,11 +210,11 @@ void grpc_security_connector_unref(grpc_security_connector *sc) {
}
static void connector_pointer_arg_destroy(void *p) {
GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg");
GRPC_SECURITY_CONNECTOR_UNREF(p, "connector_pointer_arg_destroy");
}
static void *connector_pointer_arg_copy(void *p) {
return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg");
return GRPC_SECURITY_CONNECTOR_REF(p, "connector_pointer_arg_copy");
}
static int connector_pointer_cmp(void *a, void *b) { return GPR_ICMP(a, b); }

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,19 +31,27 @@
*
*/
#ifndef GRPCXX_IMPL_CODEGEN_SYNC_CXX11_H
#define GRPCXX_IMPL_CODEGEN_SYNC_CXX11_H
#include "src/core/lib/transport/pid_controller.h"
#include <condition_variable>
#include <mutex>
void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
double gain_p, double gain_i, double gain_d) {
pid_controller->gain_p = gain_p;
pid_controller->gain_i = gain_i;
pid_controller->gain_d = gain_d;
grpc_pid_controller_reset(pid_controller);
}
namespace grpc {
void grpc_pid_controller_reset(grpc_pid_controller *pid_controller) {
pid_controller->last_error = 0.0;
pid_controller->error_integral = 0.0;
}
using std::condition_variable;
using std::mutex;
using std::lock_guard;
using std::unique_lock;
} // namespace grpc
#endif // GRPCXX_IMPL_CODEGEN_SYNC_CXX11_H
double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
double error, double dt) {
pid_controller->error_integral += error * dt;
double diff_error = (error - pid_controller->last_error) / dt;
pid_controller->last_error = error;
return dt * (pid_controller->gain_p * error +
pid_controller->gain_i * pid_controller->error_integral +
pid_controller->gain_d * diff_error);
}

@ -1,6 +1,6 @@
/*
*
* Copyright 2015, Google Inc.
* Copyright 2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,15 +31,34 @@
*
*/
#ifndef GRPCXX_IMPL_CODEGEN_IMPL_SYNC_H
#define GRPCXX_IMPL_CODEGEN_IMPL_SYNC_H
#ifndef GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
#define GRPC_CORE_LIB_TRANSPORT_PID_CONTROLLER_H
#include <grpc++/impl/codegen/config.h>
/* \file Simple PID controller.
Implements a proportional-integral-derivative controller.
Used when we want to iteratively control a variable to converge some other
observed value to a 'set-point'.
Gains can be set to adjust sensitivity to current error (p), the integral
of error (i), and the derivative of error (d). */
#ifdef GRPC_CXX0X_NO_THREAD
#include <grpc++/impl/codegen/sync_no_cxx11.h>
#else
#include <grpc++/impl/codegen/sync_cxx11.h>
#endif
typedef struct {
double gain_p;
double gain_i;
double gain_d;
double last_error;
double error_integral;
} grpc_pid_controller;
/** Initialize the controller */
void grpc_pid_controller_init(grpc_pid_controller *pid_controller,
double gain_p, double gain_i, double gain_d);
/** Reset the controller: useful when things have changed significantly */
void grpc_pid_controller_reset(grpc_pid_controller *pid_controller);
#endif // GRPCXX_IMPL_CODEGEN_IMPL_SYNC_H
/** Update the controller: given a current error estimate, and the time since
the last update, returns a delta to the control value */
double grpc_pid_controller_update(grpc_pid_controller *pid_controller,
double error, double dt);
#endif
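A small usage sketch of the controller declared above; the gains and the stand-in measurement are arbitrary, chosen only to show that grpc_pid_controller_update returns a delta to apply to the controlled value:

#include "src/core/lib/transport/pid_controller.h"

/* Illustrative only: drive `control` so the observed value converges to
   `set_point`, updating once per `dt` seconds. */
double converge(double set_point, double dt, int steps) {
  grpc_pid_controller pid;
  grpc_pid_controller_init(&pid, /*gain_p=*/0.8, /*gain_i=*/0.1, /*gain_d=*/0.05);

  double control = 0.0;
  for (int i = 0; i < steps; i++) {
    double observed = control;          /* stand-in for a real measurement */
    double error = set_point - observed;
    control += grpc_pid_controller_update(&pid, error, dt);
  }
  return control;
}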

@ -106,11 +106,11 @@ grpc_connectivity_state Channel::GetState(bool try_to_connect) {
}
namespace {
class TagSaver GRPC_FINAL : public CompletionQueueTag {
class TagSaver final : public CompletionQueueTag {
public:
explicit TagSaver(void* tag) : tag_(tag) {}
~TagSaver() GRPC_OVERRIDE {}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
~TagSaver() override {}
bool FinalizeResult(void** tag, bool* status) override {
*tag = tag_;
delete this;
return true;
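This hunk, and most of the C++ hunks that follow, drop the GRPC_FINAL/GRPC_OVERRIDE portability macros in favor of the C++11 keywords now that a C++11 toolchain is assumed. A minimal sketch of the pattern with made-up class names (not the gRPC types):

// Before: class TagSaver GRPC_FINAL : public CompletionQueueTag { ...
//           bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
// After: the keywords are used directly.
class CompletionTag {
 public:
  virtual ~CompletionTag() {}
  virtual bool FinalizeResult(void** tag, bool* status) = 0;
};

class OneShotTag final : public CompletionTag {  // no further derivation allowed
 public:
  explicit OneShotTag(void* tag) : tag_(tag) {}
  bool FinalizeResult(void** tag, bool* status) override {  // signature checked by compiler
    *tag = tag_;
    *status = true;
    return true;
  }

 private:
  void* tag_;
};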

@ -45,12 +45,12 @@
namespace grpc {
class DefaultGlobalClientCallbacks GRPC_FINAL
class DefaultGlobalClientCallbacks final
: public ClientContext::GlobalCallbacks {
public:
~DefaultGlobalClientCallbacks() GRPC_OVERRIDE {}
void DefaultConstructor(ClientContext* context) GRPC_OVERRIDE {}
void Destructor(ClientContext* context) GRPC_OVERRIDE {}
~DefaultGlobalClientCallbacks() override {}
void DefaultConstructor(ClientContext* context) override {}
void Destructor(ClientContext* context) override {}
};
static DefaultGlobalClientCallbacks g_default_client_callbacks;
@ -93,7 +93,7 @@ void ClientContext::AddMetadata(const grpc::string& meta_key,
void ClientContext::set_call(grpc_call* call,
const std::shared_ptr<Channel>& channel) {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
GPR_ASSERT(call_ == nullptr);
call_ = call;
channel_ = channel;
@ -119,7 +119,7 @@ void ClientContext::set_compression_algorithm(
}
void ClientContext::TryCancel() {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (call_) {
grpc_call_cancel(call_, nullptr);
} else {

@ -40,12 +40,12 @@
namespace grpc {
class CronetChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
class CronetChannelCredentialsImpl final : public ChannelCredentials {
public:
CronetChannelCredentialsImpl(void* engine) : engine_(engine) {}
std::shared_ptr<grpc::Channel> CreateChannel(
const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
const string& target, const grpc::ChannelArguments& args) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
return CreateChannelInternal(
@ -53,9 +53,7 @@ class CronetChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
&channel_args, nullptr));
}
SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE {
return nullptr;
}
SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
private:
void* engine_;

@ -43,10 +43,10 @@
namespace grpc {
namespace {
class InsecureChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
class InsecureChannelCredentialsImpl final : public ChannelCredentials {
public:
std::shared_ptr<grpc::Channel> CreateChannel(
const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE {
const string& target, const grpc::ChannelArguments& args) override {
grpc_channel_args channel_args;
args.SetChannelArgs(&channel_args);
return CreateChannelInternal(
@ -54,9 +54,7 @@ class InsecureChannelCredentialsImpl GRPC_FINAL : public ChannelCredentials {
grpc_insecure_channel_create(target.c_str(), &channel_args, nullptr));
}
SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE {
return nullptr;
}
SecureChannelCredentials* AsSecureCredentials() override { return nullptr; }
};
} // namespace

@ -43,34 +43,34 @@
namespace grpc {
class SecureChannelCredentials GRPC_FINAL : public ChannelCredentials {
class SecureChannelCredentials final : public ChannelCredentials {
public:
explicit SecureChannelCredentials(grpc_channel_credentials* c_creds);
~SecureChannelCredentials() { grpc_channel_credentials_release(c_creds_); }
grpc_channel_credentials* GetRawCreds() { return c_creds_; }
std::shared_ptr<grpc::Channel> CreateChannel(
const string& target, const grpc::ChannelArguments& args) GRPC_OVERRIDE;
SecureChannelCredentials* AsSecureCredentials() GRPC_OVERRIDE { return this; }
const string& target, const grpc::ChannelArguments& args) override;
SecureChannelCredentials* AsSecureCredentials() override { return this; }
private:
grpc_channel_credentials* const c_creds_;
};
class SecureCallCredentials GRPC_FINAL : public CallCredentials {
class SecureCallCredentials final : public CallCredentials {
public:
explicit SecureCallCredentials(grpc_call_credentials* c_creds);
~SecureCallCredentials() { grpc_call_credentials_release(c_creds_); }
grpc_call_credentials* GetRawCreds() { return c_creds_; }
bool ApplyToCall(grpc_call* call) GRPC_OVERRIDE;
SecureCallCredentials* AsSecureCredentials() GRPC_OVERRIDE { return this; }
bool ApplyToCall(grpc_call* call) override;
SecureCallCredentials* AsSecureCredentials() override { return this; }
private:
grpc_call_credentials* const c_creds_;
};
class MetadataCredentialsPluginWrapper GRPC_FINAL {
class MetadataCredentialsPluginWrapper final {
public:
static void Destroy(void* wrapper);
static void GetMetadata(void* wrapper, grpc_auth_metadata_context context,

@ -121,6 +121,11 @@ void ChannelArguments::SetResourceQuota(
grpc_resource_quota_arg_vtable());
}
void ChannelArguments::SetLoadBalancingPolicyName(
const grpc::string& lb_policy_name) {
SetString(GRPC_ARG_LB_POLICY_NAME, lb_policy_name);
}
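A hedged sketch of the client-side counterpart, assuming the grpc++ headers touched in this change: ChannelArguments::SetLoadBalancingPolicyName as added above, plus the existing ResourceQuota / SetResourceQuota API. The function name and sizes are illustrative:

#include <memory>
#include <string>

#include <grpc++/create_channel.h>
#include <grpc++/resource_quota.h>
#include <grpc++/security/credentials.h>
#include <grpc++/support/channel_arguments.h>

std::shared_ptr<grpc::Channel> MakeRoundRobinChannel(const std::string& target) {
  grpc::ChannelArguments args;
  args.SetLoadBalancingPolicyName("round_robin");  // added in the hunk above

  grpc::ResourceQuota quota("client_quota");
  quota.Resize(64 * 1024 * 1024);  // illustrative 64 MiB cap on buffer memory
  args.SetResourceQuota(quota);

  return grpc::CreateCustomChannel(target, grpc::InsecureChannelCredentials(),
                                   args);
}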
void ChannelArguments::SetInt(const grpc::string& key, int value) {
grpc_arg arg;
arg.type = GRPC_ARG_INTEGER;

@ -268,7 +268,7 @@ namespace internal {
// Members of this class correspond to the members of the C
// grpc_channel_filter struct.
template <typename ChannelDataType, typename CallDataType>
class ChannelFilter GRPC_FINAL {
class ChannelFilter final {
public:
static const size_t channel_data_size = sizeof(ChannelDataType);

@ -40,30 +40,29 @@ struct grpc_auth_context;
namespace grpc {
class SecureAuthContext GRPC_FINAL : public AuthContext {
class SecureAuthContext final : public AuthContext {
public:
SecureAuthContext(grpc_auth_context* ctx, bool take_ownership);
~SecureAuthContext() GRPC_OVERRIDE;
~SecureAuthContext() override;
bool IsPeerAuthenticated() const GRPC_OVERRIDE;
bool IsPeerAuthenticated() const override;
std::vector<grpc::string_ref> GetPeerIdentity() const GRPC_OVERRIDE;
std::vector<grpc::string_ref> GetPeerIdentity() const override;
grpc::string GetPeerIdentityPropertyName() const GRPC_OVERRIDE;
grpc::string GetPeerIdentityPropertyName() const override;
std::vector<grpc::string_ref> FindPropertyValues(
const grpc::string& name) const GRPC_OVERRIDE;
const grpc::string& name) const override;
AuthPropertyIterator begin() const GRPC_OVERRIDE;
AuthPropertyIterator begin() const override;
AuthPropertyIterator end() const GRPC_OVERRIDE;
AuthPropertyIterator end() const override;
void AddProperty(const grpc::string& key,
const grpc::string_ref& value) GRPC_OVERRIDE;
const grpc::string_ref& value) override;
virtual bool SetPeerIdentityPropertyName(const grpc::string& name)
GRPC_OVERRIDE;
virtual bool SetPeerIdentityPropertyName(const grpc::string& name) override;
private:
grpc_auth_context* ctx_;

@ -42,7 +42,7 @@
namespace grpc {
class ProtoServerReflection GRPC_FINAL
class ProtoServerReflection final
: public reflection::v1alpha::ServerReflection::Service {
public:
ProtoServerReflection();
@ -56,7 +56,7 @@ class ProtoServerReflection GRPC_FINAL
ServerContext* context,
ServerReaderWriter<reflection::v1alpha::ServerReflectionResponse,
reflection::v1alpha::ServerReflectionRequest>* stream)
GRPC_OVERRIDE;
override;
private:
Status ListService(ServerContext* context,

@ -31,15 +31,15 @@
*
*/
#include <grpc++/impl/sync.h>
#include <grpc++/impl/thd.h>
#include <mutex>
#include <thread>
#include "src/cpp/server/dynamic_thread_pool.h"
namespace grpc {
DynamicThreadPool::DynamicThread::DynamicThread(DynamicThreadPool* pool)
: pool_(pool),
thd_(new grpc::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
thd_(new std::thread(&DynamicThreadPool::DynamicThread::ThreadFunc,
this)) {}
DynamicThreadPool::DynamicThread::~DynamicThread() {
thd_->join();
@ -49,7 +49,7 @@ DynamicThreadPool::DynamicThread::~DynamicThread() {
void DynamicThreadPool::DynamicThread::ThreadFunc() {
pool_->ThreadFunc();
// Now that we have killed ourselves, we should reduce the thread count
grpc::unique_lock<grpc::mutex> lock(pool_->mu_);
std::unique_lock<std::mutex> lock(pool_->mu_);
pool_->nthreads_--;
// Move ourselves to dead list
pool_->dead_threads_.push_back(this);
@ -62,7 +62,7 @@ void DynamicThreadPool::DynamicThread::ThreadFunc() {
void DynamicThreadPool::ThreadFunc() {
for (;;) {
// Wait until work is available or we are shutting down.
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (!shutdown_ && callbacks_.empty()) {
// If there are too many threads waiting, then quit this thread
if (threads_waiting_ >= reserve_threads_) {
@ -91,7 +91,7 @@ DynamicThreadPool::DynamicThreadPool(int reserve_threads)
nthreads_(0),
threads_waiting_(0) {
for (int i = 0; i < reserve_threads_; i++) {
grpc::lock_guard<grpc::mutex> lock(mu_);
std::lock_guard<std::mutex> lock(mu_);
nthreads_++;
new DynamicThread(this);
}
@ -104,7 +104,7 @@ void DynamicThreadPool::ReapThreads(std::list<DynamicThread*>* tlist) {
}
DynamicThreadPool::~DynamicThreadPool() {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
shutdown_ = true;
cv_.notify_all();
while (nthreads_ != 0) {
@ -114,7 +114,7 @@ DynamicThreadPool::~DynamicThreadPool() {
}
void DynamicThreadPool::Add(const std::function<void()>& callback) {
grpc::lock_guard<grpc::mutex> lock(mu_);
std::lock_guard<std::mutex> lock(mu_);
// Add work to the callbacks list
callbacks_.push(callback);
// Increase pool size or notify as needed

@ -34,24 +34,25 @@
#ifndef GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
#define GRPC_INTERNAL_CPP_DYNAMIC_THREAD_POOL_H
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <grpc++/impl/sync.h>
#include <grpc++/impl/thd.h>
#include <grpc++/support/config.h>
#include "src/cpp/server/thread_pool_interface.h"
namespace grpc {
class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
class DynamicThreadPool final : public ThreadPoolInterface {
public:
explicit DynamicThreadPool(int reserve_threads);
~DynamicThreadPool();
void Add(const std::function<void()>& callback) GRPC_OVERRIDE;
void Add(const std::function<void()>& callback) override;
private:
class DynamicThread {
@ -61,12 +62,12 @@ class DynamicThreadPool GRPC_FINAL : public ThreadPoolInterface {
private:
DynamicThreadPool* pool_;
std::unique_ptr<grpc::thread> thd_;
std::unique_ptr<std::thread> thd_;
void ThreadFunc();
};
grpc::mutex mu_;
grpc::condition_variable cv_;
grpc::condition_variable shutdown_cv_;
std::mutex mu_;
std::condition_variable cv_;
std::condition_variable shutdown_cv_;
bool shutdown_;
std::queue<std::function<void()>> callbacks_;
int reserve_threads_;
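The thread-pool hunks above replace the grpc::thread/grpc::mutex wrappers with std::thread, std::mutex and std::condition_variable. A self-contained sketch of the same guard-and-notify pattern — a toy single-thread worker, not the real DynamicThreadPool:

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

class TinyWorker {
 public:
  TinyWorker() : shutdown_(false), thd_([this] { Run(); }) {}
  ~TinyWorker() {
    {
      std::lock_guard<std::mutex> lock(mu_);
      shutdown_ = true;
    }
    cv_.notify_one();
    thd_.join();
  }
  void Add(std::function<void()> cb) {
    {
      std::lock_guard<std::mutex> lock(mu_);
      work_.push(std::move(cb));
    }
    cv_.notify_one();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> lock(mu_);
    for (;;) {
      // Sleep until there is work to run or we are shutting down.
      cv_.wait(lock, [this] { return shutdown_ || !work_.empty(); });
      if (!work_.empty()) {
        auto cb = std::move(work_.front());
        work_.pop();
        lock.unlock();  // run callbacks without holding the lock
        cb();
        lock.lock();
      } else if (shutdown_) {
        return;
      }
    }
  }

  std::mutex mu_;
  std::condition_variable cv_;
  bool shutdown_;
  std::queue<std::function<void()>> work_;
  std::thread thd_;  // declared last so it starts after the other members
};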

@ -38,14 +38,13 @@
namespace grpc {
namespace {
class InsecureServerCredentialsImpl GRPC_FINAL : public ServerCredentials {
class InsecureServerCredentialsImpl final : public ServerCredentials {
public:
int AddPortToServer(const grpc::string& addr,
grpc_server* server) GRPC_OVERRIDE {
int AddPortToServer(const grpc::string& addr, grpc_server* server) override {
return grpc_server_add_insecure_http2_port(server, addr.c_str());
}
void SetAuthMetadataProcessor(
const std::shared_ptr<AuthMetadataProcessor>& processor) GRPC_OVERRIDE {
const std::shared_ptr<AuthMetadataProcessor>& processor) override {
(void)processor;
GPR_ASSERT(0); // Should not be called on InsecureServerCredentials.
}

@ -44,7 +44,7 @@
namespace grpc {
class AuthMetadataProcessorAyncWrapper GRPC_FINAL {
class AuthMetadataProcessorAyncWrapper final {
public:
static void Destroy(void* wrapper);
@ -64,19 +64,18 @@ class AuthMetadataProcessorAyncWrapper GRPC_FINAL {
std::shared_ptr<AuthMetadataProcessor> processor_;
};
class SecureServerCredentials GRPC_FINAL : public ServerCredentials {
class SecureServerCredentials final : public ServerCredentials {
public:
explicit SecureServerCredentials(grpc_server_credentials* creds)
: creds_(creds) {}
~SecureServerCredentials() GRPC_OVERRIDE {
~SecureServerCredentials() override {
grpc_server_credentials_release(creds_);
}
int AddPortToServer(const grpc::string& addr,
grpc_server* server) GRPC_OVERRIDE;
int AddPortToServer(const grpc::string& addr, grpc_server* server) override;
void SetAuthMetadataProcessor(
const std::shared_ptr<AuthMetadataProcessor>& processor) GRPC_OVERRIDE;
const std::shared_ptr<AuthMetadataProcessor>& processor) override;
private:
grpc_server_credentials* creds_;

@ -55,11 +55,11 @@
namespace grpc {
class DefaultGlobalCallbacks GRPC_FINAL : public Server::GlobalCallbacks {
class DefaultGlobalCallbacks final : public Server::GlobalCallbacks {
public:
~DefaultGlobalCallbacks() GRPC_OVERRIDE {}
void PreSynchronousRequest(ServerContext* context) GRPC_OVERRIDE {}
void PostSynchronousRequest(ServerContext* context) GRPC_OVERRIDE {}
~DefaultGlobalCallbacks() override {}
void PreSynchronousRequest(ServerContext* context) override {}
void PostSynchronousRequest(ServerContext* context) override {}
};
static std::shared_ptr<Server::GlobalCallbacks> g_callbacks = nullptr;
@ -79,7 +79,7 @@ class Server::UnimplementedAsyncRequestContext {
GenericServerAsyncReaderWriter generic_stream_;
};
class Server::UnimplementedAsyncRequest GRPC_FINAL
class Server::UnimplementedAsyncRequest final
: public UnimplementedAsyncRequestContext,
public GenericAsyncRequest {
public:
@ -89,7 +89,7 @@ class Server::UnimplementedAsyncRequest GRPC_FINAL
server_(server),
cq_(cq) {}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
bool FinalizeResult(void** tag, bool* status) override;
ServerContext* context() { return &server_context_; }
GenericServerAsyncReaderWriter* stream() { return &generic_stream_; }
@ -101,13 +101,13 @@ class Server::UnimplementedAsyncRequest GRPC_FINAL
typedef SneakyCallOpSet<CallOpSendInitialMetadata, CallOpServerSendStatus>
UnimplementedAsyncResponseOp;
class Server::UnimplementedAsyncResponse GRPC_FINAL
class Server::UnimplementedAsyncResponse final
: public UnimplementedAsyncResponseOp {
public:
UnimplementedAsyncResponse(UnimplementedAsyncRequest* request);
~UnimplementedAsyncResponse() { delete request_; }
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
bool r = UnimplementedAsyncResponseOp::FinalizeResult(tag, status);
delete this;
return r;
@ -122,7 +122,7 @@ class ShutdownTag : public CompletionQueueTag {
bool FinalizeResult(void** tag, bool* status) { return false; }
};
class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
class Server::SyncRequest final : public CompletionQueueTag {
public:
SyncRequest(RpcServiceMethod* method, void* tag)
: method_(method),
@ -170,7 +170,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
}
}
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE {
bool FinalizeResult(void** tag, bool* status) override {
if (!*status) {
grpc_completion_queue_destroy(cq_);
}
@ -182,7 +182,7 @@ class Server::SyncRequest GRPC_FINAL : public CompletionQueueTag {
return true;
}
class CallData GRPC_FINAL {
class CallData final {
public:
explicit CallData(Server* server, SyncRequest* mrd)
: cq_(mrd->cq_),
@ -255,7 +255,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
cq_timeout_msec_(cq_timeout_msec),
global_callbacks_(global_callbacks) {}
WorkStatus PollForWork(void** tag, bool* ok) GRPC_OVERRIDE {
WorkStatus PollForWork(void** tag, bool* ok) override {
*tag = nullptr;
gpr_timespec deadline =
gpr_time_from_millis(cq_timeout_msec_, GPR_TIMESPAN);
@ -272,7 +272,7 @@ class Server::SyncRequestThreadManager : public ThreadManager {
GPR_UNREACHABLE_CODE(return TIMEOUT);
}
void DoWork(void* tag, bool ok) GRPC_OVERRIDE {
void DoWork(void* tag, bool ok) override {
SyncRequest* sync_req = static_cast<SyncRequest*>(tag);
if (!sync_req) {
@ -379,7 +379,7 @@ Server::Server(
Server::~Server() {
{
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (started_ && !shutdown_) {
lock.unlock();
Shutdown();
@ -501,7 +501,7 @@ bool Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
}
void Server::ShutdownInternal(gpr_timespec deadline) {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (started_ && !shutdown_) {
shutdown_ = true;
@ -549,7 +549,7 @@ void Server::ShutdownInternal(gpr_timespec deadline) {
}
void Server::Wait() {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
while (started_ && !shutdown_notified_) {
shutdown_cv_.wait(lock);
}

@ -33,9 +33,10 @@
#include <grpc++/server_context.h>
#include <mutex>
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
#include <grpc++/impl/sync.h>
#include <grpc++/support/time.h>
#include <grpc/compression.h>
#include <grpc/grpc.h>
@ -48,7 +49,7 @@ namespace grpc {
// CompletionOp
class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
class ServerContext::CompletionOp final : public CallOpSetInterface {
public:
// initial refs: one in the server context, one in the cq
CompletionOp()
@ -58,8 +59,8 @@ class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
finalized_(false),
cancelled_(0) {}
void FillOps(grpc_op* ops, size_t* nops) GRPC_OVERRIDE;
bool FinalizeResult(void** tag, bool* status) GRPC_OVERRIDE;
void FillOps(grpc_op* ops, size_t* nops) override;
bool FinalizeResult(void** tag, bool* status) override;
bool CheckCancelled(CompletionQueue* cq) {
cq->TryPluck(this);
@ -76,20 +77,20 @@ class ServerContext::CompletionOp GRPC_FINAL : public CallOpSetInterface {
private:
bool CheckCancelledNoPluck() {
grpc::lock_guard<grpc::mutex> g(mu_);
std::lock_guard<std::mutex> g(mu_);
return finalized_ ? (cancelled_ != 0) : false;
}
bool has_tag_;
void* tag_;
grpc::mutex mu_;
std::mutex mu_;
int refs_;
bool finalized_;
int cancelled_;
};
void ServerContext::CompletionOp::Unref() {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (--refs_ == 0) {
lock.unlock();
delete this;
@ -105,7 +106,7 @@ void ServerContext::CompletionOp::FillOps(grpc_op* ops, size_t* nops) {
}
bool ServerContext::CompletionOp::FinalizeResult(void** tag, bool* status) {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
finalized_ = true;
bool ret = false;
if (has_tag_) {

@ -31,12 +31,13 @@
*
*/
#include <grpc++/impl/sync.h>
#include <grpc++/impl/thd.h>
#include <grpc/support/log.h>
#include "src/cpp/thread_manager/thread_manager.h"
#include <climits>
#include <mutex>
#include <thread>
#include "src/cpp/thread_manager/thread_manager.h"
#include <grpc/support/log.h>
namespace grpc {
@ -59,7 +60,7 @@ ThreadManager::ThreadManager(int min_pollers, int max_pollers)
ThreadManager::~ThreadManager() {
{
std::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
GPR_ASSERT(num_threads_ == 0);
}
@ -67,29 +68,29 @@ ThreadManager::~ThreadManager() {
}
void ThreadManager::Wait() {
std::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
while (num_threads_ != 0) {
shutdown_cv_.wait(lock);
}
}
void ThreadManager::Shutdown() {
std::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
shutdown_ = true;
}
bool ThreadManager::IsShutdown() {
std::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
return shutdown_;
}
void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
{
std::unique_lock<grpc::mutex> list_lock(list_mu_);
std::unique_lock<std::mutex> list_lock(list_mu_);
completed_threads_.push_back(thd);
}
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
num_threads_--;
if (num_threads_ == 0) {
shutdown_cv_.notify_one();
@ -97,7 +98,7 @@ void ThreadManager::MarkAsCompleted(WorkerThread* thd) {
}
void ThreadManager::CleanupCompletedThreads() {
std::unique_lock<grpc::mutex> lock(list_mu_);
std::unique_lock<std::mutex> lock(list_mu_);
for (auto thd = completed_threads_.begin(); thd != completed_threads_.end();
thd = completed_threads_.erase(thd)) {
delete *thd;
@ -114,7 +115,7 @@ void ThreadManager::Initialize() {
// less than max threshold (i.e max_pollers_) and the total number of threads is
// below the maximum threshold, we can let the current thread continue as poller
bool ThreadManager::MaybeContinueAsPoller() {
std::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (shutdown_ || num_pollers_ > max_pollers_) {
return false;
}
@ -127,7 +128,7 @@ bool ThreadManager::MaybeContinueAsPoller() {
// threads currently blocked in PollForWork()) is below the threshold (i.e
// min_pollers_) and the total number of threads is below the maximum threshold
void ThreadManager::MaybeCreatePoller() {
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
if (!shutdown_ && num_pollers_ < min_pollers_) {
num_pollers_++;
num_threads_++;
@ -156,7 +157,7 @@ void ThreadManager::MainWorkLoop() {
WorkStatus work_status = PollForWork(&tag, &ok);
{
grpc::unique_lock<grpc::mutex> lock(mu_);
std::unique_lock<std::mutex> lock(mu_);
num_pollers_--;
if (work_status == TIMEOUT && num_pollers_ > min_pollers_) {

@ -34,11 +34,12 @@
#ifndef GRPC_INTERNAL_CPP_THREAD_MANAGER_H
#define GRPC_INTERNAL_CPP_THREAD_MANAGER_H
#include <condition_variable>
#include <list>
#include <memory>
#include <mutex>
#include <thread>
#include <grpc++/impl/sync.h>
#include <grpc++/impl/thd.h>
#include <grpc++/support/config.h>
namespace grpc {
@ -115,7 +116,7 @@ class ThreadManager {
void Run();
ThreadManager* thd_mgr_;
grpc::thread thd_;
std::thread thd_;
};
// The main function in ThreadManager
@ -134,10 +135,10 @@ class ThreadManager {
// Protects shutdown_, num_pollers_ and num_threads_
// TODO: sreek - Change num_pollers and num_threads_ to atomics
grpc::mutex mu_;
std::mutex mu_;
bool shutdown_;
grpc::condition_variable shutdown_cv_;
std::condition_variable shutdown_cv_;
// Number of threads doing polling
int num_pollers_;
@ -150,7 +151,7 @@ class ThreadManager {
// currently polling i.e num_pollers_)
int num_threads_;
grpc::mutex list_mu_;
std::mutex list_mu_;
std::list<WorkerThread*> completed_threads_;
};

@ -32,9 +32,6 @@
*/
#include <grpc++/support/config.h>
#ifndef GRPC_CXX0X_NO_CHRONO
#include <grpc++/support/time.h>
#include <grpc/support/time.h>
@ -91,5 +88,3 @@ system_clock::time_point Timespec2Timepoint(gpr_timespec t) {
}
} // namespace grpc
#endif // !GRPC_CXX0X_NO_CHRONO
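With the GRPC_CXX0X_NO_CHRONO guard removed above, the gpr_timespec <-> std::chrono conversions are always compiled in. A small sketch using grpc::Timespec2Timepoint from <grpc++/support/time.h>; the helper below is ours:

#include <chrono>

#include <grpc++/support/time.h>
#include <grpc/support/time.h>

// How long until a realtime gpr deadline fires, expressed in std::chrono terms.
std::chrono::milliseconds TimeUntil(gpr_timespec deadline_realtime) {
  std::chrono::system_clock::time_point deadline =
      grpc::Timespec2Timepoint(deadline_realtime);
  std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
  return std::chrono::duration_cast<std::chrono::milliseconds>(deadline - now);
}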

@ -0,0 +1,51 @@
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import glob
import yaml
os.chdir(os.path.dirname(sys.argv[0])+'/../..')
out = {}
out['libs'] = [{
'name': 'google_benchmark',
'build': 'private',
'language': 'c++',
'secure': 'no',
'defaults': 'google_benchmark',
'src': glob.glob('third_party/google_benchmark/src/*.cc'),
'headers': glob.glob('third_party/google_benchmark/src/*.h') + glob.glob('third_party/google_benchmark/include/benchmark/*.h'),
}]
print yaml.dump(out)

@ -1,6 +1,5 @@
grpc.lb.v1.InitialLoadBalanceRequest.name max_size:128
grpc.lb.v1.InitialLoadBalanceResponse.client_config max_size:64
grpc.lb.v1.InitialLoadBalanceResponse.load_balancer_delegate max_size:64
grpc.lb.v1.Server.ip_address max_size:16
grpc.lb.v1.Server.load_balance_token max_size:65
grpc.lb.v1.Server.load_balance_token max_size:50
load_balancer.proto no_unions:true

@ -63,7 +63,8 @@ message LoadBalanceRequest {
}
message InitialLoadBalanceRequest {
// Name of load balanced service (IE, service.grpc.gslb.google.com)
// Name of load balanced service (IE, service.grpc.gslb.google.com). Its
// length should be less than 256 bytes.
string name = 1;
}
@ -95,7 +96,8 @@ message InitialLoadBalanceResponse {
// This is an application layer redirect that indicates the client should use
// the specified server for load balancing. When this field is non-empty in
// the response, the client should open a separate connection to the
// load_balancer_delegate and call the BalanceLoad method.
// load_balancer_delegate and call the BalanceLoad method. Its length should
// be less than 64 bytes.
string load_balancer_delegate = 1;
// This interval defines how often the client should send the client stats
@ -130,6 +132,8 @@ message Server {
// frontend requests for that pick must include the token in its initial
// metadata. The token is used by the backend to verify the request and to
// allow the backend to report load to the gRPC LB system.
//
// Its length is variable but less than 50 bytes.
string load_balance_token = 3;
// Indicates whether this particular request should be dropped by the client

@ -181,6 +181,7 @@ CORE_SOURCE_FILES = [
'src/core/lib/transport/metadata.c',
'src/core/lib/transport/metadata_batch.c',
'src/core/lib/transport/method_config.c',
'src/core/lib/transport/pid_controller.c',
'src/core/lib/transport/static_metadata.c',
'src/core/lib/transport/timeout_encoding.c',
'src/core/lib/transport/transport.c',

@ -283,6 +283,8 @@ VALUE grpc_rb_compression_options_level_value_to_name_internal(
rb_eArgError,
"Failed to convert compression level value to name for value: %d",
(int)compression_value);
/* return something to avoid compiler error about no return */
return Qnil;
}
}

@ -37,5 +37,9 @@
<%include file="../../clang_update.include"/>
<%include file="../../run_tests_addons.include"/>
<%include file="../../libuv_install.include"/>
# Install gcc-4.8 and other relevant items
RUN apt-get update && apt-get -y install gcc-4.8 gcc-4.8-multilib g++-4.8 g++-4.8-multilib && apt-get clean
# Define the default command.
CMD ["bash"]

@ -35,5 +35,9 @@
<%include file="../../python_deps.include"/>
<%include file="../../cxx_deps.include"/>
<%include file="../../run_tests_addons.include"/>
# Install gcc-4.8 and other relevant items
RUN apt-get update && apt-get -y install gcc-4.8 gcc-4.8-multilib g++-4.8 g++-4.8-multilib && apt-get clean
# Define the default command.
CMD ["bash"]

@ -48,7 +48,6 @@
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/server.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/end2end/fake_resolver.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@ -501,7 +500,7 @@ void run_spec(const test_spec *spec) {
request_data rdata;
servers_fixture *f;
grpc_channel_args args;
grpc_arg arg;
grpc_arg arg_array[2];
rdata.call_details =
gpr_malloc(sizeof(grpc_call_details) * spec->num_servers);
f = setup_servers("127.0.0.1", &rdata, spec->num_servers);
@ -509,14 +508,16 @@ void run_spec(const test_spec *spec) {
/* Create client. */
servers_hostports_str = gpr_strjoin_sep((const char **)f->servers_hostports,
f->num_servers, ",", NULL);
gpr_asprintf(&client_hostport, "test:%s?lb_policy=round_robin",
servers_hostports_str);
gpr_asprintf(&client_hostport, "ipv4:%s", servers_hostports_str);
arg.type = GRPC_ARG_INTEGER;
arg.key = "grpc.testing.fixed_reconnect_backoff";
arg.value.integer = RETRY_TIMEOUT;
args.num_args = 1;
args.args = &arg;
arg_array[0].type = GRPC_ARG_INTEGER;
arg_array[0].key = "grpc.testing.fixed_reconnect_backoff";
arg_array[0].value.integer = RETRY_TIMEOUT;
arg_array[1].type = GRPC_ARG_STRING;
arg_array[1].key = GRPC_ARG_LB_POLICY_NAME;
arg_array[1].value.string = "round_robin";
args.num_args = 2;
args.args = arg_array;
client = grpc_insecure_channel_create(client_hostport, &args, NULL);
@ -540,19 +541,21 @@ static grpc_channel *create_client(const servers_fixture *f) {
grpc_channel *client;
char *client_hostport;
char *servers_hostports_str;
grpc_arg arg;
grpc_arg arg_array[2];
grpc_channel_args args;
servers_hostports_str = gpr_strjoin_sep((const char **)f->servers_hostports,
f->num_servers, ",", NULL);
gpr_asprintf(&client_hostport, "test:%s?lb_policy=round_robin",
servers_hostports_str);
gpr_asprintf(&client_hostport, "ipv4:%s", servers_hostports_str);
arg.type = GRPC_ARG_INTEGER;
arg.key = "grpc.testing.fixed_reconnect_backoff";
arg.value.integer = RETRY_TIMEOUT;
args.num_args = 1;
args.args = &arg;
arg_array[0].type = GRPC_ARG_INTEGER;
arg_array[0].key = "grpc.testing.fixed_reconnect_backoff";
arg_array[0].value.integer = RETRY_TIMEOUT;
arg_array[1].type = GRPC_ARG_STRING;
arg_array[1].key = GRPC_ARG_LB_POLICY_NAME;
arg_array[1].value.string = "round_robin";
args.num_args = 2;
args.args = arg_array;
client = grpc_insecure_channel_create(client_hostport, &args, NULL);
gpr_free(client_hostport);
@ -875,7 +878,6 @@ int main(int argc, char **argv) {
const size_t NUM_SERVERS = 4;
grpc_test_init(argc, argv);
grpc_fake_resolver_init();
grpc_init();
grpc_tracer_set_enabled("round_robin", 1);
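The test changes above stop encoding the LB policy in the resolver URI and instead pass GRPC_ARG_LB_POLICY_NAME as a string channel arg. Condensed into a standalone sketch (function name is illustrative; grpc_init() assumed):

#include <grpc/grpc.h>

grpc_channel *create_round_robin_channel(const char *target) {
  grpc_arg arg;
  arg.type = GRPC_ARG_STRING;
  arg.key = (char *)GRPC_ARG_LB_POLICY_NAME;
  arg.value.string = (char *)"round_robin";

  grpc_channel_args args = {1, &arg};
  return grpc_insecure_channel_create(target, &args, NULL);
}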

@ -115,8 +115,8 @@ static void reset_addr_and_set_magic_string(grpc_resolved_address **addr,
target.sin_family = AF_INET;
target.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
target.sin_port = htons((uint16_t)server_port);
(*addr)->len = sizeof(target);
*addr = (grpc_resolved_address *)gpr_malloc(sizeof(grpc_resolved_address));
(*addr)->len = sizeof(target);
memcpy((*addr)->addr, &target, sizeof(target));
}

@ -39,8 +39,11 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/channel/channel_args.h"
#include "src/core/lib/transport/metadata.h"
#include "src/core/lib/transport/method_config.h"
#include "test/core/end2end/cq_verifier.h"
#include "test/core/end2end/fake_resolver.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
@ -62,7 +65,6 @@ static void run_test(bool wait_for_ready, bool use_service_config) {
gpr_log(GPR_INFO, "TEST: wait_for_ready=%d use_service_config=%d",
wait_for_ready, use_service_config);
grpc_fake_resolver_init();
grpc_init();
grpc_metadata_array_init(&trailing_metadata_recv);
@ -70,21 +72,30 @@ static void run_test(bool wait_for_ready, bool use_service_config) {
cq = grpc_completion_queue_create(NULL);
cqv = cq_verifier_create(cq);
/* if using service config, create channel args */
grpc_channel_args *args = NULL;
if (use_service_config) {
GPR_ASSERT(wait_for_ready);
grpc_method_config_table_entry entry = {
grpc_mdstr_from_string("/service/method"),
grpc_method_config_create(&wait_for_ready, NULL, NULL, NULL),
};
grpc_method_config_table *method_config_table =
grpc_method_config_table_create(1, &entry);
GRPC_MDSTR_UNREF(entry.method_name);
grpc_method_config_unref(entry.method_config);
grpc_arg arg =
grpc_method_config_table_create_channel_arg(method_config_table);
args = grpc_channel_args_copy_and_add(args, &arg, 1);
grpc_method_config_table_unref(method_config_table);
}
/* create a call, channel to a port which will refuse connection */
int port = grpc_pick_unused_port_or_die();
char *addr;
gpr_join_host_port(&addr, "127.0.0.1", port);
if (use_service_config) {
GPR_ASSERT(wait_for_ready);
char *server_uri;
gpr_asprintf(&server_uri,
"test:%s?method_name=/service/method&wait_for_ready=1", addr);
gpr_free(addr);
addr = server_uri;
}
gpr_log(GPR_INFO, "server: %s", addr);
chan = grpc_insecure_channel_create(addr, NULL, NULL);
chan = grpc_insecure_channel_create(addr, args, NULL);
call = grpc_channel_create_call(chan, NULL, GRPC_PROPAGATE_DEFAULTS, cq,
"/service/method", "nonexistant", deadline,
NULL);
@ -133,6 +144,8 @@ static void run_test(bool wait_for_ready, bool use_service_config) {
gpr_free(details);
grpc_metadata_array_destroy(&trailing_metadata_recv);
if (args != NULL) grpc_channel_args_destroy(args);
grpc_shutdown();
}

@ -43,7 +43,7 @@ typedef struct grpc_end2end_test_config grpc_end2end_test_config;
#define FEATURE_MASK_SUPPORTS_HOSTNAME_VERIFICATION 2
#define FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS 4
#define FEATURE_MASK_SUPPORTS_REQUEST_PROXYING 8
#define FEATURE_MASK_SUPPORTS_QUERY_ARGS 16
#define FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL 16
#define FAIL_AUTH_CHECK_SERVER_ARG_NAME "fail_auth_check"
@ -60,7 +60,7 @@ struct grpc_end2end_test_config {
grpc_end2end_test_fixture (*create_fixture)(grpc_channel_args *client_args,
grpc_channel_args *server_args);
void (*init_client)(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args, const char *query_args);
grpc_channel_args *client_args);
void (*init_server)(grpc_end2end_test_fixture *f,
grpc_channel_args *server_args);
void (*tear_down_data)(grpc_end2end_test_fixture *f);

@ -49,7 +49,6 @@
#include "src/core/lib/iomgr/resolve_address.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/support/string.h"
#include "src/core/lib/transport/method_config.h"
//
// fake_resolver
@ -62,8 +61,6 @@ typedef struct {
// passed-in parameters
grpc_channel_args* channel_args;
grpc_lb_addresses* addresses;
char* lb_policy_name;
grpc_method_config_table* method_config_table;
// mutex guarding the rest of the state
gpr_mu mu;
@ -80,8 +77,6 @@ static void fake_resolver_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
gpr_mu_destroy(&r->mu);
grpc_channel_args_destroy(r->channel_args);
grpc_lb_addresses_destroy(r->addresses);
gpr_free(r->lb_policy_name);
grpc_method_config_table_unref(r->method_config_table);
gpr_free(r);
}
@ -101,21 +96,9 @@ static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
fake_resolver* r) {
if (r->next_completion != NULL && !r->published) {
r->published = true;
grpc_arg new_args[3];
size_t num_args = 0;
new_args[num_args++] = grpc_lb_addresses_create_channel_arg(r->addresses);
if (r->method_config_table != NULL) {
new_args[num_args++] =
grpc_method_config_table_create_channel_arg(r->method_config_table);
}
if (r->lb_policy_name != NULL) {
new_args[num_args].type = GRPC_ARG_STRING;
new_args[num_args].key = GRPC_ARG_LB_POLICY_NAME;
new_args[num_args].value.string = r->lb_policy_name;
++num_args;
}
grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
*r->target_result =
grpc_channel_args_copy_and_add(r->channel_args, new_args, num_args);
grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
r->next_completion = NULL;
}
@ -194,45 +177,6 @@ static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory,
grpc_lb_addresses_destroy(addresses);
return NULL;
}
// Construct method config table.
// We only support parameters for a single method.
grpc_method_config_table* method_config_table = NULL;
const char* method_name = grpc_uri_get_query_arg(args->uri, "method_name");
if (method_name != NULL) {
const char* wait_for_ready_str =
grpc_uri_get_query_arg(args->uri, "wait_for_ready");
// Anything other than "0" is interpreted as true.
bool wait_for_ready =
wait_for_ready_str != NULL && strcmp("0", wait_for_ready_str) != 0;
const char* timeout_str =
grpc_uri_get_query_arg(args->uri, "timeout_seconds");
gpr_timespec timeout = {timeout_str == NULL ? 0 : atoi(timeout_str), 0,
GPR_TIMESPAN};
const char* max_request_message_bytes_str =
grpc_uri_get_query_arg(args->uri, "max_request_message_bytes");
int32_t max_request_message_bytes =
max_request_message_bytes_str == NULL
? 0
: atoi(max_request_message_bytes_str);
const char* max_response_message_bytes_str =
grpc_uri_get_query_arg(args->uri, "max_response_message_bytes");
int32_t max_response_message_bytes =
max_response_message_bytes_str == NULL
? 0
: atoi(max_response_message_bytes_str);
grpc_method_config* method_config = grpc_method_config_create(
wait_for_ready_str == NULL ? NULL : &wait_for_ready,
timeout_str == NULL ? NULL : &timeout,
max_request_message_bytes_str == NULL ? NULL
: &max_request_message_bytes,
max_response_message_bytes_str == NULL ? NULL
: &max_response_message_bytes);
grpc_method_config_table_entry entry = {grpc_mdstr_from_string(method_name),
method_config};
method_config_table = grpc_method_config_table_create(1, &entry);
GRPC_MDSTR_UNREF(entry.method_name);
grpc_method_config_unref(method_config);
}
// Instantiate resolver.
fake_resolver* r = gpr_malloc(sizeof(fake_resolver));
memset(r, 0, sizeof(*r));
@ -243,9 +187,6 @@ static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory,
r->channel_args =
grpc_channel_args_copy_and_add(args->args, &server_name_arg, 1);
r->addresses = addresses;
r->lb_policy_name =
gpr_strdup(grpc_uri_get_query_arg(args->uri, "lb_policy"));
r->method_config_table = method_config_table;
gpr_mu_init(&r->mu);
grpc_resolver_init(&r->base, &fake_resolver_vtable);
return &r->base;

@ -79,9 +79,7 @@ static grpc_arg make_census_enable_arg(void) {
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
grpc_arg arg = make_census_enable_arg();
client_args = grpc_channel_args_copy_and_add(client_args, &arg, 1);
@ -113,7 +111,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack+census", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack+census", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};

@ -75,9 +75,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack_compression(
}
void chttp2_init_client_fullstack_compression(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_compression_fixture_data *ffd = f->fixture_data;
if (ffd->client_args_compression != NULL) {
grpc_channel_args_destroy(ffd->client_args_compression);
@ -115,7 +113,8 @@ void chttp2_tear_down_fullstack_compression(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack_compression", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack_compression", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack_compression,
chttp2_init_client_fullstack_compression,
chttp2_init_server_fullstack_compression,

@ -1,128 +0,0 @@
//
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include "test/core/end2end/end2end_tests.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/host_port.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/useful.h>
#include "src/core/ext/client_channel/client_channel.h"
#include "src/core/ext/transport/chttp2/transport/chttp2_transport.h"
#include "src/core/lib/channel/connected_channel.h"
#include "src/core/lib/channel/http_server_filter.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/server.h"
#include "test/core/end2end/fake_resolver.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
typedef struct fullstack_fixture_data {
char *localaddr;
} fullstack_fixture_data;
static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
grpc_channel_args *client_args, grpc_channel_args *server_args) {
grpc_end2end_test_fixture f;
int port = grpc_pick_unused_port_or_die();
fullstack_fixture_data *ffd = gpr_malloc(sizeof(fullstack_fixture_data));
memset(&f, 0, sizeof(f));
gpr_join_host_port(&ffd->localaddr, "127.0.0.1", port);
f.fixture_data = ffd;
f.cq = grpc_completion_queue_create(NULL);
return f;
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
fullstack_fixture_data *ffd = f->fixture_data;
char *server_uri;
gpr_asprintf(&server_uri, "test:%s%s%s", ffd->localaddr,
(query_args == NULL ? "" : "?"),
(query_args == NULL ? "" : query_args));
gpr_log(GPR_INFO, "server_uri: %s", server_uri);
f->client = grpc_insecure_channel_create(server_uri, client_args, NULL);
GPR_ASSERT(f->client);
gpr_free(server_uri);
}
void chttp2_init_server_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *server_args) {
fullstack_fixture_data *ffd = f->fixture_data;
if (f->server) {
grpc_server_destroy(f->server);
}
f->server = grpc_server_create(server_args, NULL);
grpc_server_register_completion_queue(f->server, f->cq, NULL);
GPR_ASSERT(grpc_server_add_insecure_http2_port(f->server, ffd->localaddr));
grpc_server_start(f->server);
}
void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
fullstack_fixture_data *ffd = f->fixture_data;
gpr_free(ffd->localaddr);
gpr_free(ffd);
}
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_QUERY_ARGS,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};
int main(int argc, char **argv) {
size_t i;
grpc_test_init(argc, argv);
grpc_end2end_tests_pre_init();
grpc_fake_resolver_init();
grpc_init();
for (i = 0; i < sizeof(configs) / sizeof(*configs); i++) {
grpc_end2end_tests(argc, argv, configs[i]);
}
grpc_shutdown();
return 0;
}

@ -105,9 +105,7 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) {
}
static void chttp2_init_client_fake_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
grpc_channel_credentials *fake_ts_creds =
grpc_fake_transport_security_credentials_create();
chttp2_init_client_secure_fullstack(f, client_args, fake_ts_creds);
@ -142,7 +140,8 @@ static void chttp2_init_server_fake_secure_fullstack(
static grpc_end2end_test_config configs[] = {
{"chttp2/fake_secure_fullstack",
FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS,
FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_secure_fullstack,
chttp2_init_client_fake_secure_fullstack,
chttp2_init_server_fake_secure_fullstack,

@ -78,10 +78,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
}
static void chttp2_init_client_socketpair(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
sp_fixture_data *sfd = f->fixture_data;

@ -76,9 +76,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
f->client = grpc_insecure_channel_create(ffd->localaddr, client_args, NULL);
GPR_ASSERT(f->client);
@ -104,7 +102,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};

@ -76,9 +76,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
f->client = grpc_insecure_channel_create(ffd->localaddr, client_args, NULL);
GPR_ASSERT(f->client);
@ -104,7 +102,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};

@ -70,9 +70,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
f->client = grpc_insecure_channel_create(ffd->localaddr, client_args, NULL);
GPR_ASSERT(f->client);
@ -98,7 +96,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};

@ -75,9 +75,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
char *proxy_uri;
gpr_asprintf(&proxy_uri, "http://%s",
@ -109,7 +107,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
{"chttp2/fullstack", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};

@ -73,9 +73,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_load_reporting(
}
void chttp2_init_client_load_reporting(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
load_reporting_fixture_data *ffd = f->fixture_data;
f->client = grpc_insecure_channel_create(ffd->localaddr, client_args, NULL);
GPR_ASSERT(f->client);
@ -105,7 +103,8 @@ void chttp2_tear_down_load_reporting(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack+load_reporting",
FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION,
FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_load_reporting, chttp2_init_client_load_reporting,
chttp2_init_server_load_reporting, chttp2_tear_down_load_reporting},
};

@ -150,9 +150,7 @@ void chttp2_tear_down_secure_fullstack(grpc_end2end_test_fixture *f) {
}
static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
grpc_end2end_test_fixture *f, grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
grpc_channel_credentials *ssl_creds =
grpc_ssl_credentials_create(test_root_cert, NULL, NULL);
grpc_call_credentials *oauth2_creds =
@ -218,7 +216,8 @@ static void chttp2_init_server_simple_ssl_secure_fullstack(
static grpc_end2end_test_config configs[] = {
{"chttp2/simple_ssl_with_oauth2_fullstack",
FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS,
FEATURE_MASK_SUPPORTS_PER_CALL_CREDENTIALS |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_secure_fullstack,
chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack,
chttp2_init_server_simple_ssl_secure_fullstack,

@ -85,9 +85,7 @@ static grpc_end2end_test_fixture chttp2_create_fixture_fullstack(
}
void chttp2_init_client_fullstack(grpc_end2end_test_fixture *f,
grpc_channel_args *client_args,
const char *query_args) {
GPR_ASSERT(query_args == NULL);
grpc_channel_args *client_args) {
fullstack_fixture_data *ffd = f->fixture_data;
f->client = grpc_insecure_channel_create(
grpc_end2end_proxy_get_client_target(ffd->proxy), client_args, NULL);
@ -116,7 +114,8 @@ void chttp2_tear_down_fullstack(grpc_end2end_test_fixture *f) {
/* All test configurations */
static grpc_end2end_test_config configs[] = {
{"chttp2/fullstack+proxy", FEATURE_MASK_SUPPORTS_DELAYED_CONNECTION |
FEATURE_MASK_SUPPORTS_REQUEST_PROXYING,
FEATURE_MASK_SUPPORTS_REQUEST_PROXYING |
FEATURE_MASK_SUPPORTS_CLIENT_CHANNEL,
chttp2_create_fixture_fullstack, chttp2_init_client_fullstack,
chttp2_init_server_fullstack, chttp2_tear_down_fullstack},
};
