Merge remote-tracking branch 'upstream/master'

Branch: pull/4274/head
Author: Hongyu Chen, 9 years ago
Commit: aa28ccc922
  1. BUILD (29 changes)
  2. Makefile (8333 changes)
  3. README.md (38 changes)
  4. binding.gyp (4 changes)
  5. build.yaml (144 changes)
  6. examples/objective-c/auth_sample/Podfile (1 change)
  7. examples/objective-c/helloworld/Podfile (1 change)
  8. examples/objective-c/route_guide/Podfile (1 change)
  9. gRPC.podspec (12 changes)
  10. include/grpc++/impl/rpc_service_method.h (7 changes)
  11. include/grpc++/impl/server_builder_option.h (23 changes)
  12. include/grpc++/server.h (5 changes)
  13. include/grpc++/server_builder.h (6 changes)
  14. include/grpc++/support/channel_arguments.h (5 changes)
  15. include/grpc/grpc.h (5 changes)
  16. include/grpc/support/alloc.h (14 changes)
  17. include/grpc/support/cmdline.h (8 changes)
  18. include/grpc/support/port_platform.h (9 changes)
  19. package.json (3 changes)
  20. src/core/census/context.h (2 changes)
  21. src/core/census/grpc_filter.c (10 changes)
  22. src/core/channel/channel_stack.c (17 changes)
  23. src/core/channel/channel_stack.h (41 changes)
  24. src/core/channel/client_channel.c (163 changes)
  25. src/core/channel/client_channel.h (12 changes)
  26. src/core/channel/client_uchannel.c (82 changes)
  27. src/core/channel/client_uchannel.h (16 changes)
  28. src/core/channel/compress_filter.c (7 changes)
  29. src/core/channel/connected_channel.c (6 changes)
  30. src/core/channel/http_client_filter.c (4 changes)
  31. src/core/channel/http_server_filter.c (6 changes)
  32. src/core/channel/noop_filter.c (118 changes)
  33. src/core/channel/subchannel_call_holder.c (80 changes)
  34. src/core/channel/subchannel_call_holder.h (14 changes)
  35. src/core/client_config/lb_policies/pick_first.c (165 changes)
  36. src/core/client_config/lb_policies/round_robin.c (258 changes)
  37. src/core/client_config/lb_policy.c (83 changes)
  38. src/core/client_config/lb_policy.h (41 changes)
  39. src/core/client_config/resolver.c (7 changes)
  40. src/core/client_config/resolver.h (10 changes)
  41. src/core/client_config/resolvers/dns_resolver.c (8 changes)
  42. src/core/client_config/resolvers/sockaddr_resolver.c (13 changes)
  43. src/core/client_config/resolvers/zookeeper_resolver.c (7 changes)
  44. src/core/client_config/subchannel.c (735 changes)
  45. src/core/client_config/subchannel.h (92 changes)
  46. src/core/client_config/subchannel_factory_decorators/merge_channel_args.c (86 changes)
  47. src/core/client_config/subchannel_factory_decorators/merge_channel_args.h (46 changes)
  48. src/core/compression/message_compress.c (14 changes)
  49. src/core/httpcli/httpcli.c (9 changes)
  50. src/core/httpcli/httpcli.h (2 changes)
  51. src/core/httpcli/httpcli_security_connector.c (5 changes)
  52. src/core/iomgr/endpoint_pair_posix.c (3 changes)
  53. src/core/iomgr/fd_posix.c (6 changes)
  54. src/core/iomgr/fd_posix.h (1 change)
  55. src/core/iomgr/pollset_set.h (22 changes)
  56. src/core/iomgr/pollset_set_posix.c (51 changes)
  57. src/core/iomgr/pollset_set_posix.h (4 changes)
  58. src/core/iomgr/pollset_set_windows.c (8 changes)
  59. src/core/iomgr/tcp_server_posix.c (10 changes)
  60. src/core/iomgr/tcp_windows.c (13 changes)
  61. src/core/iomgr/udp_server.c (2 changes)
  62. src/core/security/client_auth_filter.c (6 changes)
  63. src/core/security/credentials.c (14 changes)
  64. src/core/security/credentials.h (9 changes)
  65. src/core/security/credentials_posix.c (2 changes)
  66. src/core/security/credentials_win32.c (2 changes)
  67. src/core/security/google_default_credentials.c (15 changes)
  68. src/core/security/handshake.c (49 changes)
  69. src/core/security/handshake.h (2 changes)
  70. src/core/security/json_token.c (4 changes)
  71. src/core/security/security_connector.c (27 changes)
  72. src/core/security/security_connector.h (15 changes)
  73. src/core/security/server_secure_chttp2.c (75 changes)
  74. src/core/support/alloc.c (20 changes)
  75. src/core/support/cmdline.c (62 changes)
  76. src/core/support/log.c (3 changes)
  77. src/core/support/slice.c (7 changes)
  78. src/core/support/string.c (29 changes)
  79. src/core/support/string.h (10 changes)
  80. src/core/surface/call.c (35 changes)
  81. src/core/surface/call.h (14 changes)
  82. src/core/surface/call_log_batch.c (23 changes)
  83. src/core/surface/channel.c (54 changes)
  84. src/core/surface/channel.h (2 changes)
  85. src/core/surface/channel_connectivity.c (33 changes)
  86. src/core/surface/channel_create.c (8 changes)
  87. src/core/surface/channel_ping.c (79 changes)
  88. src/core/surface/init.c (7 changes)
  89. src/core/surface/lame_client.c (9 changes)
  90. src/core/surface/secure_channel_create.c (20 changes)
  91. src/core/surface/server_chttp2.c (4 changes)
  92. src/core/surface/server_create.c (13 changes)
  93. src/core/transport/chttp2/frame_data.c (2 changes)
  94. src/core/transport/chttp2/frame_data.h (3 changes)
  95. src/core/transport/chttp2/frame_ping.c (11 changes)
  96. src/core/transport/chttp2/frame_settings.c (4 changes)
  97. src/core/transport/chttp2/hpack_encoder.c (13 changes)
  98. src/core/transport/chttp2/hpack_parser.c (66 changes)
  99. src/core/transport/chttp2/hpack_parser.h (2 changes)
  100. src/core/transport/chttp2/incoming_metadata.c (18 changes)
Some files were not shown because too many files have changed in this diff.

BUILD (29 changes)

@ -161,7 +161,6 @@ cc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
@ -178,8 +177,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",
@ -301,7 +298,6 @@ cc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
@ -319,8 +315,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@ -381,6 +375,7 @@ cc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@ -457,7 +452,6 @@ cc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
@ -474,8 +468,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",
@ -577,7 +569,6 @@ cc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
@ -595,8 +586,6 @@ cc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@ -657,6 +646,7 @@ cc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@ -748,14 +738,13 @@ cc_library(
"src/cpp/server/dynamic_thread_pool.h",
"src/cpp/server/fixed_size_thread_pool.h",
"src/cpp/server/thread_pool_interface.h",
"src/cpp/client/secure_channel_arguments.cc",
"src/cpp/client/secure_credentials.cc",
"src/cpp/common/auth_property_iterator.cc",
"src/cpp/common/secure_auth_context.cc",
"src/cpp/common/secure_channel_arguments.cc",
"src/cpp/common/secure_create_auth_context.cc",
"src/cpp/server/secure_server_credentials.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/create_channel_internal.cc",
@ -763,6 +752,7 @@ cc_library(
"src/cpp/client/generic_stub.cc",
"src/cpp/client/insecure_credentials.cc",
"src/cpp/common/call.cc",
"src/cpp/common/channel_arguments.cc",
"src/cpp/common/completion_queue.cc",
"src/cpp/common/rpc_method.cc",
"src/cpp/proto/proto_utils.cc",
@ -796,6 +786,7 @@ cc_library(
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/server_builder_option.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@ -847,7 +838,6 @@ cc_library(
"src/cpp/server/thread_pool_interface.h",
"src/cpp/common/insecure_create_auth_context.cc",
"src/cpp/client/channel.cc",
"src/cpp/client/channel_arguments.cc",
"src/cpp/client/client_context.cc",
"src/cpp/client/create_channel.cc",
"src/cpp/client/create_channel_internal.cc",
@ -855,6 +845,7 @@ cc_library(
"src/cpp/client/generic_stub.cc",
"src/cpp/client/insecure_credentials.cc",
"src/cpp/common/call.cc",
"src/cpp/common/channel_arguments.cc",
"src/cpp/common/completion_queue.cc",
"src/cpp/common/rpc_method.cc",
"src/cpp/proto/proto_utils.cc",
@ -888,6 +879,7 @@ cc_library(
"include/grpc++/impl/rpc_method.h",
"include/grpc++/impl/rpc_service_method.h",
"include/grpc++/impl/serialization_traits.h",
"include/grpc++/impl/server_builder_option.h",
"include/grpc++/impl/service_type.h",
"include/grpc++/impl/sync.h",
"include/grpc++/impl/sync_cxx11.h",
@ -1111,7 +1103,6 @@ objc_library(
"src/core/channel/connected_channel.c",
"src/core/channel/http_client_filter.c",
"src/core/channel/http_server_filter.c",
"src/core/channel/noop_filter.c",
"src/core/channel/subchannel_call_holder.c",
"src/core/client_config/client_config.c",
"src/core/client_config/connector.c",
@ -1129,8 +1120,6 @@ objc_library(
"src/core/client_config/resolvers/sockaddr_resolver.c",
"src/core/client_config/subchannel.c",
"src/core/client_config/subchannel_factory.c",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.c",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.c",
"src/core/client_config/uri_parser.c",
"src/core/compression/algorithm.c",
"src/core/compression/message_compress.c",
@ -1191,6 +1180,7 @@ objc_library(
"src/core/surface/channel.c",
"src/core/surface/channel_connectivity.c",
"src/core/surface/channel_create.c",
"src/core/surface/channel_ping.c",
"src/core/surface/completion_queue.c",
"src/core/surface/event_string.c",
"src/core/surface/init.c",
@ -1264,7 +1254,6 @@ objc_library(
"src/core/channel/context.h",
"src/core/channel/http_client_filter.h",
"src/core/channel/http_server_filter.h",
"src/core/channel/noop_filter.h",
"src/core/channel/subchannel_call_holder.h",
"src/core/client_config/client_config.h",
"src/core/client_config/connector.h",
@ -1281,8 +1270,6 @@ objc_library(
"src/core/client_config/resolvers/sockaddr_resolver.h",
"src/core/client_config/subchannel.h",
"src/core/client_config/subchannel_factory.h",
"src/core/client_config/subchannel_factory_decorators/add_channel_arg.h",
"src/core/client_config/subchannel_factory_decorators/merge_channel_args.h",
"src/core/client_config/uri_parser.h",
"src/core/compression/algorithm_metadata.h",
"src/core/compression/message_compress.h",

Makefile (8333 changes)

File diff suppressed because one or more lines are too long

@ -13,34 +13,28 @@ You can find more detailed documentation and examples in the [doc](doc) and [exa
See grpc/INSTALL for installation instructions for various platforms.
#Repository Structure
#Repository Structure & Status
This repository contains source code for gRPC libraries for multiple languages written on top
of shared C core library [src/core] (src/core).
This repository contains source code for gRPC libraries for multiple languages written on top of shared C core library [src/core] (src/core).
* C++ source code: [src/cpp] (src/cpp)
* Ruby source code: [src/ruby] (src/ruby)
* NodeJS source code: [src/node] (src/node)
* Python source code: [src/python] (src/python)
* PHP source code: [src/php] (src/php)
* C# source code: [src/csharp] (src/csharp)
* Objective-C source code: [src/objective-c] (src/objective-c)
Libraries in different languages are in different state of development. We are seeking contributions for all of these libraries.
| Language | Source | Status |
|-------------------------|-------------------------------------|---------------------------------|
| Shared C [core library] | [src/core] (src/core) | Beta - the surface API is stable |
| C++ | [src/cpp] (src/cpp) | Beta - the surface API is stable |
| Ruby | [src/ruby] (src/ruby) | Beta - the surface API is stable |
| NodeJS | [src/node] (src/node) | Beta - the surface API is stable |
| Python | [src/python] (src/python) | Beta - the surface API is stable |
| PHP | [src/php] (src/php) | Beta - the surface API is stable |
| C# | [src/csharp] (src/csharp) | Beta - the surface API is stable |
| Objective-C | [src/objective-c] (src/objective-c) | Beta - the surface API is stable |
<small>
Java source code is in [grpc-java] (http://github.com/grpc/grpc-java) repository.
Go source code is in [grpc-go] (http://github.com/grpc/grpc-go) repository.
</small>
#Current Status of libraries
Libraries in different languages are in different state of development. We are seeking contributions for all of these libraries.
* shared C core library [src/core] (src/core) : Beta - the surface API is stable
* C++ Library: [src/cpp] (src/cpp) : Beta - the surface API is stable
* Ruby Library: [src/ruby] (src/ruby) : Beta - the surface API is stable
* NodeJS Library: [src/node] (src/node) : Beta - the surface API is stable
* Python Library: [src/python] (src/python) : Beta - the surface API is stable
* C# Library: [src/csharp] (src/csharp) : Beta - the surface API is stable
* Objective-C Library: [src/objective-c] (src/objective-c): Beta - the surface API is stable
* PHP Library: [src/php] (src/php) : Beta - the surface API is stable
#Overview

@ -183,7 +183,6 @@
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
@ -201,8 +200,6 @@
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.c',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
@ -263,6 +260,7 @@
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/channel_ping.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',

@ -37,6 +37,7 @@ filegroups:
- include/grpc++/impl/rpc_method.h
- include/grpc++/impl/rpc_service_method.h
- include/grpc++/impl/serialization_traits.h
- include/grpc++/impl/server_builder_option.h
- include/grpc++/impl/service_type.h
- include/grpc++/impl/sync.h
- include/grpc++/impl/sync_cxx11.h
@ -72,7 +73,6 @@ filegroups:
- src/cpp/server/thread_pool_interface.h
src:
- src/cpp/client/channel.cc
- src/cpp/client/channel_arguments.cc
- src/cpp/client/client_context.cc
- src/cpp/client/create_channel.cc
- src/cpp/client/create_channel_internal.cc
@ -80,6 +80,7 @@ filegroups:
- src/cpp/client/generic_stub.cc
- src/cpp/client/insecure_credentials.cc
- src/cpp/common/call.cc
- src/cpp/common/channel_arguments.cc
- src/cpp/common/completion_queue.cc
- src/cpp/common/rpc_method.cc
- src/cpp/proto/proto_utils.cc
@ -115,7 +116,6 @@ filegroups:
- src/core/channel/context.h
- src/core/channel/http_client_filter.h
- src/core/channel/http_server_filter.h
- src/core/channel/noop_filter.h
- src/core/channel/subchannel_call_holder.h
- src/core/client_config/client_config.h
- src/core/client_config/connector.h
@ -132,8 +132,6 @@ filegroups:
- src/core/client_config/resolvers/sockaddr_resolver.h
- src/core/client_config/subchannel.h
- src/core/client_config/subchannel_factory.h
- src/core/client_config/subchannel_factory_decorators/add_channel_arg.h
- src/core/client_config/subchannel_factory_decorators/merge_channel_args.h
- src/core/client_config/uri_parser.h
- src/core/compression/algorithm_metadata.h
- src/core/compression/message_compress.h
@ -232,7 +230,6 @@ filegroups:
- src/core/channel/connected_channel.c
- src/core/channel/http_client_filter.c
- src/core/channel/http_server_filter.c
- src/core/channel/noop_filter.c
- src/core/channel/subchannel_call_holder.c
- src/core/client_config/client_config.c
- src/core/client_config/connector.c
@ -250,8 +247,6 @@ filegroups:
- src/core/client_config/resolvers/sockaddr_resolver.c
- src/core/client_config/subchannel.c
- src/core/client_config/subchannel_factory.c
- src/core/client_config/subchannel_factory_decorators/add_channel_arg.c
- src/core/client_config/subchannel_factory_decorators/merge_channel_args.c
- src/core/client_config/uri_parser.c
- src/core/compression/algorithm.c
- src/core/compression/message_compress.c
@ -312,6 +307,7 @@ filegroups:
- src/core/surface/channel.c
- src/core/surface/channel_connectivity.c
- src/core/surface/channel_create.c
- src/core/surface/channel_ping.c
- src/core/surface/completion_queue.c
- src/core/surface/event_string.c
- src/core/surface/init.c
@ -354,7 +350,6 @@ filegroups:
- test/core/end2end/cq_verifier.h
- test/core/end2end/fixtures/proxy.h
- test/core/iomgr/endpoint_tests.h
- test/core/security/oauth2_utils.h
- test/core/util/grpc_profiler.h
- test/core/util/parse_hexstring.h
- test/core/util/port.h
@ -363,7 +358,6 @@ filegroups:
- test/core/end2end/cq_verifier.c
- test/core/end2end/fixtures/proxy.c
- test/core/iomgr/endpoint_tests.c
- test/core/security/oauth2_utils.c
- test/core/util/grpc_profiler.c
- test/core/util/parse_hexstring.c
- test/core/util/port_posix.c
@ -526,10 +520,12 @@ libs:
language: c
headers:
- test/core/end2end/data/ssl_test_data.h
- test/core/security/oauth2_utils.h
src:
- test/core/end2end/data/server1_cert.c
- test/core/end2end/data/server1_key.c
- test/core/end2end/data/test_root_cert.c
- test/core/security/oauth2_utils.c
deps:
- gpr
- gpr_test_util
@ -543,7 +539,7 @@ libs:
deps:
- gpr
- gpr_test_util
- grpc
- grpc_unsecure
filegroups:
- grpc_test_util_base
secure: false
@ -612,10 +608,10 @@ libs:
- src/cpp/common/secure_auth_context.h
- src/cpp/server/secure_server_credentials.h
src:
- src/cpp/client/secure_channel_arguments.cc
- src/cpp/client/secure_credentials.cc
- src/cpp/common/auth_property_iterator.cc
- src/cpp/common/secure_auth_context.cc
- src/cpp/common/secure_channel_arguments.cc
- src/cpp/common/secure_create_auth_context.cc
- src/cpp/server/secure_server_credentials.cc
deps:
@ -814,6 +810,14 @@ libs:
- winsock
- global
targets:
- name: alloc_test
build: test
language: c
src:
- test/core/support/alloc_test.c
deps:
- gpr_test_util
- gpr
- name: alpn_test
build: test
language: c
@ -834,6 +838,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: channel_create_test
build: test
language: c
src:
- test/core/surface/channel_create_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: chttp2_hpack_encoder_test
build: test
language: c
@ -864,6 +878,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: chttp2_varint_test
build: test
language: c
src:
- test/core/transport/chttp2/varint_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: compression_test
build: test
language: c
@ -1216,6 +1240,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: grpc_invalid_channel_args_test
build: test
language: c
src:
- test/core/surface/invalid_channel_args_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: grpc_json_token_test
build: test
language: c
@ -1324,6 +1358,38 @@ targets:
- mac
- linux
- posix
- name: httpscli_test
build: test
language: c
src:
- test/core/httpcli/httpscli_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
platforms:
- linux
- name: init_test
build: test
language: c
src:
- test/core/surface/init_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: invalid_call_argument_test
build: test
language: c
src:
- test/core/end2end/invalid_call_argument_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: json_rewrite
build: test
run: false
@ -1397,16 +1463,6 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: multi_init_test
build: test
language: c
src:
- test/core/surface/multi_init_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: multiple_server_queues_test
build: test
language: c
@ -1445,6 +1501,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: secure_channel_create_test
build: test
language: c
src:
- test/core/surface/secure_channel_create_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: secure_endpoint_test
build: test
language: c
@ -1455,6 +1521,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: server_chttp2_test
build: test
language: c
src:
- test/core/surface/server_chttp2_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: set_initial_connect_string_test
build: test
language: c
@ -1466,6 +1542,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: sockaddr_resolver_test
build: test
language: c
src:
- test/core/client_config/resolvers/sockaddr_resolver_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: sockaddr_utils_test
build: test
language: c
@ -1568,6 +1654,16 @@ targets:
- grpc
- gpr_test_util
- gpr
- name: transport_connectivity_state_test
build: test
language: c
src:
- test/core/transport/connectivity_state_test.c
deps:
- grpc_test_util
- grpc
- gpr_test_util
- gpr
- name: transport_metadata_test
build: test
language: c
@ -1692,7 +1788,7 @@ targets:
build: test
language: c++
src:
- test/cpp/client/channel_arguments_test.cc
- test/cpp/common/channel_arguments_test.cc
deps:
- grpc++
- grpc
@ -2269,11 +2365,11 @@ vspackages:
name: grpc.dependencies.zlib
props: false
redist: true
version: 1.2.8.9
version: 1.2.8.10
- name: grpc.dependencies.openssl
props: true
redist: true
version: 1.0.2.3
version: 1.0.204.1
- name: gflags
props: false
redist: false

@ -2,6 +2,7 @@ source 'https://github.com/CocoaPods/Specs.git'
platform :ios, '8.0'
pod 'Protobuf', :path => "../../../third_party/protobuf"
pod 'BoringSSL', :podspec => "../../../src/objective-c"
pod 'gRPC', :path => "../../.."
target 'AuthSample' do

@ -2,6 +2,7 @@ source 'https://github.com/CocoaPods/Specs.git'
platform :ios, '8.0'
pod 'Protobuf', :path => "../../../third_party/protobuf"
pod 'BoringSSL', :podspec => "../../../src/objective-c"
pod 'gRPC', :path => "../../.."
target 'HelloWorld' do

@ -3,6 +3,7 @@ platform :ios, '8.0'
target 'RouteGuideClient' do
pod 'Protobuf', :path => "../../../third_party/protobuf"
pod 'BoringSSL', :podspec => "../../../src/objective-c"
pod 'gRPC', :path => "../../.."
# Depend on the generated RouteGuide library.
pod 'RouteGuide', :path => '.'

@ -165,7 +165,6 @@ Pod::Spec.new do |s|
'src/core/channel/context.h',
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
@ -182,8 +181,6 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.h',
'src/core/client_config/subchannel.h',
'src/core/client_config/subchannel_factory.h',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.h',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.h',
'src/core/client_config/uri_parser.h',
'src/core/compression/algorithm_metadata.h',
'src/core/compression/message_compress.h',
@ -312,7 +309,6 @@ Pod::Spec.new do |s|
'src/core/channel/connected_channel.c',
'src/core/channel/http_client_filter.c',
'src/core/channel/http_server_filter.c',
'src/core/channel/noop_filter.c',
'src/core/channel/subchannel_call_holder.c',
'src/core/client_config/client_config.c',
'src/core/client_config/connector.c',
@ -330,8 +326,6 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.c',
'src/core/client_config/subchannel.c',
'src/core/client_config/subchannel_factory.c',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.c',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.c',
'src/core/client_config/uri_parser.c',
'src/core/compression/algorithm.c',
'src/core/compression/message_compress.c',
@ -392,6 +386,7 @@ Pod::Spec.new do |s|
'src/core/surface/channel.c',
'src/core/surface/channel_connectivity.c',
'src/core/surface/channel_create.c',
'src/core/surface/channel_ping.c',
'src/core/surface/completion_queue.c',
'src/core/surface/event_string.c',
'src/core/surface/init.c',
@ -467,7 +462,6 @@ Pod::Spec.new do |s|
'src/core/channel/context.h',
'src/core/channel/http_client_filter.h',
'src/core/channel/http_server_filter.h',
'src/core/channel/noop_filter.h',
'src/core/channel/subchannel_call_holder.h',
'src/core/client_config/client_config.h',
'src/core/client_config/connector.h',
@ -484,8 +478,6 @@ Pod::Spec.new do |s|
'src/core/client_config/resolvers/sockaddr_resolver.h',
'src/core/client_config/subchannel.h',
'src/core/client_config/subchannel_factory.h',
'src/core/client_config/subchannel_factory_decorators/add_channel_arg.h',
'src/core/client_config/subchannel_factory_decorators/merge_channel_args.h',
'src/core/client_config/uri_parser.h',
'src/core/compression/algorithm_metadata.h',
'src/core/compression/message_compress.h',
@ -589,7 +581,7 @@ Pod::Spec.new do |s|
ss.requires_arc = false
ss.libraries = 'z'
ss.dependency 'OpenSSL', '~> 1.0.204.1'
ss.dependency 'BoringSSL', '~> 1.0'
# ss.compiler_flags = '-GCC_WARN_INHIBIT_ALL_WARNINGS', '-w'
end

@ -34,6 +34,7 @@
#ifndef GRPCXX_IMPL_RPC_SERVICE_METHOD_H
#define GRPCXX_IMPL_RPC_SERVICE_METHOD_H
#include <climits>
#include <functional>
#include <map>
#include <memory>
@ -251,7 +252,11 @@ class RpcService {
void AddMethod(RpcServiceMethod* method) { methods_.emplace_back(method); }
RpcServiceMethod* GetMethod(int i) { return methods_[i].get(); }
int GetMethodCount() const { return methods_.size(); }
int GetMethodCount() const {
// On win x64, int is only 32bit
GPR_ASSERT(methods_.size() <= INT_MAX);
return (int)methods_.size();
}
private:
std::vector<std::unique_ptr<RpcServiceMethod>> methods_;
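Note: the GetMethodCount() change above guards a size_t-to-int narrowing with GPR_ASSERT because int is only 32 bits on 64-bit Windows. A minimal standalone sketch of the same pattern, not part of this commit, with plain assert standing in for GPR_ASSERT:

    #include <cassert>
    #include <climits>
    #include <vector>

    // Return a container's size as int, asserting the size_t value fits first.
    int count_as_int(const std::vector<int>& items) {
      assert(items.size() <= INT_MAX);        // mirrors the GPR_ASSERT in the diff
      return static_cast<int>(items.size());
    }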

@ -31,14 +31,21 @@
*
*/
#ifndef GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
#define GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H
#ifndef GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
#define GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
#include "src/core/channel/channel_stack.h"
#include <grpc++/support/channel_arguments.h>
/* No-op filter: simply takes everything it's given, and passes it on to the
next filter. Exists simply as a starting point that others can take and
customize for their own filters */
extern const grpc_channel_filter grpc_no_op_filter;
namespace grpc {
#endif /* GRPC_INTERNAL_CORE_CHANNEL_NOOP_FILTER_H */
/// Interface to pass an option to a \a ServerBuilder.
class ServerBuilderOption {
public:
virtual ~ServerBuilderOption() {}
/// Alter the \a ChannelArguments used to create the gRPC server.
virtual void UpdateArguments(ChannelArguments* args) = 0;
};
} // namespace grpc
#endif // GRPCXX_IMPL_SERVER_BUILDER_OPTION_H
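Note: the new <grpc++/impl/server_builder_option.h> header above defines the ServerBuilderOption interface, which pairs with the ServerBuilder::SetOption() hook added later in this diff. A hypothetical sketch, not part of this commit; the class name and the channel-arg value are illustrative:

    #include <memory>
    #include <grpc++/impl/server_builder_option.h>
    #include <grpc++/support/channel_arguments.h>

    // Illustrative option that tweaks the server's channel arguments at build time.
    class MaxStreamsOption : public grpc::ServerBuilderOption {
     public:
      void UpdateArguments(grpc::ChannelArguments* args) override {
        // "grpc.max_concurrent_streams" is a standard channel-arg key;
        // 100 is just an example value.
        args->SetInt("grpc.max_concurrent_streams", 100);
      }
    };

    // Later, when assembling the server:
    //   builder.SetOption(std::unique_ptr<grpc::ServerBuilderOption>(
    //       new MaxStreamsOption()));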

@ -37,14 +37,15 @@
#include <list>
#include <memory>
#include <grpc/compression.h>
#include <grpc++/completion_queue.h>
#include <grpc++/impl/call.h>
#include <grpc++/impl/grpc_library.h>
#include <grpc++/impl/sync.h>
#include <grpc++/security/server_credentials.h>
#include <grpc++/support/channel_arguments.h>
#include <grpc++/support/config.h>
#include <grpc++/support/status.h>
#include <grpc/compression.h>
struct grpc_server;
@ -118,7 +119,7 @@ class Server GRPC_FINAL : public GrpcLibrary, private CallHook {
/// \param max_message_size Maximum message length that the channel can
/// receive.
Server(ThreadPoolInterface* thread_pool, bool thread_pool_owned,
int max_message_size, grpc_compression_options compression_options);
int max_message_size, const ChannelArguments& args);
/// Register a service. This call does not take ownership of the service.
/// The service must exist for the lifetime of the Server instance.

@ -37,8 +37,9 @@
#include <memory>
#include <vector>
#include <grpc/compression.h>
#include <grpc++/impl/server_builder_option.h>
#include <grpc++/support/config.h>
#include <grpc/compression.h>
namespace grpc {
@ -98,6 +99,8 @@ class ServerBuilder {
compression_options_ = options;
}
void SetOption(std::unique_ptr<ServerBuilderOption> option);
/// Tries to bind \a server to the given \a addr.
///
/// It can be invoked multiple times.
@ -140,6 +143,7 @@ class ServerBuilder {
int max_message_size_;
grpc_compression_options compression_options_;
std::vector<std::unique_ptr<ServerBuilderOption>> options_;
std::vector<std::unique_ptr<NamedService<RpcService>>> services_;
std::vector<std::unique_ptr<NamedService<AsynchronousService>>>
async_services_;

@ -80,6 +80,11 @@ class ChannelArguments {
// Generic channel argument setters. Only for advanced use cases.
/// Set an integer argument \a value under \a key.
void SetInt(const grpc::string& key, int value);
// Generic channel argument setter. Only for advanced use cases.
/// Set a pointer argument \a value under \a key. Owership is not transferred.
void SetPointer(const grpc::string& key, void* value);
/// Set a textual argument \a value under \a key.
void SetString(const grpc::string& key, const grpc::string& value);
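Note: a hypothetical usage sketch for the generic setters above, not part of this commit; the keys and values are only illustrative:

    #include <grpc++/support/channel_arguments.h>

    void FillArgs(grpc::ChannelArguments* args, void* my_state) {
      args->SetInt("grpc.max_concurrent_streams", 100);
      args->SetString("grpc.primary_user_agent", "my-app/1.0");
      // Per the comment above, ownership of my_state stays with the caller.
      args->SetPointer("my.custom_pointer_arg", my_state);
    }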

@ -531,6 +531,11 @@ grpc_call *grpc_channel_create_call(grpc_channel *channel,
const char *method, const char *host,
gpr_timespec deadline, void *reserved);
/** Ping the channels peer (load balanced channels will select one sub-channel
to ping); if the channel is not connected, posts a failed. */
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
void *tag, void *reserved);
/** Pre-register a method/host pair on a channel. */
void *grpc_channel_register_call(grpc_channel *channel, const char *method,
const char *host, void *reserved);
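Note: a hypothetical sketch of driving the new grpc_channel_ping() through a completion queue, not part of this commit; channel/queue creation and teardown are omitted and the tag value is arbitrary:

    #include <grpc/grpc.h>
    #include <grpc/support/time.h>

    /* Returns non-zero if the peer answered the ping. */
    int ping_peer(grpc_channel *channel, grpc_completion_queue *cq) {
      void *tag = (void *)0x1;
      grpc_channel_ping(channel, cq, tag, NULL); /* reserved must be NULL */
      grpc_event ev = grpc_completion_queue_next(
          cq, gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
      /* ev.success is 0 when the ping fails, e.g. the channel is not connected. */
      return ev.type == GRPC_OP_COMPLETE && ev.success && ev.tag == tag;
    }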

@ -40,6 +40,12 @@
extern "C" {
#endif
typedef struct gpr_allocation_functions {
void *(*malloc_fn)(size_t size);
void *(*realloc_fn)(void *ptr, size_t size);
void (*free_fn)(void *ptr);
} gpr_allocation_functions;
/* malloc, never returns NULL */
void *gpr_malloc(size_t size);
/* free */
@ -51,6 +57,14 @@ void *gpr_malloc_aligned(size_t size, size_t alignment_log);
/* free memory allocated by gpr_malloc_aligned */
void gpr_free_aligned(void *ptr);
/** Request the family of allocation functions in \a functions be used. NOTE
* that this request will be honored in a *best effort* basis and that no
* guarantees are made about the default functions (eg, malloc) being called. */
void gpr_set_allocation_functions(gpr_allocation_functions functions);
/** Return the family of allocation functions currently in effect. */
gpr_allocation_functions gpr_get_allocation_functions();
#ifdef __cplusplus
}
#endif
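Note: a hypothetical sketch of swapping in custom allocation functions via the new gpr_set_allocation_functions(), not part of this commit; the counting wrappers are made up, and the header itself says the request is honored on a best-effort basis:

    #include <stdlib.h>
    #include <grpc/support/alloc.h>

    static size_t g_alloc_count = 0;

    static void *counting_malloc(size_t size) {
      ++g_alloc_count;
      return malloc(size);
    }
    static void *counting_realloc(void *ptr, size_t size) { return realloc(ptr, size); }
    static void counting_free(void *ptr) { free(ptr); }

    void install_counting_allocator(void) {
      gpr_allocation_functions fns;
      fns.malloc_fn = counting_malloc;
      fns.realloc_fn = counting_realloc;
      fns.free_fn = counting_free;
      gpr_set_allocation_functions(fns);
    }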

@ -83,8 +83,12 @@ void gpr_cmdline_add_string(gpr_cmdline *cl, const char *name, const char *help,
void gpr_cmdline_on_extra_arg(
gpr_cmdline *cl, const char *name, const char *help,
void (*on_extra_arg)(void *user_data, const char *arg), void *user_data);
/* Parse the command line */
void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Enable surviving failure: default behavior is to exit the process */
void gpr_cmdline_set_survive_failure(gpr_cmdline *cl);
/* Parse the command line; returns 1 on success, on failure either dies
(by default) or returns 0 if gpr_cmdline_set_survive_failure() has been
called */
int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv);
/* Destroy the parser */
void gpr_cmdline_destroy(gpr_cmdline *cl);
/* Get a string describing usage */
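Note: a hypothetical sketch of the new failure-survival behaviour, not part of this commit; gpr_cmdline_create() and its description string are assumptions, since the creation API is not shown in this hunk:

    #include <grpc/support/cmdline.h>

    /* Returns 1 if the flags parsed cleanly, 0 otherwise (instead of exiting). */
    int parse_flags(int argc, char **argv) {
      gpr_cmdline *cl = gpr_cmdline_create("example tool"); /* assumed API */
      gpr_cmdline_set_survive_failure(cl);
      int ok = gpr_cmdline_parse(cl, argc, argv);
      gpr_cmdline_destroy(cl);
      return ok;
    }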

@ -183,7 +183,7 @@
#endif
#define GPR_MSG_IOVLEN_TYPE int
#if TARGET_OS_IPHONE
#define GPR_FORBID_UNREACHABLE_CODE
#define GPR_FORBID_UNREACHABLE_CODE 1
#define GPR_PLATFORM_STRING "ios"
#define GPR_CPU_IPHONE 1
#define GPR_PTHREAD_TLS 1
@ -252,6 +252,11 @@
#define GPR_PLATFORM_STRING "unknown"
#endif
#ifdef GPR_GCOV
#undef GPR_FORBID_UNREACHABLE_CODE
#define GPR_FORBID_UNREACHABLE_CODE 1
#endif
/* For a common case, assume that the platform has a C99-like stdint.h */
#include <stdint.h>
@ -337,7 +342,7 @@ typedef uintptr_t gpr_uintptr;
#endif
#endif
#ifdef GPR_FORBID_UNREACHABLE_CODE
#if GPR_FORBID_UNREACHABLE_CODE
#define GPR_UNREACHABLE_CODE(STATEMENT)
#else
#define GPR_UNREACHABLE_CODE(STATEMENT) \

@ -39,7 +39,8 @@
"minimist": "^1.1.0",
"mocha": "~1.21.0",
"mocha-jenkins-reporter": "^0.1.9",
"mustache": "^2.0.0"
"mustache": "^2.0.0",
"poisson-process": "^0.2.1"
},
"engines": {
"node": ">=0.10.13"

@ -41,7 +41,7 @@
/* census_context is the in-memory representation of information needed to
* maintain tracing, RPC statistics and resource usage information. */
struct census_context {
census_tag_set* tags; /* Opaque data structure for census tags. */
census_tag_set *tags; /* Opaque data structure for census tags. */
};
#endif /* GRPC_INTERNAL_CORE_CENSUS_CONTEXT_H */

@ -43,7 +43,6 @@
#include <grpc/support/time.h>
#include "src/core/channel/channel_stack.h"
#include "src/core/channel/noop_filter.h"
#include "src/core/statistics/census_interface.h"
#include "src/core/statistics/census_rpc_stats.h"
#include "src/core/transport/static_metadata.h"
@ -60,9 +59,7 @@ typedef struct call_data {
grpc_closure finish_recv;
} call_data;
typedef struct channel_data {
gpr_uint8 unused;
} channel_data;
typedef struct channel_data { gpr_uint8 unused; } channel_data;
static void extract_and_annotate_method_tag(grpc_metadata_batch *md,
call_data *calld,
@ -118,8 +115,11 @@ static void server_mutate_op(grpc_call_element *elem,
static void server_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* TODO(ctiller): this code fails. I don't know why. I expect it's
incomplete, and someone should look at it soon.
call_data *calld = elem->call_data;
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0));
GPR_ASSERT((calld->op_id.upper != 0) || (calld->op_id.lower != 0)); */
server_mutate_op(elem, op);
grpc_call_next_op(exec_ctx, elem, op);
}

@ -101,11 +101,12 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack,
return CALL_ELEMS_FROM_STACK(call_stack) + index;
}
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
size_t filter_count,
const grpc_channel_args *channel_args,
grpc_channel_stack *stack) {
const char *name, grpc_channel_stack *stack) {
size_t call_size =
ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
@ -115,6 +116,8 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
size_t i;
stack->count = filter_count;
GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
user_data =
((char *)elems) +
@ -122,7 +125,7 @@ void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
/* init per-filter data */
for (i = 0; i < filter_count; i++) {
args.master = master;
args.channel_stack = stack;
args.channel_args = channel_args;
args.is_first = i == 0;
args.is_last = i == (filter_count - 1);
@ -166,15 +169,15 @@ void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
size_t i;
call_stack->count = count;
gpr_ref_init(&call_stack->refcount.refs, initial_refs);
grpc_closure_init(&call_stack->refcount.destroy, destroy, destroy_arg);
GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
destroy_arg, "CALL_STACK");
call_elems = CALL_ELEMS_FROM_STACK(call_stack);
user_data = ((char *)call_elems) +
ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));
/* init per-filter data */
for (i = 0; i < count; i++) {
args.refcount = &call_stack->refcount;
args.call_stack = call_stack;
args.server_transport_data = transport_server_data;
args.context = context;
call_elems[i].filter = channel_elems[i].filter;

@ -51,15 +51,18 @@
typedef struct grpc_channel_element grpc_channel_element;
typedef struct grpc_call_element grpc_call_element;
typedef struct grpc_channel_stack grpc_channel_stack;
typedef struct grpc_call_stack grpc_call_stack;
typedef struct {
grpc_channel *master;
grpc_channel_stack *channel_stack;
const grpc_channel_args *channel_args;
int is_first;
int is_last;
} grpc_channel_element_args;
typedef struct {
grpc_stream_refcount *refcount;
grpc_call_stack *call_stack;
const void *server_transport_data;
grpc_call_context_element *context;
} grpc_call_element_args;
@ -144,23 +147,24 @@ struct grpc_call_element {
/* A channel stack tracks a set of related filters for one channel, and
guarantees they live within a single malloc() allocation */
typedef struct {
struct grpc_channel_stack {
grpc_stream_refcount refcount;
size_t count;
/* Memory required for a call stack (computed at channel stack
initialization) */
size_t call_stack_size;
} grpc_channel_stack;
};
/* A call stack tracks a set of related filters for one call, and guarantees
they live within a single malloc() allocation */
typedef struct {
struct grpc_call_stack {
/* shared refcount for this channel stack.
MUST be the first element: the underlying code calls destroy
with the address of the refcount, but higher layers prefer to think
about the address of the call stack itself. */
grpc_stream_refcount refcount;
size_t count;
} grpc_call_stack;
};
/* Get a channel element given a channel stack and its index */
grpc_channel_element *grpc_channel_stack_element(grpc_channel_stack *stack,
@ -175,11 +179,11 @@ grpc_call_element *grpc_call_stack_element(grpc_call_stack *stack, size_t i);
size_t grpc_channel_stack_size(const grpc_channel_filter **filters,
size_t filter_count);
/* Initialize a channel stack given some filters */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx,
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
grpc_iomgr_cb_func destroy, void *destroy_arg,
const grpc_channel_filter **filters,
size_t filter_count, grpc_channel *master,
const grpc_channel_args *args,
grpc_channel_stack *stack);
size_t filter_count, const grpc_channel_args *args,
const char *name, grpc_channel_stack *stack);
/* Destroy a channel stack */
void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx,
grpc_channel_stack *stack);
@ -199,14 +203,23 @@ void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset *pollset);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define grpc_call_stack_ref(call_stack, reason) \
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount, reason)
#define grpc_call_stack_unref(exec_ctx, call_stack, reason) \
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
grpc_stream_ref(&(channel_stack)->refcount, reason)
#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason)
#else
#define grpc_call_stack_ref(call_stack) grpc_stream_ref(&(call_stack)->refcount)
#define grpc_call_stack_unref(exec_ctx, call_stack) \
#define GRPC_CALL_STACK_REF(call_stack, reason) \
grpc_stream_ref(&(call_stack)->refcount)
#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
#define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
grpc_stream_ref(&(channel_stack)->refcount)
#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
grpc_stream_unref(exec_ctx, &(channel_stack)->refcount)
#endif
/* Destroy a call stack */

@ -59,11 +59,6 @@ typedef struct client_channel_channel_data {
grpc_resolver *resolver;
/** have we started resolving this channel */
int started_resolving;
/** master channel - the grpc_channel instance that ultimately owns
this channel_data via its channel stack.
We occasionally use this to bump the refcount on the master channel
to keep ourselves alive through an asynchronous operation. */
grpc_channel *master;
/** mutex protecting client configuration, including all
variables below in this data structure */
@ -81,8 +76,10 @@ typedef struct client_channel_channel_data {
grpc_connectivity_state_tracker state_tracker;
/** when an lb_policy arrives, should we try to exit idle */
int exit_idle_when_lb_policy_arrives;
/** pollset_set of interested parties in a new connection */
grpc_pollset_set pollset_set;
/** owning stack */
grpc_channel_stack *owning_stack;
/** interested parties */
grpc_pollset_set interested_parties;
} channel_data;
/** We create one watcher for each new lb_policy that is returned from a
@ -103,9 +100,7 @@ typedef struct {
} waiting_call;
static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data,
chand->master);
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@ -121,10 +116,18 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
static void on_lb_policy_state_changed_locked(
grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
grpc_connectivity_state publish_state = w->state;
/* check if the notification is for a stale policy */
if (w->lb_policy != w->chand->lb_policy) return;
grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, w->state,
if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
w->chand->resolver != NULL) {
publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
w->chand->lb_policy = NULL;
}
grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, publish_state,
"lb_changed");
if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
@ -139,7 +142,7 @@ static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
on_lb_policy_state_changed_locked(exec_ctx, w);
gpr_mu_unlock(&w->chand->mu_config);
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->chand->master, "watch_lb_policy");
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
gpr_free(w);
}
@ -147,7 +150,7 @@ static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
grpc_lb_policy *lb_policy,
grpc_connectivity_state current_state) {
lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
GRPC_CHANNEL_INTERNAL_REF(chand->master, "watch_lb_policy");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
w->chand = chand;
grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
@ -179,6 +182,11 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
chand->incoming_configuration = NULL;
if (lb_policy != NULL) {
grpc_pollset_set_add_pollset_set(exec_ctx, &lb_policy->interested_parties,
&chand->interested_parties);
}
gpr_mu_lock(&chand->mu_config);
old_lb_policy = chand->lb_policy;
chand->lb_policy = lb_policy;
@ -200,7 +208,7 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
watch_lb_policy(exec_ctx, chand, lb_policy, state);
}
gpr_mu_unlock(&chand->mu_config);
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel-next");
@ -222,7 +230,9 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
if (old_lb_policy != NULL) {
grpc_lb_policy_shutdown(exec_ctx, old_lb_policy);
grpc_pollset_set_del_pollset_set(exec_ctx,
&old_lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
}
@ -230,20 +240,22 @@ static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
}
GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->master, "resolver");
GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_transport_op *op) {
grpc_lb_policy *lb_policy = NULL;
channel_data *chand = elem->channel_data;
grpc_resolver *destroy_resolver = NULL;
grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, 1);
GPR_ASSERT(op->set_accept_stream == NULL);
GPR_ASSERT(op->bind_pollset == NULL);
if (op->bind_pollset != NULL) {
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties,
op->bind_pollset);
}
gpr_mu_lock(&chand->mu_config);
if (op->on_connectivity_state_change != NULL) {
@ -254,9 +266,14 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
op->connectivity_state = NULL;
}
lb_policy = chand->lb_policy;
if (lb_policy) {
GRPC_LB_POLICY_REF(lb_policy, "broadcast");
if (op->send_ping != NULL) {
if (chand->lb_policy == NULL) {
grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, 0);
} else {
grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
op->bind_pollset = NULL;
}
op->send_ping = NULL;
}
if (op->disconnect && chand->resolver != NULL) {
@ -265,7 +282,9 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
destroy_resolver = chand->resolver;
chand->resolver = NULL;
if (chand->lb_policy != NULL) {
grpc_lb_policy_shutdown(exec_ctx, chand->lb_policy);
grpc_pollset_set_del_pollset_set(exec_ctx,
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
chand->lb_policy = NULL;
}
@ -276,16 +295,11 @@ static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
grpc_resolver_shutdown(exec_ctx, destroy_resolver);
GRPC_RESOLVER_UNREF(exec_ctx, destroy_resolver, "channel");
}
if (lb_policy) {
grpc_lb_policy_broadcast(exec_ctx, lb_policy, op);
GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "broadcast");
}
}
typedef struct {
grpc_metadata_batch *initial_metadata;
grpc_subchannel **subchannel;
grpc_connected_subchannel **connected_subchannel;
grpc_closure *on_ready;
grpc_call_element *elem;
grpc_closure closure;
@ -293,17 +307,17 @@ typedef struct {
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready);
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
continue_picking_args *cpa = arg;
if (!success) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
} else if (cpa->subchannel == NULL) {
} else if (cpa->connected_subchannel == NULL) {
/* cancelled, do nothing */
} else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
cpa->subchannel, cpa->on_ready)) {
cpa->connected_subchannel, cpa->on_ready)) {
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 1);
}
gpr_free(cpa);
@ -311,7 +325,7 @@ static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, int success) {
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
grpc_call_element *elem = elemp;
channel_data *chand = elem->channel_data;
@ -319,18 +333,19 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
continue_picking_args *cpa;
grpc_closure *closure;
GPR_ASSERT(subchannel);
GPR_ASSERT(connected_subchannel);
gpr_mu_lock(&chand->mu_config);
if (initial_metadata == NULL) {
if (chand->lb_policy != NULL) {
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy, subchannel);
grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
connected_subchannel);
}
for (closure = chand->waiting_for_config_closures.head; closure != NULL;
closure = grpc_closure_next(closure)) {
cpa = closure->cb_arg;
if (cpa->subchannel == subchannel) {
cpa->subchannel = NULL;
if (cpa->connected_subchannel == connected_subchannel) {
cpa->connected_subchannel = NULL;
grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, 0);
}
}
@ -338,21 +353,22 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
return 1;
}
if (chand->lb_policy != NULL) {
int r = grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
initial_metadata, subchannel, on_ready);
int r =
grpc_lb_policy_pick(exec_ctx, chand->lb_policy, calld->pollset,
initial_metadata, connected_subchannel, on_ready);
gpr_mu_unlock(&chand->mu_config);
return r;
}
if (chand->resolver != NULL && !chand->started_resolving) {
chand->started_resolving = 1;
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
&chand->on_config_changed);
}
cpa = gpr_malloc(sizeof(*cpa));
cpa->initial_metadata = initial_metadata;
cpa->subchannel = subchannel;
cpa->connected_subchannel = connected_subchannel;
cpa->on_ready = on_ready;
cpa->elem = elem;
grpc_closure_init(&cpa->closure, continue_picking, cpa);
@ -364,7 +380,8 @@ static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem);
grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
args->call_stack);
}
/* Destructor for call_data */
@ -385,12 +402,12 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
gpr_mu_init(&chand->mu_config);
chand->master = args->master;
grpc_pollset_set_init(&chand->pollset_set);
grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_channel");
grpc_pollset_set_init(&chand->interested_parties);
}
/* Destructor for channel_data */
@ -403,10 +420,13 @@ static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
}
if (chand->lb_policy != NULL) {
grpc_pollset_set_del_pollset_set(exec_ctx,
&chand->lb_policy->interested_parties,
&chand->interested_parties);
GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
}
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
grpc_pollset_set_destroy(&chand->pollset_set);
grpc_pollset_set_destroy(&chand->interested_parties);
gpr_mu_destroy(&chand->mu_config);
}
@ -435,7 +455,7 @@ void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
chand->exit_idle_when_lb_policy_arrives) {
chand->started_resolving = 1;
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
&chand->on_config_changed);
}
@ -454,7 +474,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
} else {
chand->exit_idle_when_lb_policy_arrives = 1;
if (!chand->started_resolving && chand->resolver != NULL) {
GRPC_CHANNEL_INTERNAL_REF(chand->master, "resolver");
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
chand->started_resolving = 1;
grpc_resolver_next(exec_ctx, chand->resolver,
&chand->incoming_configuration,
@ -466,32 +486,39 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
return out;
}
typedef struct {
channel_data *chand;
grpc_pollset *pollset;
grpc_closure *on_complete;
grpc_closure my_closure;
} external_connectivity_watcher;
static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
external_connectivity_watcher *w = arg;
grpc_closure *follow_up = w->on_complete;
grpc_pollset_set_del_pollset(exec_ctx, &w->chand->interested_parties,
w->pollset);
GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
"external_connectivity_watcher");
gpr_free(w);
follow_up->cb(exec_ctx, follow_up->cb_arg, iomgr_success);
}
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
w->chand = chand;
w->pollset = pollset;
w->on_complete = on_complete;
grpc_pollset_set_add_pollset(exec_ctx, &chand->interested_parties, pollset);
grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
"external_connectivity_watcher");
gpr_mu_lock(&chand->mu_config);
grpc_connectivity_state_notify_on_state_change(
exec_ctx, &chand->state_tracker, state, on_complete);
exec_ctx, &chand->state_tracker, state, &w->my_closure);
gpr_mu_unlock(&chand->mu_config);
}
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
return &chand->pollset_set;
}
void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_add_pollset(exec_ctx, &chand->pollset_set, pollset);
}
void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
channel_data *chand = elem->channel_data;
grpc_pollset_set_del_pollset(exec_ctx, &chand->pollset_set, pollset);
}

@ -57,17 +57,7 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_channel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
grpc_pollset_set *grpc_client_channel_get_connecting_pollset_set(
grpc_channel_element *elem);
void grpc_client_channel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
void grpc_client_channel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_CHANNEL_H */

@ -58,13 +58,13 @@ typedef struct client_uchannel_channel_data {
this channel_data via its channel stack.
We occasionally use this to bump the refcount on the master channel
to keep ourselves alive through an asynchronous operation. */
grpc_channel *master;
grpc_channel_stack *owning_stack;
/** connectivity state being tracked */
grpc_connectivity_state_tracker state_tracker;
/** the subchannel wrapped by the microchannel */
grpc_subchannel *subchannel;
grpc_connected_subchannel *connected_subchannel;
/** the callback used to stay subscribed to subchannel connectivity
* notifications */
@ -84,15 +84,13 @@ static void monitor_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
chand->subchannel_connectivity,
"uchannel_monitor_subchannel");
grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
&chand->subchannel_connectivity,
&chand->connectivity_cb);
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, chand->connected_subchannel, NULL,
&chand->subchannel_connectivity, &chand->connectivity_cb);
}
static char *cuc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data,
chand->master);
return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
}
static void cuc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@ -128,11 +126,11 @@ static void cuc_start_transport_op(grpc_exec_ctx *exec_ctx,
static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel,
grpc_connected_subchannel **connected_subchannel,
grpc_closure *on_ready) {
channel_data *chand = arg;
GPR_ASSERT(initial_metadata != NULL);
*subchannel = chand->subchannel;
*connected_subchannel = chand->connected_subchannel;
return 1;
}
@ -140,7 +138,7 @@ static int cuc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
static void cuc_init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
grpc_subchannel_call_holder_init(elem->call_data, cuc_pick_subchannel,
elem->channel_data);
elem->channel_data, args->call_stack);
}
/* Destructor for call_data */
@ -158,7 +156,7 @@ static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_closure_init(&chand->connectivity_cb, monitor_subchannel, chand);
GPR_ASSERT(args->is_last);
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
chand->master = args->master;
chand->owning_stack = args->channel_stack;
grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
"client_uchannel");
gpr_mu_init(&chand->mu_state);
@ -168,10 +166,14 @@ static void cuc_init_channel_elem(grpc_exec_ctx *exec_ctx,
static void cuc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
grpc_subchannel_state_change_unsubscribe(exec_ctx, chand->subchannel,
&chand->connectivity_cb);
/* cancel subscription */
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, chand->connected_subchannel, NULL, NULL,
&chand->connectivity_cb);
grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
gpr_mu_destroy(&chand->mu_state);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, chand->connected_subchannel,
"uchannel");
}
static void cuc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
@ -191,23 +193,14 @@ grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
channel_data *chand = elem->channel_data;
grpc_connectivity_state out;
out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_lock(&chand->mu_state);
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
GRPC_CHANNEL_CONNECTING,
"uchannel_connecting_changed");
chand->subchannel_connectivity = out;
grpc_subchannel_notify_on_state_change(exec_ctx, chand->subchannel,
&chand->subchannel_connectivity,
&chand->connectivity_cb);
}
out = grpc_connectivity_state_check(&chand->state_tracker);
gpr_mu_unlock(&chand->mu_state);
return out;
}
void grpc_client_uchannel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete) {
channel_data *chand = elem->channel_data;
gpr_mu_lock(&chand->mu_state);
@ -216,40 +209,11 @@ void grpc_client_uchannel_watch_connectivity_state(
gpr_mu_unlock(&chand->mu_state);
}
grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
grpc_channel_element *elem) {
channel_data *chand = elem->channel_data;
grpc_channel_element *parent_elem;
gpr_mu_lock(&chand->mu_state);
parent_elem = grpc_channel_stack_last_element(grpc_channel_get_channel_stack(
grpc_subchannel_get_master(chand->subchannel)));
gpr_mu_unlock(&chand->mu_state);
return grpc_client_channel_get_connecting_pollset_set(parent_elem);
}
void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset_set *master_pollset_set =
grpc_client_uchannel_get_connecting_pollset_set(elem);
grpc_pollset_set_add_pollset(exec_ctx, master_pollset_set, pollset);
}
void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_pollset *pollset) {
grpc_pollset_set *master_pollset_set =
grpc_client_uchannel_get_connecting_pollset_set(elem);
grpc_pollset_set_del_pollset(exec_ctx, master_pollset_set, pollset);
}
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args) {
grpc_channel *channel = NULL;
#define MAX_FILTERS 3
const grpc_channel_filter *filters[MAX_FILTERS];
grpc_channel *master = grpc_subchannel_get_master(subchannel);
char *target = grpc_channel_get_target(master);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
size_t n = 0;
@ -261,19 +225,19 @@ grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
GPR_ASSERT(n <= MAX_FILTERS);
channel =
grpc_channel_create_from_filters(&exec_ctx, target, filters, n, args, 1);
grpc_channel_create_from_filters(&exec_ctx, NULL, filters, n, args, 1);
gpr_free(target);
return channel;
}
void grpc_client_uchannel_set_subchannel(grpc_channel *uchannel,
grpc_subchannel *subchannel) {
void grpc_client_uchannel_set_connected_subchannel(
grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel) {
grpc_channel_element *elem =
grpc_channel_stack_last_element(grpc_channel_get_channel_stack(uchannel));
channel_data *chand = elem->channel_data;
GPR_ASSERT(elem->filter == &grpc_client_uchannel_filter);
gpr_mu_lock(&chand->mu_state);
chand->subchannel = subchannel;
chand->connected_subchannel = connected_subchannel;
GRPC_CONNECTED_SUBCHANNEL_REF(connected_subchannel, "uchannel");
gpr_mu_unlock(&chand->mu_state);
}

@ -48,23 +48,13 @@ grpc_connectivity_state grpc_client_uchannel_check_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect);
void grpc_client_uchannel_watch_connectivity_state(
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
grpc_connectivity_state *state, grpc_closure *on_complete);
grpc_pollset_set *grpc_client_uchannel_get_connecting_pollset_set(
grpc_channel_element *elem);
void grpc_client_uchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
void grpc_client_uchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_channel_element *channel,
grpc_pollset *pollset);
grpc_channel *grpc_client_uchannel_create(grpc_subchannel *subchannel,
grpc_channel_args *args);
void grpc_client_uchannel_set_subchannel(grpc_channel *uchannel,
grpc_subchannel *subchannel);
void grpc_client_uchannel_set_connected_subchannel(
grpc_channel *uchannel, grpc_connected_subchannel *connected_subchannel);
#endif /* GRPC_INTERNAL_CORE_CHANNEL_CLIENT_MICROCHANNEL_H */

@ -39,11 +39,11 @@
#include <grpc/support/log.h>
#include <grpc/support/slice_buffer.h>
#include "src/core/channel/compress_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/profiling/timers.h"
#include "src/core/channel/compress_filter.h"
#include "src/core/compression/algorithm_metadata.h"
#include "src/core/compression/message_compress.h"
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/static_metadata.h"
@ -288,8 +288,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
}
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_compress_filter = {
compress_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),

@ -89,9 +89,9 @@ static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
int r;
GPR_ASSERT(elem->filter == &grpc_connected_channel_filter);
r = grpc_transport_init_stream(exec_ctx, chand->transport,
TRANSPORT_STREAM_FROM_CALL_DATA(calld),
args->refcount, args->server_transport_data);
r = grpc_transport_init_stream(
exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
&args->call_stack->refcount, args->server_transport_data);
GPR_ASSERT(r == 0);
}

@ -31,12 +31,12 @@
*/
#include "src/core/channel/http_client_filter.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/support/string.h"
#include "src/core/transport/static_metadata.h"
typedef struct call_data {

@ -33,9 +33,9 @@
#include "src/core/channel/http_server_filter.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <string.h>
#include "src/core/profiling/timers.h"
#include "src/core/transport/static_metadata.h"
@ -124,7 +124,6 @@ static grpc_mdelem *server_filter(void *user_data, grpc_mdelem *md) {
omitted */
grpc_mdelem *authority = grpc_mdelem_from_metadata_strings(
GRPC_MDSTR_AUTHORITY, GRPC_MDSTR_REF(md->value));
GRPC_MDELEM_UNREF(md);
calld->seen_authority = 1;
return authority;
} else {
@ -225,8 +224,7 @@ static void init_channel_elem(grpc_exec_ctx *exec_ctx,
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
}
grpc_channel_element *elem) {}
const grpc_channel_filter grpc_http_server_filter = {
hs_start_transport_op, grpc_channel_next_op, sizeof(call_data),

@ -1,118 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/channel/noop_filter.h"
#include <grpc/support/log.h>
typedef struct call_data {
int unused; /* C89 requires at least one struct element */
} call_data;
typedef struct channel_data {
int unused; /* C89 requires at least one struct element */
} channel_data;
/* used to silence 'variable not used' warnings */
static void ignore_unused(void *ignored) {}
static void noop_mutate_op(grpc_call_element *elem,
grpc_transport_stream_op *op) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
ignore_unused(calld);
ignore_unused(channeld);
/* do nothing */
}
/* Called either:
- in response to an API call (or similar) from above, to send something
- a network event (or similar) from below, to receive something
op contains type and call direction information, in addition to the data
that is being sent or received. */
static void noop_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem,
grpc_transport_stream_op *op) {
noop_mutate_op(elem, op);
/* pass control down the stack */
grpc_call_next_op(exec_ctx, elem, op);
}
/* Constructor for call_data */
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
/* grab pointers to our data from the call element */
call_data *calld = elem->call_data;
channel_data *channeld = elem->channel_data;
/* initialize members */
calld->unused = channeld->unused;
}
/* Destructor for call_data */
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
/* Constructor for channel_data */
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
/* The last filter tends to be implemented differently to
handle the case that there's no 'next' filter to call on the down
path */
GPR_ASSERT(!args->is_last);
/* initialize members */
channeld->unused = 0;
}
/* Destructor for channel data */
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem) {
/* grab pointers to our data from the channel element */
channel_data *channeld = elem->channel_data;
ignore_unused(channeld);
}
const grpc_channel_filter grpc_no_op_filter = {
noop_start_transport_stream_op, grpc_channel_next_op, sizeof(call_data),
init_call_elem, grpc_call_stack_ignore_set_pollset, destroy_call_elem,
sizeof(channel_data), init_channel_elem, destroy_channel_elem,
grpc_call_next_get_peer, "no-op"};

@ -44,7 +44,6 @@
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *holder,
int success);
static void call_ready(grpc_exec_ctx *exec_ctx, void *holder, int success);
static void retry_ops(grpc_exec_ctx *exec_ctx, void *retry_ops_args,
int success);
@ -58,16 +57,17 @@ static void retry_waiting_locked(grpc_exec_ctx *exec_ctx,
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg) {
void *pick_subchannel_arg, grpc_call_stack *owning_call) {
gpr_atm_rel_store(&holder->subchannel_call, 0);
holder->pick_subchannel = pick_subchannel;
holder->pick_subchannel_arg = pick_subchannel_arg;
gpr_mu_init(&holder->mu);
holder->subchannel = NULL;
holder->connected_subchannel = NULL;
holder->waiting_ops = NULL;
holder->waiting_ops_count = 0;
holder->waiting_ops_capacity = 0;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
holder->owning_call = owning_call;
}
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
@ -125,13 +125,9 @@ retry:
case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
fail_locked(exec_ctx, holder);
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL:
grpc_subchannel_cancel_create_call(exec_ctx, holder->subchannel,
&holder->subchannel_call);
break;
case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
&holder->subchannel, NULL);
&holder->connected_subchannel, NULL);
break;
}
gpr_mu_unlock(&holder->mu);
@ -142,28 +138,27 @@ retry:
}
/* if we don't have a subchannel, try to get one */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->subchannel == NULL && op->send_initial_metadata != NULL) {
holder->connected_subchannel == NULL &&
op->send_initial_metadata != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
grpc_closure_init(&holder->next_step, subchannel_ready, holder);
if (holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg,
op->send_initial_metadata, &holder->subchannel,
&holder->next_step)) {
GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
if (holder->pick_subchannel(
exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
&holder->connected_subchannel, &holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
}
/* if we've got a subchannel, then let's ask it to create a call */
if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
holder->subchannel != NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL;
grpc_closure_init(&holder->next_step, call_ready, holder);
if (grpc_subchannel_create_call(exec_ctx, holder->subchannel,
holder->pollset, &holder->subchannel_call,
&holder->next_step)) {
/* got one immediately - continue the op (and any waiting ops) */
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
holder->connected_subchannel != NULL) {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
goto retry;
}
/* nothing to be done but wait */
add_waiting_locked(holder, op);
@ -179,36 +174,18 @@ static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
call = GET_CALL(holder);
GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
if (holder->subchannel == NULL) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (holder->connected_subchannel == NULL) {
fail_locked(exec_ctx, holder);
} else {
grpc_closure_init(&holder->next_step, call_ready, holder);
if (grpc_subchannel_create_call(exec_ctx, holder->subchannel,
holder->pollset, &holder->subchannel_call,
&holder->next_step)) {
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
/* got one immediately - continue the op (and any waiting ops) */
retry_waiting_locked(exec_ctx, holder);
}
}
gpr_mu_unlock(&holder->mu);
}
static void call_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_subchannel_call_holder *holder = arg;
GPR_TIMER_BEGIN("call_ready", 0);
gpr_mu_lock(&holder->mu);
GPR_ASSERT(holder->creation_phase ==
GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL);
holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
if (GET_CALL(holder) != NULL) {
gpr_atm_rel_store(
&holder->subchannel_call,
(gpr_atm)(gpr_uintptr)grpc_connected_subchannel_create_call(
exec_ctx, holder->connected_subchannel, holder->pollset));
retry_waiting_locked(exec_ctx, holder);
} else {
fail_locked(exec_ctx, holder);
}
gpr_mu_unlock(&holder->mu);
GPR_TIMER_END("call_ready", 0);
GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
typedef struct {
@ -270,14 +247,13 @@ static void fail_locked(grpc_exec_ctx *exec_ctx,
holder->waiting_ops_count = 0;
}
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_channel *master) {
char *grpc_subchannel_call_holder_get_peer(
grpc_exec_ctx *exec_ctx, grpc_subchannel_call_holder *holder) {
grpc_subchannel_call *subchannel_call = GET_CALL(holder);
if (subchannel_call) {
return grpc_subchannel_call_get_peer(exec_ctx, subchannel_call);
} else {
return grpc_channel_get_target(master);
return NULL;
}
}
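
The holder now has only two creation phases; the sketch below is a toy model of that flow, with ints standing in for the connected subchannel and the call, and the waiting-op array collapsed to a counter. It compresses the asynchronous subchannel_ready callback into a manual step in main, so treat it as an illustration of the control flow rather than the actual implementation.

#include <stdio.h>

/* the CREATING_CALL phase is gone: once a connected subchannel is picked,
   the call is created synchronously, so only two phases remain */
typedef enum { NOT_CREATING, PICKING_SUBCHANNEL } phase;

typedef struct holder {
  phase creation_phase;
  int connected_subchannel; /* 0 = none yet (stand-in for the pointer) */
  int call;                 /* 0 = no subchannel call yet */
  int waiting_ops;          /* queued ops (stand-in for the array) */
} holder;

static void retry_waiting(holder *h) {
  printf("replaying %d waiting op(s) on call %d\n", h->waiting_ops, h->call);
  h->waiting_ops = 0;
}

/* mirrors the shape of grpc_subchannel_call_holder_perform_op:
   pick if needed, create the call synchronously once picked, else queue */
static void perform_op(holder *h, int pick_completes_immediately) {
  if (h->call) { printf("op goes straight to call %d\n", h->call); return; }
  if (h->creation_phase == NOT_CREATING && !h->connected_subchannel) {
    h->creation_phase = PICKING_SUBCHANNEL;
    if (pick_completes_immediately) {
      h->connected_subchannel = 42;
      h->creation_phase = NOT_CREATING;
    }
  }
  if (h->creation_phase == NOT_CREATING && h->connected_subchannel) {
    h->call = 1; /* created synchronously from the connected subchannel */
    retry_waiting(h);
    perform_op(h, pick_completes_immediately); /* the "goto retry" */
    return;
  }
  h->waiting_ops++; /* nothing to be done but wait */
  printf("queued op; %d waiting\n", h->waiting_ops);
}

int main(void) {
  holder h = {NOT_CREATING, 0, 0, 0};
  perform_op(&h, 0); /* pick is asynchronous: op gets queued */
  /* later, the pick callback (subchannel_ready) would run: */
  h.connected_subchannel = 42;
  h.creation_phase = NOT_CREATING;
  perform_op(&h, 0); /* now the call is created and queued ops replayed */
  return 0;
}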

@ -42,12 +42,11 @@
called when the subchannel is available) */
typedef int (*grpc_subchannel_call_holder_pick_subchannel)(
grpc_exec_ctx *exec_ctx, void *arg, grpc_metadata_batch *initial_metadata,
grpc_subchannel **subchannel, grpc_closure *on_ready);
grpc_connected_subchannel **connected_subchannel, grpc_closure *on_ready);
typedef enum {
GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING,
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL,
GRPC_SUBCHANNEL_CALL_HOLDER_CREATING_CALL
GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL
} grpc_subchannel_call_holder_creation_phase;
/** Wrapper for holding a pointer to grpc_subchannel_call, and the
@ -71,7 +70,7 @@ typedef struct grpc_subchannel_call_holder {
gpr_mu mu;
grpc_subchannel_call_holder_creation_phase creation_phase;
grpc_subchannel *subchannel;
grpc_connected_subchannel *connected_subchannel;
grpc_pollset *pollset;
grpc_transport_stream_op *waiting_ops;
@ -79,12 +78,14 @@ typedef struct grpc_subchannel_call_holder {
size_t waiting_ops_capacity;
grpc_closure next_step;
grpc_call_stack *owning_call;
} grpc_subchannel_call_holder;
void grpc_subchannel_call_holder_init(
grpc_subchannel_call_holder *holder,
grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
void *pick_subchannel_arg);
void *pick_subchannel_arg, grpc_call_stack *owning_call);
void grpc_subchannel_call_holder_destroy(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder);
@ -92,7 +93,6 @@ void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_transport_stream_op *op);
char *grpc_subchannel_call_holder_get_peer(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call_holder *holder,
grpc_channel *master);
grpc_subchannel_call_holder *holder);
#endif

@ -42,7 +42,7 @@
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@ -60,7 +60,7 @@ typedef struct {
/** the selected channel
TODO(ctiller): this should be atomically set so we don't
need to take a mutex in the common case */
grpc_subchannel *selected;
grpc_connected_subchannel *selected;
/** have we started picking? */
int started_picking;
/** are we shut down? */
@ -76,24 +76,6 @@ typedef struct {
grpc_connectivity_state_tracker state_tracker;
} pick_first_lb_policy;
static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
static void add_interested_parties_locked(grpc_exec_ctx *exec_ctx,
pick_first_lb_policy *p) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_add_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
}
}
void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
@ -102,7 +84,7 @@ void pf_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "pick_first");
}
if (p->selected) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, p->selected, "picked_first");
}
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@ -114,16 +96,26 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
del_interested_parties_locked(exec_ctx, p);
p->shutdown = 1;
pp = p->pending_picks;
p->pending_picks = NULL;
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
/* cancel subscription */
if (p->selected != NULL) {
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, NULL, NULL, &p->connectivity_changed);
} else {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
&p->connectivity_changed);
}
gpr_mu_unlock(&p->mu);
while (pp != NULL) {
pending_pick *next = pp->next;
*pp->target = NULL;
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
pp = next;
@ -131,7 +123,7 @@ void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_subchannel **target) {
grpc_connected_subchannel **target) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
@ -140,8 +132,8 @@ static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pp->pollset);
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
@ -158,10 +150,11 @@ static void start_picking(grpc_exec_ctx *exec_ctx, pick_first_lb_policy *p) {
p->started_picking = 1;
p->checking_subchannel = 0;
p->checking_connectivity = GRPC_CHANNEL_IDLE;
GRPC_LB_POLICY_REF(&p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_REF(&p->base, "pick_first_connectivity");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
}
void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
@ -174,8 +167,8 @@ void pf_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
grpc_closure *on_complete) {
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target, grpc_closure *on_complete) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
@ -187,8 +180,8 @@ int pf_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
grpc_subchannel_add_interested_party(
exec_ctx, p->subchannels[p->checking_subchannel], pollset);
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@ -204,25 +197,17 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
size_t i;
grpc_transport_op op;
size_t num_subchannels = p->num_subchannels;
grpc_subchannel **subchannels;
grpc_subchannel *exclude_subchannel;
gpr_mu_lock(&p->mu);
subchannels = p->subchannels;
p->num_subchannels = 0;
p->subchannels = NULL;
exclude_subchannel = p->selected;
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "destroy_subchannels");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "destroy_subchannels");
for (i = 0; i < num_subchannels; i++) {
if (subchannels[i] != exclude_subchannel) {
memset(&op, 0, sizeof(op));
op.disconnect = 1;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], &op);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pick_first");
}
@ -232,23 +217,28 @@ static void destroy_subchannels(grpc_exec_ctx *exec_ctx, void *arg,
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
pick_first_lb_policy *p = arg;
grpc_subchannel *selected_subchannel;
pending_pick *pp;
gpr_mu_lock(&p->mu);
if (p->shutdown) {
gpr_mu_unlock(&p->mu);
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
return;
} else if (p->selected != NULL) {
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
/* if the selected channel goes bad, we're done */
p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
p->checking_connectivity, "selected_changed");
if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
&p->checking_connectivity,
&p->connectivity_changed);
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
} else {
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
}
} else {
loop:
@ -256,39 +246,41 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
p->selected = p->subchannels[p->checking_subchannel];
GRPC_SUBCHANNEL_REF(p->selected, "picked_first");
selected_subchannel = p->subchannels[p->checking_subchannel];
p->selected =
grpc_subchannel_get_connected_subchannel(selected_subchannel);
GPR_ASSERT(p->selected);
GRPC_CONNECTED_SUBCHANNEL_REF(p->selected, "picked_first");
/* drop the pick list: we are connected now */
GRPC_LB_POLICY_REF(&p->base, "destroy_subchannels");
GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
grpc_exec_ctx_enqueue(exec_ctx,
grpc_closure_create(destroy_subchannels, p), 1);
/* update any calls that were waiting for a pick */
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = p->selected;
grpc_subchannel_del_interested_party(exec_ctx, p->selected,
pp->pollset);
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(exec_ctx, p->selected,
&p->checking_connectivity,
&p->connectivity_changed);
grpc_connected_subchannel_notify_on_state_change(
exec_ctx, p->selected, &p->base.interested_parties,
&p->checking_connectivity, &p->connectivity_changed);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
del_interested_parties_locked(exec_ctx, p);
p->checking_subchannel =
(p->checking_subchannel + 1) % p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
add_interested_parties_locked(exec_ctx, p);
if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
} else {
goto loop;
}
@ -300,13 +292,13 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[p->checking_subchannel],
&p->checking_connectivity, &p->connectivity_changed);
&p->base.interested_parties, &p->checking_connectivity,
&p->connectivity_changed);
break;
case GRPC_CHANNEL_FATAL_FAILURE:
del_interested_parties_locked(exec_ctx, p);
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"pick_first");
if (p->num_subchannels == 0) {
@ -319,7 +311,8 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
"pick_first_connectivity");
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -327,7 +320,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
p->checking_subchannel %= p->num_subchannels;
p->checking_connectivity = grpc_subchannel_check_connectivity(
p->subchannels[p->checking_subchannel]);
add_interested_parties_locked(exec_ctx, p);
goto loop;
}
}
@ -336,39 +328,6 @@ static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
}
static void pf_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_transport_op *op) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
size_t i;
size_t n;
grpc_subchannel **subchannels;
grpc_subchannel *selected;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
selected = p->selected;
if (selected) {
GRPC_SUBCHANNEL_REF(selected, "pf_broadcast_to_selected");
}
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "pf_broadcast");
}
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
if (selected == subchannels[i]) continue;
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "pf_broadcast");
}
if (p->selected) {
grpc_subchannel_process_transport_op(exec_ctx, selected, op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, selected, "pf_broadcast_to_selected");
}
gpr_free(subchannels);
}
static grpc_connectivity_state pf_check_connectivity(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *pol) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
@ -389,9 +348,21 @@ void pf_notify_on_state_change(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
gpr_mu_unlock(&p->mu);
}
void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
gpr_mu_lock(&p->mu);
if (p->selected) {
grpc_connected_subchannel_ping(exec_ctx, p->selected, closure);
} else {
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
}
gpr_mu_unlock(&p->mu);
}
static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_exit_idle,
pf_broadcast, pf_check_connectivity, pf_notify_on_state_change};
pf_destroy, pf_shutdown, pf_pick, pf_cancel_pick, pf_ping_one, pf_exit_idle,
pf_check_connectivity, pf_notify_on_state_change};
static void pick_first_factory_ref(grpc_lb_policy_factory *factory) {}
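
One detail worth illustrating: pending picks are cancelled by matching the caller's output slot (pp->target == target), so the target pointer doubles as the cancellation key. Below is a self-contained sketch of that idea with plain ints in place of grpc_connected_subchannel; the names are made up for the example.

#include <stdio.h>
#include <stdlib.h>

typedef struct pending_pick {
  struct pending_pick *next;
  int **target; /* the caller's output slot doubles as the cancellation key */
} pending_pick;

/* remove and fail the pending pick whose output slot matches `target`,
   mirroring the walk in pf_cancel_pick / rr_cancel_pick */
static pending_pick *cancel_pick(pending_pick *list, int **target) {
  pending_pick *pp = list, *kept = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if (pp->target == target) {
      *target = NULL; /* tell the caller the pick was abandoned */
      printf("cancelled pick for slot %p\n", (void *)target);
      free(pp);
    } else {
      pp->next = kept;
      kept = pp;
    }
    pp = next;
  }
  return kept;
}

int main(void) {
  int sc = 42;
  int *slot_a = &sc, *slot_b = &sc;
  pending_pick *list = NULL;
  int **slots[] = {&slot_a, &slot_b};
  for (int i = 0; i < 2; i++) {
    pending_pick *pp = malloc(sizeof(*pp));
    pp->target = slots[i];
    pp->next = list;
    list = pp;
  }
  list = cancel_pick(list, &slot_b);
  printf("slot_b is now %p, slot_a still %p\n", (void *)slot_b, (void *)slot_a);
  while (list) { pending_pick *n = list->next; free(list); list = n; }
  return 0;
}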

@ -38,6 +38,8 @@
#include <grpc/support/alloc.h>
#include "src/core/transport/connectivity_state.h"
typedef struct round_robin_lb_policy round_robin_lb_policy;
int grpc_lb_round_robin_trace = 0;
/** List of entities waiting for a pick.
@ -46,7 +48,7 @@ int grpc_lb_round_robin_trace = 0;
typedef struct pending_pick {
struct pending_pick *next;
grpc_pollset *pollset;
grpc_subchannel **target;
grpc_connected_subchannel **target;
grpc_closure *on_complete;
} pending_pick;
@ -58,22 +60,27 @@ typedef struct ready_list {
} ready_list;
typedef struct {
size_t subchannel_idx; /**< Index over p->subchannels */
void *p; /**< round_robin_lb_policy instance */
} connectivity_changed_cb_arg;
typedef struct {
/** index within policy->subchannels */
size_t index;
/** backpointer to owning policy */
round_robin_lb_policy *policy;
/** subchannel itself */
grpc_subchannel *subchannel;
/** notification that connectivity has changed on subchannel */
grpc_closure connectivity_changed_closure;
/** this subchannel's current position in the policy's ready_list */
ready_list *ready_list_node;
/** last observed connectivity */
grpc_connectivity_state connectivity_state;
} subchannel_data;
struct round_robin_lb_policy {
/** base policy: must be first */
grpc_lb_policy base;
/** all our subchannels */
grpc_subchannel **subchannels;
size_t num_subchannels;
/** Callbacks, one per subchannel being watched, to be called when their
* respective connectivity changes */
grpc_closure *connectivity_changed_cbs;
connectivity_changed_cb_arg *cb_args;
subchannel_data **subchannels;
/** mutex protecting remaining members */
gpr_mu mu;
@ -81,8 +88,6 @@ typedef struct {
int started_picking;
/** are we shutting down? */
int shutdown;
/** Connectivity state of the subchannels being watched */
grpc_connectivity_state *subchannel_connectivity;
/** List of picks that are waiting on connectivity */
pending_pick *pending_picks;
@ -93,13 +98,7 @@ typedef struct {
ready_list ready_list;
/** Last pick from the ready list. */
ready_list *ready_list_last_pick;
/** Subchannel index to ready_list node.
*
* Kept in order to remove nodes from the ready list associated with a
* subchannel */
ready_list **subchannel_index_to_readylist_node;
} round_robin_lb_policy;
};
/** Returns the next subchannel from the connected list or NULL if the list is
* empty.
@ -144,9 +143,9 @@ static void advance_last_picked_locked(round_robin_lb_policy *p) {
/** Prepends (relative to the root at p->ready_list) the connected subchannel \a
* csc to the list of ready subchannels. */
static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
grpc_subchannel *csc) {
grpc_subchannel *sc) {
ready_list *new_elem = gpr_malloc(sizeof(ready_list));
new_elem->subchannel = csc;
new_elem->subchannel = sc;
if (p->ready_list.prev == NULL) {
/* first element */
new_elem->next = &p->ready_list;
@ -160,7 +159,7 @@ static ready_list *add_connected_sc_locked(round_robin_lb_policy *p,
p->ready_list.prev = new_elem;
}
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, csc);
gpr_log(GPR_DEBUG, "[READYLIST] ADDING NODE %p (SC %p)", new_elem, sc);
}
return new_elem;
}
@ -200,28 +199,15 @@ static void remove_disconnected_sc_locked(round_robin_lb_policy *p,
gpr_free(node);
}
static void del_interested_parties_locked(grpc_exec_ctx *exec_ctx,
round_robin_lb_policy *p,
const size_t subchannel_idx) {
pending_pick *pp;
for (pp = p->pending_picks; pp; pp = pp->next) {
grpc_subchannel_del_interested_party(
exec_ctx, p->subchannels[subchannel_idx], pp->pollset);
}
}
void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
ready_list *elem;
for (i = 0; i < p->num_subchannels; i++) {
del_interested_parties_locked(exec_ctx, p, i);
}
for (i = 0; i < p->num_subchannels; i++) {
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[i], "round_robin");
subchannel_data *sd = p->subchannels[i];
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
gpr_free(sd);
}
gpr_free(p->connectivity_changed_cbs);
gpr_free(p->subchannel_connectivity);
grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
gpr_free(p->subchannels);
@ -237,20 +223,15 @@ void rr_destroy(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
gpr_free(elem);
elem = tmp;
}
gpr_free(p->subchannel_index_to_readylist_node);
gpr_free(p->cb_args);
gpr_free(p);
}
void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
size_t i;
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
gpr_mu_lock(&p->mu);
size_t i;
for (i = 0; i < p->num_subchannels; i++) {
del_interested_parties_locked(exec_ctx, p, i);
}
gpr_mu_lock(&p->mu);
p->shutdown = 1;
while ((pp = p->pending_picks)) {
@ -261,24 +242,26 @@ void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE, "shutdown");
for (i = 0; i < p->num_subchannels; i++) {
subchannel_data *sd = p->subchannels[i];
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
&sd->connectivity_changed_closure);
}
gpr_mu_unlock(&p->mu);
}
static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_subchannel **target) {
grpc_connected_subchannel **target) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
size_t i;
gpr_mu_lock(&p->mu);
pp = p->pending_picks;
p->pending_picks = NULL;
while (pp != NULL) {
pending_pick *next = pp->next;
if (pp->target == target) {
for (i = 0; i < p->num_subchannels; i++) {
grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
pp->pollset);
}
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
*target = NULL;
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 0);
gpr_free(pp);
@ -295,12 +278,16 @@ static void start_picking(grpc_exec_ctx *exec_ctx, round_robin_lb_policy *p) {
size_t i;
p->started_picking = 1;
gpr_log(GPR_DEBUG, "LB_POLICY: p=%p num_subchannels=%d", p,
p->num_subchannels);
for (i = 0; i < p->num_subchannels; i++) {
p->subchannel_connectivity[i] = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(exec_ctx, p->subchannels[i],
&p->subchannel_connectivity[i],
&p->connectivity_changed_cbs[i]);
GRPC_LB_POLICY_REF(&p->base, "round_robin_connectivity");
subchannel_data *sd = p->subchannels[i];
sd->connectivity_state = GRPC_CHANNEL_IDLE;
grpc_subchannel_notify_on_state_change(
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
GRPC_LB_POLICY_WEAK_REF(&p->base, "round_robin_connectivity");
}
}
@ -314,18 +301,18 @@ void rr_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
}
int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata, grpc_subchannel **target,
grpc_closure *on_complete) {
size_t i;
grpc_metadata_batch *initial_metadata,
grpc_connected_subchannel **target, grpc_closure *on_complete) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
pending_pick *pp;
ready_list *selected;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
*target = selected->subchannel;
*target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG, "[RR PICK] TARGET <-- SUBCHANNEL %p (NODE %p)",
gpr_log(GPR_DEBUG,
"[RR PICK] TARGET <-- CONNECTED SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
/* only advance the last picked pointer if the selection was used */
@ -335,10 +322,8 @@ int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
if (!p->started_picking) {
start_picking(exec_ctx, p);
}
for (i = 0; i < p->num_subchannels; i++) {
grpc_subchannel_add_interested_party(exec_ctx, p->subchannels[i],
pollset);
}
grpc_pollset_set_add_pollset(exec_ctx, &p->base.interested_parties,
pollset);
pp = gpr_malloc(sizeof(*pp));
pp->next = p->pending_picks;
pp->pollset = pollset;
@ -352,33 +337,25 @@ int rr_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol, grpc_pollset *pollset,
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
connectivity_changed_cb_arg *cb_arg = arg;
round_robin_lb_policy *p = cb_arg->p;
/* index over p->subchannels of this cb's subchannel */
const size_t this_idx = cb_arg->subchannel_idx;
subchannel_data *sd = arg;
round_robin_lb_policy *p = sd->policy;
pending_pick *pp;
ready_list *selected;
int unref = 0;
/* connectivity state of this cb's subchannel */
grpc_connectivity_state *this_connectivity;
gpr_mu_lock(&p->mu);
this_connectivity = &p->subchannel_connectivity[this_idx];
if (p->shutdown) {
unref = 1;
} else {
switch (*this_connectivity) {
switch (sd->connectivity_state) {
case GRPC_CHANNEL_READY:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_READY, "connecting_ready");
/* add the newly connected subchannel to the list of connected ones.
* Note that it goes to the "end of the line". */
p->subchannel_index_to_readylist_node[this_idx] =
add_connected_sc_locked(p, p->subchannels[this_idx]);
sd->ready_list_node = add_connected_sc_locked(p, sd->subchannel);
/* at this point we know there's at least one suitable subchannel. Go
* ahead and pick one and notify the pending suitors in
* p->pending_picks. This preemptively replicates rr_pick()'s actions. */
@ -390,60 +367,60 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
}
while ((pp = p->pending_picks)) {
p->pending_picks = pp->next;
*pp->target = selected->subchannel;
*pp->target =
grpc_subchannel_get_connected_subchannel(selected->subchannel);
if (grpc_lb_round_robin_trace) {
gpr_log(GPR_DEBUG,
"[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
selected->subchannel, selected);
}
grpc_subchannel_del_interested_party(exec_ctx, selected->subchannel,
pp->pollset);
grpc_pollset_set_del_pollset(exec_ctx, &p->base.interested_parties,
pp->pollset);
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_CONNECTING:
case GRPC_CHANNEL_IDLE:
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
*this_connectivity, "connecting_changed");
sd->connectivity_state,
"connecting_changed");
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
break;
case GRPC_CHANNEL_TRANSIENT_FAILURE:
del_interested_parties_locked(exec_ctx, p, this_idx);
/* renew state notification */
grpc_subchannel_notify_on_state_change(
exec_ctx, p->subchannels[this_idx], this_connectivity,
&p->connectivity_changed_cbs[this_idx]);
exec_ctx, sd->subchannel, &p->base.interested_parties,
&sd->connectivity_state, &sd->connectivity_changed_closure);
/* remove from ready list if still present */
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
remove_disconnected_sc_locked(
p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
"connecting_transient_failure");
break;
case GRPC_CHANNEL_FATAL_FAILURE:
del_interested_parties_locked(exec_ctx, p, this_idx);
if (p->subchannel_index_to_readylist_node[this_idx] != NULL) {
remove_disconnected_sc_locked(
p, p->subchannel_index_to_readylist_node[this_idx]);
p->subchannel_index_to_readylist_node[this_idx] = NULL;
if (sd->ready_list_node != NULL) {
remove_disconnected_sc_locked(p, sd->ready_list_node);
sd->ready_list_node = NULL;
}
GPR_SWAP(grpc_subchannel *, p->subchannels[this_idx],
p->subchannels[p->num_subchannels - 1]);
p->num_subchannels--;
GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
"round_robin");
GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
p->subchannels[p->num_subchannels]);
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
p->subchannels[sd->index]->index = sd->index;
gpr_free(sd);
unref = 1;
if (p->num_subchannels == 0) {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_FATAL_FAILURE,
@ -454,7 +431,6 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, 1);
gpr_free(pp);
}
unref = 1;
} else {
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
GRPC_CHANNEL_TRANSIENT_FAILURE,
@ -466,31 +442,8 @@ static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_unlock(&p->mu);
if (unref) {
GRPC_LB_POLICY_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
}
static void rr_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_transport_op *op) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
size_t i;
size_t n;
grpc_subchannel **subchannels;
gpr_mu_lock(&p->mu);
n = p->num_subchannels;
subchannels = gpr_malloc(n * sizeof(*subchannels));
for (i = 0; i < n; i++) {
subchannels[i] = p->subchannels[i];
GRPC_SUBCHANNEL_REF(subchannels[i], "rr_broadcast");
}
gpr_mu_unlock(&p->mu);
for (i = 0; i < n; i++) {
grpc_subchannel_process_transport_op(exec_ctx, subchannels[i], op);
GRPC_SUBCHANNEL_UNREF(exec_ctx, subchannels[i], "rr_broadcast");
GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
}
gpr_free(subchannels);
}
static grpc_connectivity_state rr_check_connectivity(grpc_exec_ctx *exec_ctx,
@ -514,9 +467,25 @@ static void rr_notify_on_state_change(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&p->mu);
}
static void rr_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_closure *closure) {
round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
ready_list *selected;
grpc_connected_subchannel *target;
gpr_mu_lock(&p->mu);
if ((selected = peek_next_connected_locked(p))) {
gpr_mu_unlock(&p->mu);
target = grpc_subchannel_get_connected_subchannel(selected->subchannel);
grpc_connected_subchannel_ping(exec_ctx, target, closure);
} else {
gpr_mu_unlock(&p->mu);
grpc_exec_ctx_enqueue(exec_ctx, closure, 0);
}
}
static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_exit_idle,
rr_broadcast, rr_check_connectivity, rr_notify_on_state_change};
rr_destroy, rr_shutdown, rr_pick, rr_cancel_pick, rr_ping_one, rr_exit_idle,
rr_check_connectivity, rr_notify_on_state_change};
static void round_robin_factory_ref(grpc_lb_policy_factory *factory) {}
@ -529,27 +498,22 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
GPR_ASSERT(args->num_subchannels > 0);
memset(p, 0, sizeof(*p));
grpc_lb_policy_init(&p->base, &round_robin_lb_policy_vtable);
p->subchannels =
gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
p->num_subchannels = args->num_subchannels;
p->subchannels = gpr_malloc(sizeof(*p->subchannels) * p->num_subchannels);
memset(p->subchannels, 0, sizeof(*p->subchannels) * p->num_subchannels);
grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
"round_robin");
memcpy(p->subchannels, args->subchannels,
sizeof(grpc_subchannel *) * args->num_subchannels);
gpr_mu_init(&p->mu);
p->connectivity_changed_cbs =
gpr_malloc(sizeof(grpc_closure) * args->num_subchannels);
p->subchannel_connectivity =
gpr_malloc(sizeof(grpc_connectivity_state) * args->num_subchannels);
p->cb_args =
gpr_malloc(sizeof(connectivity_changed_cb_arg) * args->num_subchannels);
for (i = 0; i < args->num_subchannels; i++) {
p->cb_args[i].subchannel_idx = i;
p->cb_args[i].p = p;
grpc_closure_init(&p->connectivity_changed_cbs[i], rr_connectivity_changed,
&p->cb_args[i]);
subchannel_data *sd = gpr_malloc(sizeof(*sd));
memset(sd, 0, sizeof(*sd));
p->subchannels[i] = sd;
sd->policy = p;
sd->index = i;
sd->subchannel = args->subchannels[i];
grpc_closure_init(&sd->connectivity_changed_closure,
rr_connectivity_changed, sd);
}
/* The (dummy node) root of the ready list */
@ -558,10 +522,6 @@ static grpc_lb_policy *create_round_robin(grpc_lb_policy_factory *factory,
p->ready_list.next = NULL;
p->ready_list_last_pick = &p->ready_list;
p->subchannel_index_to_readylist_node =
gpr_malloc(sizeof(grpc_subchannel *) * args->num_subchannels);
memset(p->subchannel_index_to_readylist_node, 0,
sizeof(grpc_subchannel *) * args->num_subchannels);
return &p->base;
}
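
The ready list is a sentinel-rooted circular doubly-linked list: the root node carries no subchannel, new entries are linked in just before the root (the "end of the line"), and the picker walks next pointers while hopping over the sentinel. A self-contained sketch of that structure, with a string standing in for the subchannel pointer, follows; it mirrors add_connected_sc_locked and the last-picked cursor, but is not the gRPC code itself.

#include <stdio.h>
#include <stdlib.h>

typedef struct node {
  const char *name; /* stand-in for the subchannel pointer */
  struct node *next;
  struct node *prev;
} node;

/* link a new element in just before the sentinel root, i.e. at the tail */
static void add_connected(node *root, const char *name) {
  node *new_elem = malloc(sizeof(*new_elem));
  new_elem->name = name;
  if (root->prev == NULL) { /* first element */
    new_elem->next = root;
    new_elem->prev = root;
    root->next = new_elem;
    root->prev = new_elem;
  } else {
    new_elem->next = root;
    new_elem->prev = root->prev;
    root->prev->next = new_elem;
    root->prev = new_elem;
  }
}

/* advance a cursor, skipping the payload-less root (list assumed non-empty),
   like peek_next_connected_locked / advance_last_picked_locked */
static node *next_ready(node *root, node *cursor) {
  node *n = cursor->next ? cursor->next : root;
  if (n == root) n = n->next; /* hop over the sentinel */
  return n;
}

int main(void) {
  node root = {NULL, NULL, NULL};
  add_connected(&root, "sc0");
  add_connected(&root, "sc1");
  add_connected(&root, "sc2");
  node *cursor = &root;
  for (int i = 0; i < 5; i++) { /* wraps around: sc0 sc1 sc2 sc0 sc1 */
    cursor = next_ready(&root, cursor);
    printf("%s\n", cursor->name);
  }
  return 0;
}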

@ -33,63 +33,94 @@
#include "src/core/client_config/lb_policy.h"
#define WEAK_REF_BITS 16
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable) {
policy->vtable = vtable;
gpr_ref_init(&policy->refs, 1);
gpr_atm_no_barrier_store(&policy->ref_pair, 1 << WEAK_REF_BITS);
grpc_pollset_set_init(&policy->interested_parties);
}
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p ref %d -> %d %s",
policy, (int)policy->refs.count, (int)policy->refs.count + 1, reason);
#define REF_FUNC_EXTRA_ARGS , const char *file, int line, const char *reason
#define REF_MUTATE_EXTRA_ARGS REF_FUNC_EXTRA_ARGS, const char *purpose
#define REF_FUNC_PASS_ARGS(new_reason) , file, line, new_reason
#define REF_MUTATE_PASS_ARGS(purpose) , file, line, reason, purpose
#else
void grpc_lb_policy_ref(grpc_lb_policy *policy) {
#define REF_FUNC_EXTRA_ARGS
#define REF_MUTATE_EXTRA_ARGS
#define REF_FUNC_PASS_ARGS(new_reason)
#define REF_MUTATE_PASS_ARGS(x)
#endif
gpr_ref(&policy->refs);
}
static gpr_atm ref_mutate(grpc_lb_policy *c, gpr_atm delta,
int barrier REF_MUTATE_EXTRA_ARGS) {
gpr_atm old_val = barrier ? gpr_atm_full_fetch_add(&c->ref_pair, delta)
: gpr_atm_no_barrier_fetch_add(&c->ref_pair, delta);
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
void grpc_lb_policy_unref(grpc_lb_policy *policy,
grpc_closure_list *closure_list, const char *file,
int line, const char *reason) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "LB_POLICY:%p unref %d -> %d %s",
policy, (int)policy->refs.count, (int)policy->refs.count - 1, reason);
#else
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
"LB_POLICY: %p % 12s 0x%08x -> 0x%08x [%s]", c, purpose, old_val,
old_val + delta, reason);
#endif
if (gpr_unref(&policy->refs)) {
policy->vtable->destroy(exec_ctx, policy);
return old_val;
}
void grpc_lb_policy_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
}
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
policy->vtable->shutdown(exec_ctx, policy);
}
grpc_lb_policy_weak_unref(exec_ctx,
policy REF_FUNC_PASS_ARGS("strong-unref"));
}
void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->shutdown(exec_ctx, policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
}
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy REF_FUNC_EXTRA_ARGS) {
gpr_atm old_val =
ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
if (old_val == 1) {
grpc_pollset_set_destroy(&policy->interested_parties);
policy->vtable->destroy(exec_ctx, policy);
}
}
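
My reading of ref_mutate: the single atomic word packs the strong count into the high bits and the weak count into the low WEAK_REF_BITS bits; a strong unref swaps its strong ref for a temporary weak ref, runs shutdown when the previous strong count was exactly one, and leaves destruction to the last weak unref. A self-contained C11 sketch of that arithmetic (using stdatomic in place of gpr_atm) follows.

#include <stdatomic.h>
#include <stdio.h>

/* one atomic word holds both counts: strong refs in the high bits,
   weak refs in the low WEAK_REF_BITS bits */
#define WEAK_REF_BITS 16
#define STRONG_ONE ((long)1 << WEAK_REF_BITS)

static atomic_long ref_pair = STRONG_ONE; /* init: 1 strong, 0 weak */

static void strong_ref(void) { atomic_fetch_add(&ref_pair, STRONG_ONE); }
static void weak_ref(void) { atomic_fetch_add(&ref_pair, 1); }

static void weak_unref(void) {
  long old = atomic_fetch_sub(&ref_pair, 1);
  if (old == 1) printf("last weak ref gone -> destroy\n");
}

static void strong_unref(void) {
  /* drop one strong ref while temporarily taking a weak ref ... */
  long old = atomic_fetch_add(&ref_pair, 1 - STRONG_ONE);
  if ((old & ~(STRONG_ONE - 1)) == STRONG_ONE) {
    /* that was the last strong ref: shut down, but destruction waits
       for any outstanding weak refs */
    printf("last strong ref gone -> shutdown\n");
  }
  weak_unref(); /* ... then release it, possibly destroying */
}

int main(void) {
  weak_ref();     /* e.g. a pending connectivity callback */
  strong_ref();
  strong_unref();
  strong_unref(); /* prints shutdown; destroy deferred */
  weak_unref();   /* prints destroy */
  return 0;
}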
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete) {
grpc_connected_subchannel **target,
grpc_closure *on_complete) {
return policy->vtable->pick(exec_ctx, policy, pollset, initial_metadata,
target, on_complete);
}
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target) {
grpc_connected_subchannel **target) {
policy->vtable->cancel_pick(exec_ctx, policy, target);
}
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op) {
policy->vtable->broadcast(exec_ctx, policy, op);
}
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy) {
policy->vtable->exit_idle(exec_ctx, policy);
}
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure) {
policy->vtable->ping_one(exec_ctx, policy, closure);
}
void grpc_lb_policy_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy,
grpc_connectivity_state *state,

@ -47,7 +47,8 @@ typedef void (*grpc_lb_completion)(void *cb_arg, grpc_subchannel *subchannel,
struct grpc_lb_policy {
const grpc_lb_policy_vtable *vtable;
gpr_refcount refs;
gpr_atm ref_pair;
grpc_pollset_set interested_parties;
};
struct grpc_lb_policy_vtable {
@ -58,17 +59,16 @@ struct grpc_lb_policy_vtable {
/** implement grpc_lb_policy_pick */
int (*pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset, grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
grpc_connected_subchannel **target, grpc_closure *on_complete);
void (*cancel_pick)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target);
grpc_connected_subchannel **target);
void (*ping_one)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
/** try to enter a READY connectivity state */
void (*exit_idle)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** broadcast a transport op to all subchannels */
void (*broadcast)(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op);
/** check the current connectivity of the lb_policy */
grpc_connectivity_state (*check_connectivity)(grpc_exec_ctx *exec_ctx,
grpc_lb_policy *policy);
@ -81,29 +81,39 @@ struct grpc_lb_policy_vtable {
grpc_closure *closure);
};
/*#define GRPC_LB_POLICY_REFCOUNT_DEBUG*/
#ifdef GRPC_LB_POLICY_REFCOUNT_DEBUG
#define GRPC_LB_POLICY_REF(p, r) \
grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_REF(p, r) \
grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
void grpc_lb_policy_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy, const char *file, int line,
const char *reason);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
const char *file, int line, const char *reason);
#else
#define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
#define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
void grpc_lb_policy_ref(grpc_lb_policy *policy);
void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
void grpc_lb_policy_weak_ref(grpc_lb_policy *policy);
void grpc_lb_policy_weak_unref(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
#endif
/** called by concrete implementations to initialize the base struct */
void grpc_lb_policy_init(grpc_lb_policy *policy,
const grpc_lb_policy_vtable *vtable);
/** Start shutting down (fail any pending picks) */
void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
/** Given initial metadata in \a initial_metadata, find an appropriate
target for this rpc, and 'return' it by calling \a on_complete after setting
\a target.
@ -111,13 +121,14 @@ void grpc_lb_policy_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
int grpc_lb_policy_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_pollset *pollset,
grpc_metadata_batch *initial_metadata,
grpc_subchannel **target, grpc_closure *on_complete);
grpc_connected_subchannel **target,
grpc_closure *on_complete);
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_subchannel **target);
void grpc_lb_policy_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_closure *closure);
void grpc_lb_policy_broadcast(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_transport_op *op);
void grpc_lb_policy_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy,
grpc_connected_subchannel **target);
void grpc_lb_policy_exit_idle(grpc_exec_ctx *exec_ctx, grpc_lb_policy *policy);
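The lb_policy ref-counting above packs a strong and a weak count into the single atomic ref_pair field: dropping the last strong reference triggers shutdown, while the memory (and the embedded interested_parties pollset_set) is only destroyed once the last weak reference goes away, as grpc_lb_policy_weak_unref shows earlier in this diff. The standalone sketch below illustrates that layout with C11 atomics; the 16-bit split and all names are illustrative assumptions, not the lb_policy.c implementation.

#include <stdatomic.h>
#include <stdint.h>

#define WEAK_BITS 16 /* assumed split point, purely for illustration */
#define STRONG_ONE ((uint_fast64_t)1 << WEAK_BITS)

typedef struct policy policy;
struct policy {
  atomic_uint_fast64_t ref_pair; /* (strong << WEAK_BITS) | weak */
  void (*shutdown)(policy *p);   /* fail pending picks */
  void (*destroy)(policy *p);    /* free memory */
};

static void policy_init(policy *p) {
  /* one strong ref, which carries one implicit weak ref */
  atomic_init(&p->ref_pair, STRONG_ONE + 1);
}
static void policy_weak_ref(policy *p) { atomic_fetch_add(&p->ref_pair, 1); }
static void policy_weak_unref(policy *p) {
  if (atomic_fetch_sub(&p->ref_pair, 1) == 1) p->destroy(p);
}
static void policy_strong_ref(policy *p) {
  atomic_fetch_add(&p->ref_pair, STRONG_ONE);
}
static void policy_strong_unref(policy *p) {
  uint_fast64_t old = atomic_fetch_sub(&p->ref_pair, STRONG_ONE);
  if ((old >> WEAK_BITS) == 1) p->shutdown(p); /* last strong ref gone */
  policy_weak_unref(p); /* release the strong ref's implicit weak ref */
}

A pending pick would hold a weak reference (GRPC_LB_POLICY_WEAK_REF) so the policy stays addressable after shutdown begins, which is exactly the destroy-on-last-weak-unref path in grpc_lb_policy_weak_unref above.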

@ -71,11 +71,8 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len) {
resolver->vtable->channel_saw_error(exec_ctx, resolver, failing_address,
failing_address_len);
grpc_resolver *resolver) {
resolver->vtable->channel_saw_error(exec_ctx, resolver);
}
void grpc_resolver_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,

@ -35,8 +35,8 @@
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_RESOLVER_H
#include "src/core/client_config/client_config.h"
#include "src/core/client_config/subchannel.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/sockaddr.h"
typedef struct grpc_resolver grpc_resolver;
typedef struct grpc_resolver_vtable grpc_resolver_vtable;
@ -51,9 +51,7 @@ struct grpc_resolver {
struct grpc_resolver_vtable {
void (*destroy)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*shutdown)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
void (*channel_saw_error)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
void (*next)(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config, grpc_closure *on_complete);
};
@ -81,9 +79,7 @@ void grpc_resolver_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver);
/** Notification that the channel has seen an error on some address.
Can be used as a hint that re-resolution is desirable soon. */
void grpc_resolver_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *resolver);
/** Get the next client config. Called by the channel to fetch a new
configuration. Expected to set *target_config with a new configuration,

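With the failing-address parameter gone, channel_saw_error is now purely a hint that re-resolution would be useful soon, as the comment above says. A minimal custom resolver would wire the four vtable entries up roughly as follows; this is a sketch only, and the embedded base field plus the init/refcount boilerplate are assumptions borrowed from the dns_resolver.c pattern rather than anything shown in this hunk.

#include "src/core/client_config/resolver.h"

typedef struct {
  grpc_resolver base; /* assumed embedding, as the concrete resolvers do */
  /* ... implementation state (mutex, cached config, ...) ... */
} my_resolver;

static void my_destroy(grpc_exec_ctx *exec_ctx, grpc_resolver *r) {
  /* free implementation state */
}
static void my_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r) {
  /* fail any pending next() callback */
}
static void my_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r) {
  /* hint only: kick off a fresh resolution if one is not already running */
}
static void my_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
                    grpc_client_config **target_config,
                    grpc_closure *on_complete) {
  /* set *target_config and schedule on_complete once a config is available */
}

static const grpc_resolver_vtable my_resolver_vtable = {
    my_destroy, my_shutdown, my_channel_saw_error, my_next};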
@ -40,7 +40,6 @@
#include <grpc/support/string_util.h>
#include "src/core/client_config/lb_policy_registry.h"
#include "src/core/client_config/subchannel_factory_decorators/add_channel_arg.h"
#include "src/core/iomgr/resolve_address.h"
#include "src/core/support/string.h"
@ -81,9 +80,7 @@ static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
dns_resolver *r);
static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void dns_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -103,8 +100,7 @@ static void dns_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver) {
}
static void dns_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver, struct sockaddr *sa,
int len) {
grpc_resolver *resolver) {
dns_resolver *r = (dns_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (!r->resolving) {

@ -83,9 +83,7 @@ static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *r);
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -107,8 +105,13 @@ static void sockaddr_shutdown(grpc_exec_ctx *exec_ctx,
}
static void sockaddr_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *sa, int len) {}
grpc_resolver *resolver) {
sockaddr_resolver *r = (sockaddr_resolver *)resolver;
gpr_mu_lock(&r->mu);
r->published = 0;
sockaddr_maybe_finish_next_locked(exec_ctx, r);
gpr_mu_unlock(&r->mu);
}
static void sockaddr_next(grpc_exec_ctx *exec_ctx, grpc_resolver *resolver,
grpc_client_config **target_config,

@ -96,9 +96,7 @@ static void zookeeper_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx, grpc_resolver *r);
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *r,
struct sockaddr *failing_address,
int failing_address_len);
grpc_resolver *r);
static void zookeeper_next(grpc_exec_ctx *exec_ctx, grpc_resolver *r,
grpc_client_config **target_config,
grpc_closure *on_complete);
@ -125,8 +123,7 @@ static void zookeeper_shutdown(grpc_exec_ctx *exec_ctx,
}
static void zookeeper_channel_saw_error(grpc_exec_ctx *exec_ctx,
grpc_resolver *resolver,
struct sockaddr *sa, int len) {
grpc_resolver *resolver) {
zookeeper_resolver *r = (zookeeper_resolver *)resolver;
gpr_mu_lock(&r->mu);
if (r->resolving == 0) {

File diff suppressed because it is too large

@ -41,6 +41,7 @@
/** A (sub-)channel that knows how to connect to exactly one target
address. Provides a target for load balancing. */
typedef struct grpc_subchannel grpc_subchannel;
typedef struct grpc_connected_subchannel grpc_connected_subchannel;
typedef struct grpc_subchannel_call grpc_subchannel_call;
typedef struct grpc_subchannel_args grpc_subchannel_args;
@ -49,6 +50,14 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) \
grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
@ -58,6 +67,12 @@ typedef struct grpc_subchannel_args grpc_subchannel_args;
#else
#define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
grpc_subchannel_weak_unref((cl), (p))
#define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
grpc_connected_subchannel_unref((cl), (p))
#define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
grpc_subchannel_call_unref((cl), (p))
@ -69,33 +84,31 @@ void grpc_subchannel_ref(grpc_subchannel *channel
void grpc_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_ref(grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_ref(grpc_connected_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_connected_subchannel_unref(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_ref(grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
void grpc_subchannel_call_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_call *call
GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
/** construct a subchannel call (possibly asynchronously).
*
* If the returned status is 1, the call will return immediately and \a target
* will point to a connected \a subchannel_call instance. Note that \a notify
* will \em not be invoked in this case.
* Otherwise, if the returned status is 0, the subchannel call will be created
* asynchronously, invoking the \a notify callback upon completion. */
int grpc_subchannel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
grpc_pollset *pollset, gpr_atm *target,
grpc_closure *notify);
/** cancel \a call in the waiting state. */
void grpc_subchannel_cancel_create_call(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
gpr_atm *target);
/** construct a subchannel call */
grpc_subchannel_call *grpc_connected_subchannel_create_call(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *connected_subchannel,
grpc_pollset *pollset);
/** process a transport level op */
void grpc_subchannel_process_transport_op(grpc_exec_ctx *exec_ctx,
grpc_subchannel *subchannel,
grpc_transport_op *op);
void grpc_connected_subchannel_process_transport_op(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *subchannel,
grpc_transport_op *op);
/** poll the current connectivity state of a channel */
grpc_connectivity_state grpc_subchannel_check_connectivity(
@ -103,26 +116,22 @@ grpc_connectivity_state grpc_subchannel_check_connectivity(
/** call notify when the connectivity state of a channel changes from *state.
Updates *state with the new state of the channel */
void grpc_subchannel_notify_on_state_change(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_connectivity_state *state,
grpc_closure *notify);
/** Remove \a subscribed_notify from the list of closures to be called on a
* state change if present, returning 1. Otherwise, nothing is done and 0 is
* returned. */
int grpc_subchannel_state_change_unsubscribe(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_closure *subscribed_notify);
/** express interest in \a channel's activities through \a pollset. */
void grpc_subchannel_add_interested_party(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_pollset *pollset);
/** stop following \a channel's activity through \a pollset. */
void grpc_subchannel_del_interested_party(grpc_exec_ctx *exec_ctx,
grpc_subchannel *channel,
grpc_pollset *pollset);
void grpc_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
grpc_closure *notify);
void grpc_connected_subchannel_notify_on_state_change(
grpc_exec_ctx *exec_ctx, grpc_connected_subchannel *channel,
grpc_pollset_set *interested_parties, grpc_connectivity_state *state,
grpc_closure *notify);
void grpc_connected_subchannel_ping(grpc_exec_ctx *exec_ctx,
grpc_connected_subchannel *channel,
grpc_closure *notify);
/** retrieve the grpc_connected_subchannel - or NULL if called before
the subchannel becomes connected */
grpc_connected_subchannel *grpc_subchannel_get_connected_subchannel(
grpc_subchannel *subchannel);
/** continue processing a transport op */
void grpc_subchannel_call_process_op(grpc_exec_ctx *exec_ctx,
@ -147,15 +156,10 @@ struct grpc_subchannel_args {
/** Address to connect to */
struct sockaddr *addr;
size_t addr_len;
/** master channel */
grpc_channel *master;
};
/** create a subchannel given a connector */
grpc_subchannel *grpc_subchannel_create(grpc_connector *connector,
grpc_subchannel_args *args);
/** Return the master channel associated with the subchannel */
grpc_channel *grpc_subchannel_get_master(grpc_subchannel *subchannel);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_H */
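The header now distinguishes a grpc_subchannel (something that can be asked to connect and watched for connectivity changes) from a grpc_connected_subchannel (an established transport that calls are created against). Below is a hedged sketch of how a caller might use the new entry points; only the declarations above come from this header, while the helper name and the REF/UNREF discipline around create_call are assumptions.

/* Start a call on a subchannel if, and only if, it is already connected.
 * Returns NULL when the caller must instead wait for a connectivity
 * notification via grpc_subchannel_notify_on_state_change. */
static grpc_subchannel_call *maybe_create_call(grpc_exec_ctx *exec_ctx,
                                               grpc_subchannel *subchannel,
                                               grpc_pollset *pollset) {
  grpc_subchannel_call *call;
  grpc_connected_subchannel *con =
      grpc_subchannel_get_connected_subchannel(subchannel);
  if (con == NULL) return NULL; /* not connected yet */
  GRPC_CONNECTED_SUBCHANNEL_REF(con, "call"); /* assumed ownership discipline */
  call = grpc_connected_subchannel_create_call(exec_ctx, con, pollset);
  GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "call");
  return call;
}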

@ -1,86 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/client_config/subchannel_factory_decorators/merge_channel_args.h"
#include <grpc/support/alloc.h>
#include "src/core/channel/channel_args.h"
typedef struct {
grpc_subchannel_factory base;
gpr_refcount refs;
grpc_subchannel_factory *wrapped;
grpc_channel_args *merge_args;
} merge_args_factory;
static void merge_args_factory_ref(grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
gpr_ref(&f->refs);
}
static void merge_args_factory_unref(grpc_exec_ctx *exec_ctx,
grpc_subchannel_factory *scf) {
merge_args_factory *f = (merge_args_factory *)scf;
if (gpr_unref(&f->refs)) {
grpc_subchannel_factory_unref(exec_ctx, f->wrapped);
grpc_channel_args_destroy(f->merge_args);
gpr_free(f);
}
}
static grpc_subchannel *merge_args_factory_create_subchannel(
grpc_exec_ctx *exec_ctx, grpc_subchannel_factory *scf,
grpc_subchannel_args *args) {
merge_args_factory *f = (merge_args_factory *)scf;
grpc_channel_args *final_args =
grpc_channel_args_merge(args->args, f->merge_args);
grpc_subchannel *s;
args->args = final_args;
s = grpc_subchannel_factory_create_subchannel(exec_ctx, f->wrapped, args);
grpc_channel_args_destroy(final_args);
return s;
}
static const grpc_subchannel_factory_vtable merge_args_factory_vtable = {
merge_args_factory_ref, merge_args_factory_unref,
merge_args_factory_create_subchannel};
grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args) {
merge_args_factory *f = gpr_malloc(sizeof(*f));
f->base.vtable = &merge_args_factory_vtable;
gpr_ref_init(&f->refs, 1);
grpc_subchannel_factory_ref(input);
f->wrapped = input;
f->merge_args = grpc_channel_args_copy(args);
return &f->base;
}

@ -1,46 +0,0 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H
#define GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H
#include "src/core/client_config/subchannel_factory.h"
/** Takes a subchannel factory, returns a new one that mutates incoming
channel_args by adding a new argument; ownership of input, args is retained
by the caller. */
grpc_subchannel_factory *grpc_subchannel_factory_merge_channel_args(
grpc_subchannel_factory *input, const grpc_channel_args *args);
#endif /* GRPC_INTERNAL_CORE_CLIENT_CONFIG_SUBCHANNEL_FACTORY_DECORATORS_MERGE_CHANNEL_ARGS_H \
*/

@ -91,6 +91,14 @@ error:
return 0;
}
static void *zalloc_gpr(void* opaque, unsigned int items, unsigned int size) {
return gpr_malloc(items * size);
}
static void zfree_gpr(void* opaque, void *address) {
gpr_free(address);
}
static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
int gzip) {
z_stream zs;
@ -99,6 +107,8 @@ static int zlib_compress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
zs.zalloc = zalloc_gpr;
zs.zfree = zfree_gpr;
r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0),
8, Z_DEFAULT_STRATEGY);
if (r != Z_OK) {
@ -125,6 +135,8 @@ static int zlib_decompress(gpr_slice_buffer* input, gpr_slice_buffer* output,
size_t count_before = output->count;
size_t length_before = output->length;
memset(&zs, 0, sizeof(zs));
zs.zalloc = zalloc_gpr;
zs.zfree = zfree_gpr;
r = inflateInit2(&zs, 15 | (gzip ? 16 : 0));
if (r != Z_OK) {
gpr_log(GPR_ERROR, "inflateInit2 returns %d", r);
@ -150,7 +162,7 @@ static int copy(gpr_slice_buffer* input, gpr_slice_buffer* output) {
return 1;
}
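The zalloc_gpr/zfree_gpr hooks added above route zlib's internal allocations through gpr_malloc/gpr_free instead of raw malloc, so an embedder that installs custom allocators via the new gpr_set_allocation_functions (later in this diff) also covers compression buffers. The hook shape matches zlib's alloc_func/free_func callbacks; a self-contained sketch of the same wiring:

#include <string.h>
#include <zlib.h>
#include <grpc/support/alloc.h>

static void *zalloc_through_gpr(void *opaque, unsigned int items,
                                unsigned int size) {
  (void)opaque;
  return gpr_malloc((size_t)items * size);
}
static void zfree_through_gpr(void *opaque, void *address) {
  (void)opaque;
  gpr_free(address);
}

/* Prepare a gzip deflate stream whose scratch memory comes from gpr_malloc. */
int begin_gzip_deflate(z_stream *zs) {
  memset(zs, 0, sizeof(*zs));
  zs->zalloc = zalloc_through_gpr;
  zs->zfree = zfree_through_gpr;
  /* 15 | 16: 32KB window with a gzip wrapper, as in zlib_compress above */
  return deflateInit2(zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | 16, 8,
                      Z_DEFAULT_STRATEGY);
}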
int compress_inner(grpc_compression_algorithm algorithm,
static int compress_inner(grpc_compression_algorithm algorithm,
gpr_slice_buffer* input, gpr_slice_buffer* output) {
switch (algorithm) {
case GRPC_COMPRESS_NONE:

@ -53,6 +53,7 @@ typedef struct {
size_t next_address;
grpc_endpoint *ep;
char *host;
char *ssl_host_override;
gpr_timespec deadline;
int have_read_byte;
const grpc_httpcli_handshaker *handshaker;
@ -106,6 +107,7 @@ static void finish(grpc_exec_ctx *exec_ctx, internal_request *req,
}
gpr_slice_unref(req->request_text);
gpr_free(req->host);
gpr_free(req->ssl_host_override);
grpc_iomgr_unregister_object(&req->iomgr_obj);
gpr_slice_buffer_destroy(&req->incoming);
gpr_slice_buffer_destroy(&req->outgoing);
@ -180,8 +182,10 @@ static void on_connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
next_address(exec_ctx, req);
return;
}
req->handshaker->handshake(exec_ctx, req, req->ep, req->host,
on_handshake_done);
req->handshaker->handshake(
exec_ctx, req, req->ep,
req->ssl_host_override ? req->ssl_host_override : req->host,
on_handshake_done);
}
static void next_address(grpc_exec_ctx *exec_ctx, internal_request *req) {
@ -231,6 +235,7 @@ static void internal_request_begin(
gpr_slice_buffer_init(&req->outgoing);
grpc_iomgr_register_object(&req->iomgr_obj, name);
req->host = gpr_strdup(request->host);
req->ssl_host_override = gpr_strdup(request->ssl_host_override);
grpc_pollset_set_add_pollset(exec_ctx, &req->context->pollset_set,
req->pollset);

@ -74,6 +74,8 @@ extern const grpc_httpcli_handshaker grpc_httpcli_ssl;
typedef struct grpc_httpcli_request {
/* The host name to connect to */
char *host;
/* The host to verify in the SSL handshake (or NULL) */
char *ssl_host_override;
/* The path of the resource to fetch */
char *path;
/* Additional headers: count and key/values; the following are supplied

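The new ssl_host_override field lets the HTTP client connect to one address (host) while verifying the peer certificate against a different name, which is how on_connected now picks the handshake host above. A hedged sketch of filling in a request follows; the values are made up, and issuing the request (grpc_httpcli_get and its response callback) is part of the existing httpcli API rather than this hunk.

#include <string.h>
#include "src/core/httpcli/httpcli.h"

/* Reach an endpoint by IP but validate the certificate for its DNS name. */
static void build_request(grpc_httpcli_request *req) {
  memset(req, 0, sizeof(*req));
  req->host = (char *)"10.0.0.5";                               /* TCP target */
  req->ssl_host_override = (char *)"metadata.example.internal"; /* TLS name */
  req->path = (char *)"/healthz";
}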
@ -68,7 +68,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
tsi_result result = TSI_OK;
tsi_handshaker *handshaker;
if (c->handshaker_factory == NULL) {
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
return;
}
result = tsi_ssl_handshaker_factory_create_handshaker(
@ -76,7 +76,7 @@ static void httpcli_ssl_do_handshake(grpc_exec_ctx *exec_ctx,
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Handshaker creation failed with error %s.",
tsi_result_to_string(result));
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@ -149,7 +149,6 @@ typedef struct {
static void on_secure_transport_setup_done(grpc_exec_ctx *exec_ctx, void *rp,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
on_done_closure *c = rp;
if (status != GRPC_SECURITY_OK) {

@ -36,6 +36,7 @@
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/endpoint_pair.h"
#include "src/core/iomgr/socket_utils_posix.h"
#include <errno.h>
#include <fcntl.h>
@ -56,6 +57,8 @@ static void create_sockets(int sv[2]) {
GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
flags = fcntl(sv[1], F_GETFL, 0);
GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[0]));
GPR_ASSERT(grpc_set_socket_no_sigpipe_if_possible(sv[1]));
}
grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char *name,

@ -43,6 +43,7 @@
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/useful.h>
#define CLOSURE_NOT_READY ((grpc_closure *)0)
@ -158,7 +159,10 @@ void grpc_fd_global_shutdown(void) {
grpc_fd *grpc_fd_create(int fd, const char *name) {
grpc_fd *r = alloc_fd(fd);
grpc_iomgr_register_object(&r->iomgr_object, name);
char *name2;
gpr_asprintf(&name2, "%s fd=%d", name, fd);
grpc_iomgr_register_object(&r->iomgr_object, name2);
gpr_free(name2);
#ifdef GRPC_FD_REF_COUNT_DEBUG
gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, r, name);
#endif

@ -170,6 +170,7 @@ void grpc_fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
void grpc_fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd);
/* Reference counting for fds */
/*#define GRPC_FD_REF_COUNT_DEBUG*/
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line);

@ -49,13 +49,19 @@
#include "src/core/iomgr/pollset_set_windows.h"
#endif
void grpc_pollset_set_init(grpc_pollset_set* pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset);
void grpc_pollset_set_init(grpc_pollset_set *pollset_set);
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set);
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set,
grpc_pollset *pollset);
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item);
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item);
#endif /* GRPC_INTERNAL_CORE_IOMGR_POLLSET_H */

@ -52,9 +52,10 @@ void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
size_t i;
gpr_mu_destroy(&pollset_set->mu);
for (i = 0; i < pollset_set->fd_count; i++) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
}
gpr_free(pollset_set->pollsets);
gpr_free(pollset_set->pollset_sets);
gpr_free(pollset_set->fds);
}
@ -73,7 +74,7 @@ void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
GRPC_FD_UNREF(pollset_set->fds[i], "pollset");
GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
} else {
grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
pollset_set->fds[j++] = pollset_set->fds[i];
@ -99,6 +100,46 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
gpr_mu_unlock(&pollset_set->mu);
}
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
size_t i, j;
gpr_mu_lock(&bag->mu);
if (bag->pollset_set_count == bag->pollset_set_capacity) {
bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
bag->pollset_sets =
gpr_realloc(bag->pollset_sets,
bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
}
bag->pollset_sets[bag->pollset_set_count++] = item;
for (i = 0, j = 0; i < bag->fd_count; i++) {
if (grpc_fd_is_orphaned(bag->fds[i])) {
GRPC_FD_UNREF(bag->fds[i], "pollset_set");
} else {
grpc_pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
bag->fds[j++] = bag->fds[i];
}
}
bag->fd_count = j;
gpr_mu_unlock(&bag->mu);
}
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *bag,
grpc_pollset_set *item) {
size_t i;
gpr_mu_lock(&bag->mu);
for (i = 0; i < bag->pollset_set_count; i++) {
if (bag->pollset_sets[i] == item) {
bag->pollset_set_count--;
GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
bag->pollset_sets[bag->pollset_set_count]);
break;
}
}
gpr_mu_unlock(&bag->mu);
}
void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
grpc_pollset_set *pollset_set, grpc_fd *fd) {
size_t i;
@ -113,6 +154,9 @@ void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
for (i = 0; i < pollset_set->pollset_count; i++) {
grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
grpc_pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
@ -129,6 +173,9 @@ void grpc_pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
break;
}
}
for (i = 0; i < pollset_set->pollset_set_count; i++) {
grpc_pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
}
gpr_mu_unlock(&pollset_set->mu);
}
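grpc_pollset_set_add_pollset_set nests one pollset_set (the item) inside another (the bag): the bag's current fds are copied into the item on insertion, and any fd added to the bag afterwards is forwarded into every contained item, so pollsets registered further down end up polling it. A small wiring sketch with illustrative names (the exec_ctx plumbing is assumed to exist at the call site):

/* Nest `child` inside `parent`; fds later added to `parent` also reach
 * `child`, and from there whatever pollsets `child` holds. */
static void nest_and_watch(grpc_exec_ctx *exec_ctx, grpc_pollset_set *parent,
                           grpc_pollset_set *child, grpc_fd *fd) {
  grpc_pollset_set_add_pollset_set(exec_ctx, parent, child);
  grpc_pollset_set_add_fd(exec_ctx, parent, fd);
}

The child is detached again with grpc_pollset_set_del_pollset_set when its owner goes away, mirroring the add/del pairs above.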

@ -44,6 +44,10 @@ typedef struct grpc_pollset_set {
size_t pollset_capacity;
grpc_pollset **pollsets;
size_t pollset_set_count;
size_t pollset_set_capacity;
struct grpc_pollset_set **pollset_sets;
size_t fd_count;
size_t fd_capacity;
grpc_fd **fds;

@ -49,4 +49,12 @@ void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* pollset_set,
grpc_pollset* pollset) {}
void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
grpc_pollset_set* bag,
grpc_pollset_set* item) {}
#endif /* GPR_WINSOCK_SOCKET */

@ -411,7 +411,6 @@ static grpc_tcp_listener *add_socket_to_server(grpc_tcp_server *s, int fd,
grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
const void *addr, size_t addr_len) {
int allocated_port = -1;
grpc_tcp_listener *sp;
grpc_tcp_listener *sp2 = NULL;
int fd;
@ -464,14 +463,13 @@ grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
addr_len = sizeof(wild6);
fd = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode);
sp = add_socket_to_server(s, fd, addr, addr_len);
allocated_port = sp->port;
if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
goto done;
}
/* If we didn't get a dualstack socket, also listen on 0.0.0.0. */
if (port == 0 && allocated_port > 0) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, allocated_port);
if (port == 0 && sp != NULL) {
grpc_sockaddr_set_port((struct sockaddr *)&wild4, sp->port);
sp2 = sp;
}
addr = (struct sockaddr *)&wild4;
@ -488,8 +486,8 @@ grpc_tcp_listener *grpc_tcp_server_add_port(grpc_tcp_server *s,
addr_len = sizeof(addr4_copy);
}
sp = add_socket_to_server(s, fd, addr, addr_len);
sp->sibling = sp2;
if (sp2) sp2->is_sibling = 1;
if (sp != NULL) sp->sibling = sp2;
if (sp2 != NULL) sp2->is_sibling = 1;
done:
gpr_free(allocated_addr);

@ -197,7 +197,7 @@ static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->read_slice = gpr_slice_malloc(8192);
buffer.len = GPR_SLICE_LENGTH(tcp->read_slice);
buffer.len = (ULONG)GPR_SLICE_LENGTH(tcp->read_slice); /* we know the slice size fits in 32 bits */
buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);
TCP_REF(tcp, "read");
@ -273,6 +273,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
WSABUF local_buffers[16];
WSABUF *allocated = NULL;
WSABUF *buffers = local_buffers;
size_t len;
if (tcp->shutting_down) {
grpc_exec_ctx_enqueue(exec_ctx, cb, 0);
@ -281,19 +282,21 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
tcp->write_cb = cb;
tcp->write_slices = slices;
GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
for (i = 0; i < tcp->write_slices->count; i++) {
buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
GPR_ASSERT(len <= ULONG_MAX);
buffers[i].len = (ULONG) len;
buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
@ -322,7 +325,7 @@ static void win_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
/* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
operation, this time asynchronously. */
memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
status = WSASend(socket->socket, buffers, (DWORD)tcp->write_slices->count,
&bytes_sent, 0, &socket->write_info.overlapped, NULL);
if (allocated) gpr_free(allocated);

@ -38,6 +38,7 @@
#include <grpc/support/port_platform.h>
#ifdef GRPC_NEED_UDP
#ifdef GPR_POSIX_SOCKET
#include "src/core/iomgr/udp_server.h"
@ -435,3 +436,4 @@ void grpc_udp_server_write(server_port *sp, const char *buffer, size_t buf_len,
}
#endif
#endif

@ -39,11 +39,11 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include "src/core/support/string.h"
#include "src/core/channel/channel_stack.h"
#include "src/core/security/security_context.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/credentials.h"
#include "src/core/security/security_connector.h"
#include "src/core/security/security_context.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/transport/static_metadata.h"

@ -39,7 +39,7 @@
#include "src/core/channel/channel_args.h"
#include "src/core/channel/http_client_filter.h"
#include "src/core/httpcli/httpcli.h"
#include "src/core/iomgr/iomgr.h"
#include "src/core/iomgr/executor.h"
#include "src/core/json/json.h"
#include "src/core/support/string.h"
#include "src/core/surface/api_trace.h"
@ -48,7 +48,6 @@
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
/* -- Common. -- */
@ -792,15 +791,14 @@ static void md_only_test_destruct(grpc_call_credentials *creds) {
grpc_credentials_md_store_unref(c->md_store);
}
static void on_simulated_token_fetch_done(void *user_data) {
static void on_simulated_token_fetch_done(grpc_exec_ctx *exec_ctx,
void *user_data, int success) {
grpc_credentials_metadata_request *r =
(grpc_credentials_metadata_request *)user_data;
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)r->creds;
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
r->cb(&exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
r->cb(exec_ctx, r->user_data, c->md_store->entries, c->md_store->num_entries,
GRPC_CREDENTIALS_OK);
grpc_credentials_metadata_request_destroy(r);
grpc_exec_ctx_finish(&exec_ctx);
}
static void md_only_test_get_request_metadata(
@ -810,10 +808,10 @@ static void md_only_test_get_request_metadata(
grpc_md_only_test_credentials *c = (grpc_md_only_test_credentials *)creds;
if (c->is_async) {
gpr_thd_id thd_id;
grpc_credentials_metadata_request *cb_arg =
grpc_credentials_metadata_request_create(creds, cb, user_data);
gpr_thd_new(&thd_id, on_simulated_token_fetch_done, cb_arg, NULL);
grpc_executor_enqueue(
grpc_closure_create(on_simulated_token_fetch_done, cb_arg), 1);
} else {
cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK);
}

@ -93,6 +93,14 @@ typedef enum {
/* It is the caller's responsibility to gpr_free the result if not NULL. */
char *grpc_get_well_known_google_credentials_file_path(void);
/* Implementation function for the different platforms. */
char *grpc_get_well_known_google_credentials_file_path_impl(void);
/* Override for testing only. Not thread-safe */
typedef char *(*grpc_well_known_credentials_path_getter)(void);
void grpc_override_well_known_credentials_path_getter(
grpc_well_known_credentials_path_getter getter);
/* --- grpc_channel_credentials. --- */
typedef struct {
@ -201,6 +209,7 @@ grpc_credentials_status
grpc_oauth2_token_fetcher_credentials_parse_server_response(
const struct grpc_httpcli_response *response,
grpc_credentials_md_store **token_md, gpr_timespec *token_lifetime);
void grpc_flush_cached_google_default_credentials(void);
/* Metadata-only credentials with the specified key and value where

@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
char *grpc_get_well_known_google_credentials_file_path(void) {
char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *home = gpr_getenv("HOME");
if (home == NULL) {

@ -44,7 +44,7 @@
#include "src/core/support/env.h"
#include "src/core/support/string.h"
char *grpc_get_well_known_google_credentials_file_path(void) {
char *grpc_get_well_known_google_credentials_file_path_impl(void) {
char *result = NULL;
char *appdata_path = gpr_getenv("APPDATA");
if (appdata_path == NULL) {

@ -241,5 +241,20 @@ void grpc_flush_cached_google_default_credentials(void) {
grpc_channel_credentials_unref(default_credentials);
default_credentials = NULL;
}
compute_engine_detection_done = 0;
gpr_mu_unlock(&g_mu);
}
/* -- Well known credentials path. -- */
static grpc_well_known_credentials_path_getter creds_path_getter = NULL;
char *grpc_get_well_known_google_credentials_file_path(void) {
if (creds_path_getter != NULL) return creds_path_getter();
return grpc_get_well_known_google_credentials_file_path_impl();
}
void grpc_override_well_known_credentials_path_getter(
grpc_well_known_credentials_path_getter getter) {
creds_path_getter = getter;
}
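grpc_override_well_known_credentials_path_getter gives tests a hook to redirect the well-known credentials file lookup without touching HOME or APPDATA, and passing NULL restores the platform default. Since the header notes that the caller frees the returned path, an override should hand back freshly allocated memory. A test-only sketch with a made-up fixture path:

#include <grpc/support/string_util.h>
#include "src/core/security/credentials.h"

static char *test_creds_path(void) {
  /* hypothetical fixture location */
  return gpr_strdup("/tmp/test_service_account.json");
}

static void run_with_fixture_credentials(void (*test_body)(void)) {
  grpc_override_well_known_credentials_path_getter(test_creds_path);
  test_body();
  grpc_flush_cached_google_default_credentials();
  grpc_override_well_known_credentials_path_getter(NULL); /* restore default */
}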

@ -64,12 +64,39 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *setup,
int success);
static void security_connector_remove_handshake(grpc_security_handshake *h) {
grpc_security_connector_handshake_list *node;
grpc_security_connector_handshake_list *tmp;
grpc_security_connector *sc = h->connector;
gpr_mu_lock(&sc->mu);
node = sc->handshaking_handshakes;
if (node && node->handshake == h) {
sc->handshaking_handshakes = node->next;
gpr_free(node);
gpr_mu_unlock(&sc->mu);
return;
}
while (node) {
if (node->next->handshake == h) {
tmp = node->next;
node->next = node->next->next;
gpr_free(tmp);
gpr_mu_unlock(&sc->mu);
return;
}
node = node->next;
}
gpr_mu_unlock(&sc->mu);
}
static void security_handshake_done(grpc_exec_ctx *exec_ctx,
grpc_security_handshake *h,
int is_success) {
if (!h->connector->is_client_side) {
security_connector_remove_handshake(h);
}
if (is_success) {
h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->wrapped_endpoint,
h->secure_endpoint);
h->cb(exec_ctx, h->user_data, GRPC_SECURITY_OK, h->secure_endpoint);
} else {
if (h->secure_endpoint != NULL) {
grpc_endpoint_shutdown(exec_ctx, h->secure_endpoint);
@ -77,8 +104,7 @@ static void security_handshake_done(grpc_exec_ctx *exec_ctx,
} else {
grpc_endpoint_destroy(exec_ctx, h->wrapped_endpoint);
}
h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, h->wrapped_endpoint,
NULL);
h->cb(exec_ctx, h->user_data, GRPC_SECURITY_ERROR, NULL);
}
if (h->handshaker != NULL) tsi_handshaker_destroy(h->handshaker);
if (h->handshake_buffer != NULL) gpr_free(h->handshake_buffer);
@ -268,6 +294,7 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
grpc_security_connector_handshake_list *handshake_node;
grpc_security_handshake *h = gpr_malloc(sizeof(grpc_security_handshake));
memset(h, 0, sizeof(grpc_security_handshake));
h->handshaker = handshaker;
@ -284,5 +311,19 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
gpr_slice_buffer_init(&h->left_overs);
gpr_slice_buffer_init(&h->outgoing);
gpr_slice_buffer_init(&h->incoming);
if (!connector->is_client_side) {
handshake_node = gpr_malloc(sizeof(grpc_security_connector_handshake_list));
handshake_node->handshake = h;
gpr_mu_lock(&connector->mu);
handshake_node->next = connector->handshaking_handshakes;
connector->handshaking_handshakes = handshake_node;
gpr_mu_unlock(&connector->mu);
}
send_handshake_bytes_to_peer(exec_ctx, h);
}
void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx,
void *handshake) {
grpc_security_handshake *h = handshake;
grpc_endpoint_shutdown(exec_ctx, h->wrapped_endpoint);
}
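security_connector_remove_handshake and the registration block in grpc_do_security_handshake keep every in-flight server-side handshake on a mutex-protected singly linked list, so that grpc_security_connector_shutdown (later in this diff) can walk the list and abort them all. A standalone sketch of the same bookkeeping using a pointer-to-pointer walk for removal; the types and names are illustrative, not gRPC's:

#include <grpc/support/alloc.h>
#include <grpc/support/sync.h>

typedef struct op_node {
  void *op;
  struct op_node *next;
} op_node;
typedef struct {
  gpr_mu mu;
  op_node *head;
} op_tracker;

static void tracker_add(op_tracker *t, void *op) {
  op_node *n = gpr_malloc(sizeof(*n));
  n->op = op;
  gpr_mu_lock(&t->mu);
  n->next = t->head;
  t->head = n;
  gpr_mu_unlock(&t->mu);
}

static void tracker_remove(op_tracker *t, void *op) {
  op_node **pp;
  gpr_mu_lock(&t->mu);
  for (pp = &t->head; *pp != NULL; pp = &(*pp)->next) {
    if ((*pp)->op == op) {
      op_node *dead = *pp;
      *pp = dead->next;
      gpr_free(dead);
      break;
    }
  }
  gpr_mu_unlock(&t->mu);
}

static void tracker_shutdown_all(op_tracker *t, void (*cancel)(void *op)) {
  gpr_mu_lock(&t->mu);
  while (t->head != NULL) {
    op_node *dead = t->head;
    t->head = dead->next;
    cancel(dead->op);
    gpr_free(dead);
  }
  gpr_mu_unlock(&t->mu);
}

The pointer-to-pointer form removes head and interior nodes with a single loop and never dereferences a NULL next pointer, whereas the removal loop above relies on the sought handshake always being present in the list.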

@ -45,4 +45,6 @@ void grpc_do_security_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_handshake_done_cb cb,
void *user_data);
void grpc_security_handshake_shutdown(grpc_exec_ctx *exec_ctx, void *handshake);
#endif /* GRPC_INTERNAL_CORE_SECURITY_HANDSHAKE_H */

@ -215,8 +215,8 @@ static char *encoded_jwt_claim(const grpc_auth_json_key *json_key,
gpr_log(GPR_INFO, "Cropping token lifetime to maximum allowed value.");
expiration = gpr_time_add(now, grpc_max_auth_token_lifetime);
}
gpr_ltoa(now.tv_sec, now_str);
gpr_ltoa(expiration.tv_sec, expiration_str);
gpr_int64toa(now.tv_sec, now_str);
gpr_int64toa(expiration.tv_sec, expiration_str);
child =
create_child(NULL, json, "iss", json_key->client_email, GRPC_JSON_STRING);

@ -102,13 +102,29 @@ const tsi_peer_property *tsi_peer_get_property_by_name(const tsi_peer *peer,
return NULL;
}
void grpc_security_connector_shutdown(grpc_exec_ctx *exec_ctx,
grpc_security_connector *connector) {
grpc_security_connector_handshake_list *tmp;
if (!connector->is_client_side) {
gpr_mu_lock(&connector->mu);
while (connector->handshaking_handshakes) {
tmp = connector->handshaking_handshakes;
grpc_security_handshake_shutdown(
exec_ctx, connector->handshaking_handshakes->handshake);
connector->handshaking_handshakes = tmp->next;
gpr_free(tmp);
}
gpr_mu_unlock(&connector->mu);
}
}
void grpc_security_connector_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_connector *sc,
grpc_endpoint *nonsecure_endpoint,
grpc_security_handshake_done_cb cb,
void *user_data) {
if (sc == NULL || nonsecure_endpoint == NULL) {
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, GRPC_SECURITY_ERROR, NULL);
} else {
sc->vtable->do_handshake(exec_ctx, sc, nonsecure_endpoint, cb, user_data);
}
@ -219,6 +235,7 @@ static void fake_channel_destroy(grpc_security_connector *sc) {
static void fake_server_destroy(grpc_security_connector *sc) {
GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
gpr_mu_destroy(&sc->mu);
gpr_free(sc);
}
@ -319,6 +336,7 @@ grpc_security_connector *grpc_fake_server_security_connector_create(void) {
c->is_client_side = 0;
c->vtable = &fake_server_vtable;
c->url_scheme = GRPC_FAKE_SECURITY_URL_SCHEME;
gpr_mu_init(&c->mu);
return c;
}
@ -354,10 +372,12 @@ static void ssl_channel_destroy(grpc_security_connector *sc) {
static void ssl_server_destroy(grpc_security_connector *sc) {
grpc_ssl_server_security_connector *c =
(grpc_ssl_server_security_connector *)sc;
if (c->handshaker_factory != NULL) {
tsi_ssl_handshaker_factory_destroy(c->handshaker_factory);
}
GRPC_AUTH_CONTEXT_UNREF(sc->auth_context, "connector");
gpr_mu_destroy(&sc->mu);
gpr_free(sc);
}
@ -390,7 +410,7 @@ static void ssl_channel_do_handshake(grpc_exec_ctx *exec_ctx,
: c->target_name,
&handshaker);
if (status != GRPC_SECURITY_OK) {
cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, status, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@ -408,7 +428,7 @@ static void ssl_server_do_handshake(grpc_exec_ctx *exec_ctx,
grpc_security_status status =
ssl_create_handshaker(c->handshaker_factory, 0, NULL, &handshaker);
if (status != GRPC_SECURITY_OK) {
cb(exec_ctx, user_data, status, nonsecure_endpoint, NULL);
cb(exec_ctx, user_data, status, NULL);
} else {
grpc_do_security_handshake(exec_ctx, handshaker, sc, nonsecure_endpoint, cb,
user_data);
@ -691,6 +711,7 @@ grpc_security_status grpc_ssl_server_security_connector_create(
*sc = NULL;
goto error;
}
gpr_mu_init(&c->base.mu);
*sc = &c->base;
gpr_free((void *)alpn_protocol_strings);
gpr_free(alpn_protocol_string_lengths);

@ -67,7 +67,6 @@ typedef void (*grpc_security_check_cb)(grpc_exec_ctx *exec_ctx, void *user_data,
typedef void (*grpc_security_handshake_done_cb)(grpc_exec_ctx *exec_ctx,
void *user_data,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint);
typedef struct {
@ -80,13 +79,22 @@ typedef struct {
void *user_data);
} grpc_security_connector_vtable;
typedef struct grpc_security_connector_handshake_list {
void *handshake;
struct grpc_security_connector_handshake_list *next;
} grpc_security_connector_handshake_list;
struct grpc_security_connector {
const grpc_security_connector_vtable *vtable;
gpr_refcount refcount;
int is_client_side;
const char *url_scheme;
grpc_auth_context *auth_context; /* Populated after the peer is checked. */
const grpc_channel_args *channel_args; /* Server side only. */
/* Used on server side only. */
/* TODO(yangg) maybe create a grpc_server_security_connector with these */
gpr_mu mu;
grpc_security_connector_handshake_list *handshaking_handshakes;
const grpc_channel_args *channel_args;
};
/* Refcounting. */
@ -127,6 +135,9 @@ grpc_security_status grpc_security_connector_check_peer(
grpc_security_connector *sc, tsi_peer peer, grpc_security_check_cb cb,
void *user_data);
void grpc_security_connector_shutdown(grpc_exec_ctx *exec_ctx,
grpc_security_connector *connector);
/* Util to encapsulate the connector in a channel arg. */
grpc_arg grpc_security_connector_to_arg(grpc_security_connector *sc);

@ -52,17 +52,11 @@
#include <grpc/support/sync.h>
#include <grpc/support/useful.h>
typedef struct tcp_endpoint_list {
grpc_endpoint *tcp_endpoint;
struct tcp_endpoint_list *next;
} tcp_endpoint_list;
typedef struct grpc_server_secure_state {
grpc_server *server;
grpc_tcp_server *tcp;
grpc_security_connector *sc;
grpc_server_credentials *creds;
tcp_endpoint_list *handshaking_tcp_endpoints;
int is_shutdown;
gpr_mu mu;
gpr_refcount refcount;
@ -103,52 +97,28 @@ static void setup_transport(grpc_exec_ctx *exec_ctx, void *statep,
grpc_channel_args_destroy(args_copy);
}
static int remove_tcp_from_list_locked(grpc_server_secure_state *state,
grpc_endpoint *tcp) {
tcp_endpoint_list *node = state->handshaking_tcp_endpoints;
tcp_endpoint_list *tmp = NULL;
if (node && node->tcp_endpoint == tcp) {
state->handshaking_tcp_endpoints = state->handshaking_tcp_endpoints->next;
gpr_free(node);
return 0;
}
while (node) {
if (node->next->tcp_endpoint == tcp) {
tmp = node->next;
node->next = node->next->next;
gpr_free(tmp);
return 0;
}
node = node->next;
}
return -1;
}
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
grpc_server_secure_state *state = statep;
grpc_transport *transport;
if (status == GRPC_SECURITY_OK) {
gpr_mu_lock(&state->mu);
remove_tcp_from_list_locked(state, wrapped_endpoint);
if (!state->is_shutdown) {
transport = grpc_create_chttp2_transport(
exec_ctx, grpc_server_get_channel_args(state->server),
secure_endpoint, 0);
setup_transport(exec_ctx, state, transport);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
} else {
/* We need to consume this here, because the server may already have gone
* away. */
grpc_endpoint_destroy(exec_ctx, secure_endpoint);
if (secure_endpoint) {
gpr_mu_lock(&state->mu);
if (!state->is_shutdown) {
transport = grpc_create_chttp2_transport(
exec_ctx, grpc_server_get_channel_args(state->server),
secure_endpoint, 0);
setup_transport(exec_ctx, state, transport);
grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL, 0);
} else {
/* We need to consume this here, because the server may already have
* gone away. */
grpc_endpoint_destroy(exec_ctx, secure_endpoint);
}
gpr_mu_unlock(&state->mu);
}
gpr_mu_unlock(&state->mu);
} else {
gpr_mu_lock(&state->mu);
remove_tcp_from_list_locked(state, wrapped_endpoint);
gpr_mu_unlock(&state->mu);
gpr_log(GPR_ERROR, "Secure transport failed with error %d", status);
}
state_unref(state);
@ -157,14 +127,7 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *statep,
static void on_accept(grpc_exec_ctx *exec_ctx, void *statep,
grpc_endpoint *tcp) {
grpc_server_secure_state *state = statep;
tcp_endpoint_list *node;
state_ref(state);
node = gpr_malloc(sizeof(tcp_endpoint_list));
node->tcp_endpoint = tcp;
gpr_mu_lock(&state->mu);
node->next = state->handshaking_tcp_endpoints;
state->handshaking_tcp_endpoints = node;
gpr_mu_unlock(&state->mu);
grpc_security_connector_do_handshake(exec_ctx, state->sc, tcp,
on_secure_handshake_done, state);
}
@ -181,14 +144,7 @@ static void destroy_done(grpc_exec_ctx *exec_ctx, void *statep, int success) {
grpc_server_secure_state *state = statep;
state->destroy_callback->cb(exec_ctx, state->destroy_callback->cb_arg,
success);
gpr_mu_lock(&state->mu);
while (state->handshaking_tcp_endpoints != NULL) {
grpc_endpoint_shutdown(exec_ctx,
state->handshaking_tcp_endpoints->tcp_endpoint);
remove_tcp_from_list_locked(state,
state->handshaking_tcp_endpoints->tcp_endpoint);
}
gpr_mu_unlock(&state->mu);
grpc_security_connector_shutdown(exec_ctx, state->sc);
state_unref(state);
}
@ -281,7 +237,6 @@ int grpc_server_add_secure_http2_port(grpc_server *server, const char *addr,
state->sc = sc;
state->creds = grpc_server_credentials_ref(creds);
state->handshaking_tcp_endpoints = NULL;
state->is_shutdown = 0;
gpr_mu_init(&state->mu);
gpr_ref_init(&state->refcount, 1);

@ -34,13 +34,27 @@
#include <grpc/support/alloc.h>
#include <stdlib.h>
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include "src/core/profiling/timers.h"
static gpr_allocation_functions g_alloc_functions = {malloc, realloc, free};
gpr_allocation_functions gpr_get_allocation_functions() {
return g_alloc_functions;
}
void gpr_set_allocation_functions(gpr_allocation_functions functions) {
GPR_ASSERT(functions.malloc_fn != NULL);
GPR_ASSERT(functions.realloc_fn != NULL);
GPR_ASSERT(functions.free_fn != NULL);
g_alloc_functions = functions;
}
void *gpr_malloc(size_t size) {
void *p;
GPR_TIMER_BEGIN("gpr_malloc", 0);
p = malloc(size);
p = g_alloc_functions.malloc_fn(size);
if (!p) {
abort();
}
@ -50,13 +64,13 @@ void *gpr_malloc(size_t size) {
void gpr_free(void *p) {
GPR_TIMER_BEGIN("gpr_free", 0);
free(p);
g_alloc_functions.free_fn(p);
GPR_TIMER_END("gpr_free", 0);
}
void *gpr_realloc(void *p, size_t size) {
GPR_TIMER_BEGIN("gpr_realloc", 0);
p = realloc(p, size);
p = g_alloc_functions.realloc_fn(p, size);
if (!p) {
abort();
}
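gpr_set_allocation_functions swaps the allocator behind gpr_malloc, gpr_realloc and gpr_free process-wide; the asserts above require all three hooks to be non-NULL. A hedged sketch of installing a simple counting wrapper, typically done at startup before gRPC allocates anything (that timing requirement is an assumption, not stated in this hunk); the counter is deliberately not thread-safe to keep the example short:

#include <stddef.h>
#include <grpc/support/alloc.h>

static gpr_allocation_functions g_prev;
static long g_malloc_count;

static void *counting_malloc(size_t size) {
  g_malloc_count++;
  return g_prev.malloc_fn(size);
}
static void *counting_realloc(void *p, size_t size) {
  return g_prev.realloc_fn(p, size);
}
static void counting_free(void *p) { g_prev.free_fn(p); }

void install_counting_allocator(void) {
  gpr_allocation_functions f;
  g_prev = gpr_get_allocation_functions();
  f.malloc_fn = counting_malloc;
  f.realloc_fn = counting_realloc;
  f.free_fn = counting_free;
  gpr_set_allocation_functions(f);
}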

@ -62,11 +62,13 @@ struct gpr_cmdline {
void (*extra_arg)(void *user_data, const char *arg);
void *extra_arg_user_data;
void (*state)(gpr_cmdline *cl, char *arg);
int (*state)(gpr_cmdline *cl, char *arg);
arg *cur_arg;
int survive_failure;
};
static void normal_state(gpr_cmdline *cl, char *arg);
static int normal_state(gpr_cmdline *cl, char *arg);
gpr_cmdline *gpr_cmdline_create(const char *description) {
gpr_cmdline *cl = gpr_malloc(sizeof(gpr_cmdline));
@ -78,6 +80,10 @@ gpr_cmdline *gpr_cmdline_create(const char *description) {
return cl;
}
void gpr_cmdline_set_survive_failure(gpr_cmdline *cl) {
cl->survive_failure = 1;
}
void gpr_cmdline_destroy(gpr_cmdline *cl) {
while (cl->args) {
arg *a = cl->args;
@ -185,16 +191,22 @@ char *gpr_cmdline_usage_string(gpr_cmdline *cl, const char *argv0) {
return tmp;
}
static void print_usage_and_die(gpr_cmdline *cl) {
static int print_usage_and_die(gpr_cmdline *cl) {
char *usage = gpr_cmdline_usage_string(cl, cl->argv0);
fprintf(stderr, "%s", usage);
gpr_free(usage);
exit(1);
if (!cl->survive_failure) {
exit(1);
}
return 0;
}
static void extra_state(gpr_cmdline *cl, char *str) {
if (!cl->extra_arg) print_usage_and_die(cl);
static int extra_state(gpr_cmdline *cl, char *str) {
if (!cl->extra_arg) {
return print_usage_and_die(cl);
}
cl->extra_arg(cl->extra_arg_user_data, str);
return 1;
}
static arg *find_arg(gpr_cmdline *cl, char *name) {
@ -208,13 +220,13 @@ static arg *find_arg(gpr_cmdline *cl, char *name) {
if (!a) {
fprintf(stderr, "Unknown argument: %s\n", name);
print_usage_and_die(cl);
return NULL;
}
return a;
}
static void value_state(gpr_cmdline *cl, char *str) {
static int value_state(gpr_cmdline *cl, char *str) {
long intval;
char *end;
@ -226,7 +238,7 @@ static void value_state(gpr_cmdline *cl, char *str) {
if (*end || intval < INT_MIN || intval > INT_MAX) {
fprintf(stderr, "expected integer, got '%s' for %s\n", str,
cl->cur_arg->name);
print_usage_and_die(cl);
return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = (int)intval;
break;
@ -238,7 +250,7 @@ static void value_state(gpr_cmdline *cl, char *str) {
} else {
fprintf(stderr, "expected boolean, got '%s' for %s\n", str,
cl->cur_arg->name);
print_usage_and_die(cl);
return print_usage_and_die(cl);
}
break;
case ARGTYPE_STRING:
@ -247,16 +259,18 @@ static void value_state(gpr_cmdline *cl, char *str) {
}
cl->state = normal_state;
return 1;
}
static void normal_state(gpr_cmdline *cl, char *str) {
static int normal_state(gpr_cmdline *cl, char *str) {
char *eq = NULL;
char *tmp = NULL;
char *arg_name = NULL;
int r = 1;
if (0 == strcmp(str, "-help") || 0 == strcmp(str, "--help") ||
0 == strcmp(str, "-h")) {
print_usage_and_die(cl);
return print_usage_and_die(cl);
}
cl->cur_arg = NULL;
@ -266,7 +280,7 @@ static void normal_state(gpr_cmdline *cl, char *str) {
if (str[2] == 0) {
/* handle '--' to move to just extra args */
cl->state = extra_state;
return;
return 1;
}
str += 2;
} else {
@ -277,12 +291,15 @@ static void normal_state(gpr_cmdline *cl, char *str) {
/* str is of the form '--no-foo' - it's a flag disable */
str += 3;
cl->cur_arg = find_arg(cl, str);
if (cl->cur_arg == NULL) {
return print_usage_and_die(cl);
}
if (cl->cur_arg->type != ARGTYPE_BOOL) {
fprintf(stderr, "%s is not a flag argument\n", str);
print_usage_and_die(cl);
return print_usage_and_die(cl);
}
*(int *)cl->cur_arg->value = 0;
return; /* early out */
return 1; /* early out */
}
eq = strchr(str, '=');
if (eq != NULL) {
@ -294,9 +311,12 @@ static void normal_state(gpr_cmdline *cl, char *str) {
arg_name = str;
}
cl->cur_arg = find_arg(cl, arg_name);
if (cl->cur_arg == NULL) {
return print_usage_and_die(cl);
}
if (eq != NULL) {
/* str was of the type --foo=value, parse the value */
value_state(cl, eq + 1);
r = value_state(cl, eq + 1);
} else if (cl->cur_arg->type != ARGTYPE_BOOL) {
/* flag types don't have a '--foo value' variant, other types do */
cl->state = value_state;
@ -305,19 +325,23 @@ static void normal_state(gpr_cmdline *cl, char *str) {
*(int *)cl->cur_arg->value = 1;
}
} else {
extra_state(cl, str);
r = extra_state(cl, str);
}
gpr_free(tmp);
return r;
}
void gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
int gpr_cmdline_parse(gpr_cmdline *cl, int argc, char **argv) {
int i;
GPR_ASSERT(argc >= 1);
cl->argv0 = argv[0];
for (i = 1; i < argc; i++) {
cl->state(cl, argv[i]);
if (!cl->state(cl, argv[i])) {
return 0;
}
}
return 1;
}
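With gpr_cmdline_set_survive_failure set, a parse error (unknown flag, malformed integer, and so on) makes gpr_cmdline_parse return 0 instead of printing usage and calling exit(1), so the embedding program can fall back to defaults. A short sketch; gpr_cmdline_add_int and gpr_cmdline_destroy belong to the existing cmdline API rather than this diff.

#include <grpc/support/cmdline.h>

/* Returns 1 on success, 0 if the arguments could not be parsed. */
static int parse_flags(int argc, char **argv, int *port) {
  int ok;
  gpr_cmdline *cl = gpr_cmdline_create("example server");
  gpr_cmdline_add_int(cl, "port", "TCP port to listen on", port);
  gpr_cmdline_set_survive_failure(cl);
  ok = gpr_cmdline_parse(cl, argc, argv);
  gpr_cmdline_destroy(cl);
  return ok;
}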

@ -32,6 +32,7 @@
*/
#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>
#include <stdio.h>
#include <string.h>
@ -48,7 +49,7 @@ const char *gpr_log_severity_string(gpr_log_severity severity) {
case GPR_LOG_SEVERITY_ERROR:
return "E";
}
return "UNKNOWN";
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
void gpr_log_message(const char *file, int line, gpr_log_severity severity,

@ -341,10 +341,3 @@ int gpr_slice_str_cmp(gpr_slice a, const char *b) {
if (d != 0) return d;
return memcmp(GPR_SLICE_START_PTR(a), b, b_length);
}
char *gpr_slice_to_cstring(gpr_slice slice) {
char *result = gpr_malloc(GPR_SLICE_LENGTH(slice) + 1);
memcpy(result, GPR_SLICE_START_PTR(slice), GPR_SLICE_LENGTH(slice));
result[GPR_SLICE_LENGTH(slice)] = '\0';
return result;
}

@ -153,8 +153,8 @@ void gpr_reverse_bytes(char *str, int len) {
}
int gpr_ltoa(long value, char *string) {
long sign;
int i = 0;
int neg = value < 0;
if (value == 0) {
string[0] = '0';
@ -162,12 +162,33 @@ int gpr_ltoa(long value, char *string) {
return 1;
}
if (neg) value = -value;
sign = value < 0 ? -1 : 1;
while (value) {
string[i++] = (char)('0' + value % 10);
string[i++] = (char)('0' + sign * (value % 10));
value /= 10;
}
if (neg) string[i++] = '-';
if (sign < 0) string[i++] = '-';
gpr_reverse_bytes(string, i);
string[i] = 0;
return i;
}
int gpr_int64toa(gpr_int64 value, char *string) {
gpr_int64 sign;
int i = 0;
if (value == 0) {
string[0] = '0';
string[1] = 0;
return 1;
}
sign = value < 0 ? -1 : 1;
while (value) {
string[i++] = (char)('0' + sign * (value % 10));
value /= 10;
}
if (sign < 0) string[i++] = '-';
gpr_reverse_bytes(string, i);
string[i] = 0;
return i;

@ -70,6 +70,16 @@ int gpr_parse_bytes_to_uint32(const char *data, size_t length,
output must be at least GPR_LTOA_MIN_BUFSIZE bytes long. */
int gpr_ltoa(long value, char *output);
/* Minimum buffer size for calling int64toa */
#define GPR_INT64TOA_MIN_BUFSIZE (3 * sizeof(gpr_int64))
/* Convert an int64 to a string in base 10; returns the length of the
output string (or 0 on failure).
output must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long.
NOTE: This function ensures sufficient bit width even on Win x64,
where long is 32 bits in size. */
int gpr_int64toa(gpr_int64 value, char *output);
/* Reverse a run of bytes */
void gpr_reverse_bytes(char *str, int len);
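GPR_INT64TOA_MIN_BUFSIZE comes to 3 * 8 = 24 bytes when gpr_int64 is 64 bits wide: the widest value, -9223372036854775808, needs 19 digits plus a sign and a terminating NUL (21 bytes), so 24 always suffices. Building digits from sign * (value % 10) instead of negating value also keeps that most-negative value representable (given C99's truncating division). A small usage sketch:

#include <grpc/support/port_platform.h>
#include "src/core/support/string.h"

/* Format an epoch timestamp the way json_token.c now does; the value is
 * arbitrary. `out` must be at least GPR_INT64TOA_MIN_BUFSIZE bytes long. */
static int format_seconds(char *out) {
  return gpr_int64toa((gpr_int64)1446422400, out);
}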

@ -336,26 +336,19 @@ void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_cq_pollset(cq));
}
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call) {
return call->cq;
}
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_call_internal_ref(grpc_call *c, const char *reason) {
grpc_call_stack_ref(CALL_STACK_FROM_CALL(c), reason);
}
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c,
const char *reason) {
grpc_call_stack_unref(exec_ctx, CALL_STACK_FROM_CALL(c), reason);
}
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
void grpc_call_internal_ref(grpc_call *c) {
grpc_call_stack_ref(CALL_STACK_FROM_CALL(c));
#define REF_REASON ""
#define REF_ARG
#endif
void grpc_call_internal_ref(grpc_call *c REF_ARG) {
GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
}
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c) {
grpc_call_stack_unref(exec_ctx, CALL_STACK_FROM_CALL(c));
void grpc_call_internal_unref(grpc_exec_ctx *exec_ctx, grpc_call *c REF_ARG) {
GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
}
#endif
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, int success) {
size_t i;
@ -742,8 +735,15 @@ static void execute_op(grpc_exec_ctx *exec_ctx, grpc_call *call,
char *grpc_call_get_peer(grpc_call *call) {
grpc_call_element *elem = CALL_ELEM_FROM_CALL(call, 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
char *result = elem->filter->get_peer(&exec_ctx, elem);
char *result;
GRPC_API_TRACE("grpc_call_get_peer(%p)", 1, (call));
result = elem->filter->get_peer(&exec_ctx, elem);
if (result == NULL) {
result = grpc_channel_get_target(call->channel);
}
if (result == NULL) {
result = gpr_strdup("unknown");
}
grpc_exec_ctx_finish(&exec_ctx);
return result;
}
@ -1270,6 +1270,7 @@ static grpc_call_error call_start_batch(grpc_exec_ctx *exec_ctx,
}
if (call->receiving_message) {
error = GRPC_CALL_ERROR_TOO_MANY_OPERATIONS;
goto done_with_error;
}
call->receiving_message = 1;
bctl->recv_message = 1;
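
The ref-counting hunk above collapses the debug and release variants of grpc_call_internal_ref/unref into a single body by turning the extra reason parameter into the REF_ARG/REF_REASON macros. A hedged sketch of the same trick on a made-up object; the names below are illustrative only, and call sites would normally go through a wrapper macro that adds or drops the reason string to match.

#include <stdio.h>

#ifdef MY_REFCOUNT_DEBUG
#define REF_ARG , const char *reason
#define REF_REASON reason
#else
#define REF_ARG
#define REF_REASON ""
#endif

typedef struct { int refs; } my_obj;

/* One definition serves both builds: with MY_REFCOUNT_DEBUG the reason is a
   real parameter, without it the parameter list and the string collapse away. */
void my_obj_ref(my_obj *o REF_ARG) {
  o->refs++;
  printf("ref %p -> %d [%s]\n", (void *)o, o->refs, REF_REASON);
}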

@ -58,7 +58,6 @@ grpc_call *grpc_call_create(grpc_channel *channel, grpc_call *parent_call,
void grpc_call_set_completion_queue(grpc_exec_ctx *exec_ctx, grpc_call *call,
grpc_completion_queue *cq);
grpc_completion_queue *grpc_call_get_completion_queue(grpc_call *call);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_call_internal_ref(grpc_call *call, const char *reason);
@ -91,19 +90,6 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
grpc_call *call, const grpc_op *ops, size_t nops,
void *tag);
void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity,
grpc_server *server, grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification,
void *tag);
void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
grpc_server *server, grpc_completion_queue *cq,
void *tag);
/* Set a context pointer.
No thread safety guarantees are made wrt this value. */
void grpc_call_context_set(grpc_call *call, grpc_context_index elem,

@ -117,26 +117,3 @@ void grpc_call_log_batch(char *file, int line, gpr_log_severity severity,
}
}
void grpc_server_log_request_call(char *file, int line,
gpr_log_severity severity,
grpc_server *server, grpc_call **call,
grpc_call_details *details,
grpc_metadata_array *initial_metadata,
grpc_completion_queue *cq_bound_to_call,
grpc_completion_queue *cq_for_notification,
void *tag) {
gpr_log(file, line, severity,
"grpc_server_request_call(server=%p, call=%p, details=%p, "
"initial_metadata=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)",
server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag);
}
void grpc_server_log_shutdown(char *file, int line, gpr_log_severity severity,
grpc_server *server, grpc_completion_queue *cq,
void *tag) {
gpr_log(file, line, severity,
"grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", server,
cq, tag);
}

@ -63,7 +63,6 @@ typedef struct registered_call {
struct grpc_channel {
int is_client;
gpr_refcount refs;
gpr_uint32 max_message_length;
grpc_mdelem *default_authority;
@ -81,6 +80,8 @@ struct grpc_channel {
/* the protobuf library will (by default) start warning at 100megs */
#define DEFAULT_MAX_MESSAGE_LENGTH (100 * 1024 * 1024)
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg, int success);
grpc_channel *grpc_channel_create_from_filters(
grpc_exec_ctx *exec_ctx, const char *target,
const grpc_channel_filter **filters, size_t num_filters,
@ -93,8 +94,6 @@ grpc_channel *grpc_channel_create_from_filters(
channel->target = gpr_strdup(target);
GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");
channel->is_client = is_client;
/* decremented by grpc_channel_destroy */
gpr_ref_init(&channel->refs, 1);
gpr_mu_init(&channel->registered_call_mu);
channel->registered_calls = NULL;
@ -113,7 +112,7 @@ grpc_channel *grpc_channel_create_from_filters(
}
} else if (0 == strcmp(args->args[i].key, GRPC_ARG_DEFAULT_AUTHORITY)) {
if (args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s: must be an string",
gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_ARG_DEFAULT_AUTHORITY);
} else {
if (channel->default_authority) {
@ -126,13 +125,14 @@ grpc_channel *grpc_channel_create_from_filters(
} else if (0 ==
strcmp(args->args[i].key, GRPC_SSL_TARGET_NAME_OVERRIDE_ARG)) {
if (args->args[i].type != GRPC_ARG_STRING) {
gpr_log(GPR_ERROR, "%s: must be an string",
gpr_log(GPR_ERROR, "%s ignored: it must be a string",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
if (channel->default_authority) {
/* other ways of setting this (notably ssl) take precedence */
gpr_log(GPR_ERROR, "%s: default host already set some other way",
GRPC_ARG_DEFAULT_AUTHORITY);
gpr_log(GPR_ERROR,
"%s ignored: default host already set some other way",
GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
} else {
channel->default_authority = grpc_mdelem_from_strings(
":authority", args->args[i].value.string);
@ -152,7 +152,9 @@ grpc_channel *grpc_channel_create_from_filters(
gpr_free(default_authority);
}
grpc_channel_stack_init(exec_ctx, filters, num_filters, channel, args,
grpc_channel_stack_init(exec_ctx, 1, destroy_channel, channel, filters,
num_filters, args,
is_client ? "CLIENT_CHANNEL" : "SERVER_CHANNEL",
CHANNEL_STACK_FROM_CHANNEL(channel));
return channel;
@ -249,17 +251,25 @@ grpc_call *grpc_channel_create_registered_call(
rc->authority ? GRPC_MDELEM_REF(rc->authority) : NULL, deadline);
}
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *c, const char *reason) {
gpr_log(GPR_DEBUG, "CHANNEL: ref %p %d -> %d [%s]", c, c->refs.count,
c->refs.count + 1, reason);
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
#define REF_REASON reason
#define REF_ARG , const char *reason
#else
void grpc_channel_internal_ref(grpc_channel *c) {
#define REF_REASON ""
#define REF_ARG
#endif
gpr_ref(&c->refs);
void grpc_channel_internal_ref(grpc_channel *c REF_ARG) {
GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
static void destroy_channel(grpc_exec_ctx *exec_ctx, grpc_channel *channel) {
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_channel *c REF_ARG) {
GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
}
static void destroy_channel(grpc_exec_ctx *exec_ctx, void *arg,
int iomgr_success) {
grpc_channel *channel = arg;
grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
while (channel->registered_calls) {
registered_call *rc = channel->registered_calls;
@ -278,20 +288,6 @@ static void destroy_channel(grpc_exec_ctx *exec_ctx, grpc_channel *channel) {
gpr_free(channel);
}
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
const char *reason) {
gpr_log(GPR_DEBUG, "CHANNEL: unref %p %d -> %d [%s]", channel,
channel->refs.count, channel->refs.count - 1, reason);
#else
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx,
grpc_channel *channel) {
#endif
if (gpr_unref(&channel->refs)) {
destroy_channel(exec_ctx, channel);
}
}
void grpc_channel_destroy(grpc_channel *channel) {
grpc_transport_op op;
grpc_channel_element *elem;
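
With this hunk the channel no longer keeps its own gpr_refcount: grpc_channel_stack_init is handed an initial count of 1 plus the destroy_channel closure, and grpc_channel_internal_ref/unref simply forward to GRPC_CHANNEL_STACK_REF/UNREF, so the stack's count reaching zero is what eventually runs destroy_channel. A rough, illustrative model of that ownership flow; this is not the actual grpc_channel_stack API.

#include <stddef.h>

/* Illustrative only: a simplified view of the new ownership arrangement. */
typedef struct {
  int refs;                     /* owned by the channel stack                */
  void (*destroy)(void *owner); /* destroy_channel, run when refs hits zero  */
  void *owner;                  /* the enclosing grpc_channel                */
} stack_like;

static void stack_init(stack_like *s, int initial_refs,
                       void (*destroy)(void *), void *owner) {
  s->refs = initial_refs; /* the channel's public handle holds this ref */
  s->destroy = destroy;
  s->owner = owner;
}

static void stack_unref(stack_like *s) {
  if (--s->refs == 0) s->destroy(s->owner); /* frees the channel as well */
}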

@ -53,7 +53,7 @@ grpc_mdelem *grpc_channel_get_reffed_status_elem(grpc_channel *channel,
int status_code);
gpr_uint32 grpc_channel_get_max_message_length(grpc_channel *channel);
#ifdef GRPC_CHANNEL_REF_COUNT_DEBUG
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_channel_internal_ref(grpc_channel *channel, const char *reason);
void grpc_channel_internal_unref(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
const char *reason);

@ -83,7 +83,6 @@ typedef struct {
gpr_mu mu;
callback_phase phase;
int success;
int removed;
grpc_closure on_complete;
grpc_timer alarm;
grpc_connectivity_state state;
@ -135,30 +134,15 @@ static void finished_completion(grpc_exec_ctx *exec_ctx, void *pw,
static void partly_done(grpc_exec_ctx *exec_ctx, state_watcher *w,
int due_to_completion) {
int delete = 0;
grpc_channel_element *client_channel_elem = NULL;
gpr_mu_lock(&w->mu);
if (w->removed == 0) {
w->removed = 1;
client_channel_elem = grpc_channel_stack_last_element(
grpc_channel_get_channel_stack(w->channel));
if (client_channel_elem->filter == &grpc_client_channel_filter) {
grpc_client_channel_del_interested_party(exec_ctx, client_channel_elem,
grpc_cq_pollset(w->cq));
} else {
grpc_client_uchannel_del_interested_party(exec_ctx, client_channel_elem,
grpc_cq_pollset(w->cq));
}
}
gpr_mu_unlock(&w->mu);
if (due_to_completion) {
gpr_mu_lock(&w->mu);
w->success = 1;
gpr_mu_unlock(&w->mu);
grpc_timer_cancel(exec_ctx, &w->alarm);
}
gpr_mu_lock(&w->mu);
if (due_to_completion) {
w->success = 1;
}
switch (w->phase) {
case WAITING:
w->phase = CALLING_BACK;
@ -212,7 +196,6 @@ void grpc_channel_watch_connectivity_state(
w->phase = WAITING;
w->state = last_observed_state;
w->success = 0;
w->removed = 0;
w->cq = cq;
w->tag = tag;
w->channel = channel;
@ -223,16 +206,14 @@ void grpc_channel_watch_connectivity_state(
if (client_channel_elem->filter == &grpc_client_channel_filter) {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
grpc_client_channel_add_interested_party(&exec_ctx, client_channel_elem,
grpc_cq_pollset(cq));
grpc_client_channel_watch_connectivity_state(&exec_ctx, client_channel_elem,
&w->state, &w->on_complete);
grpc_cq_pollset(cq), &w->state,
&w->on_complete);
} else if (client_channel_elem->filter == &grpc_client_uchannel_filter) {
GRPC_CHANNEL_INTERNAL_REF(channel, "watch_uchannel_connectivity");
grpc_client_uchannel_add_interested_party(&exec_ctx, client_channel_elem,
grpc_cq_pollset(cq));
grpc_client_uchannel_watch_connectivity_state(
&exec_ctx, client_channel_elem, &w->state, &w->on_complete);
&exec_ctx, client_channel_elem, grpc_cq_pollset(cq), &w->state,
&w->on_complete);
}
grpc_exec_ctx_finish(&exec_ctx);
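
The watcher now hands the completion queue's pollset straight to the client channel's watch_connectivity_state call instead of registering and later removing an "interested party", which is what lets the removed flag and the double-locked cleanup go away. From the public API the behavior is unchanged; a usage sketch, assuming the grpc.h signatures of this era:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Wait up to 3 seconds for `channel` to leave `last_state`; returns non-zero
   if the state actually changed before the deadline. */
static int wait_for_state_change(grpc_channel *channel,
                                 grpc_connectivity_state last_state,
                                 grpc_completion_queue *cq) {
  grpc_event ev;
  void *tag = (void *)0x1;
  gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                       gpr_time_from_seconds(3, GPR_TIMESPAN));
  grpc_channel_watch_connectivity_state(channel, last_state, deadline, cq, tag);
  ev = grpc_completion_queue_pluck(cq, tag, deadline, NULL);
  return ev.type == GRPC_OP_COMPLETE && ev.success;
}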

@ -99,8 +99,8 @@ static void connected(grpc_exec_ctx *exec_ctx, void *arg, int success) {
grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
&c->initial_string_sent);
}
c->result->transport = grpc_create_chttp2_transport(
exec_ctx, c->args.channel_args, tcp, 1);
c->result->transport =
grpc_create_chttp2_transport(exec_ctx, c->args.channel_args, tcp, 1);
grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL,
0);
GPR_ASSERT(c->result->transport);
@ -171,7 +171,6 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
c->base.vtable = &connector_vtable;
gpr_ref_init(&c->refs, 1);
args->args = final_args;
args->master = f->master;
s = grpc_subchannel_create(&c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
@ -218,6 +217,9 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
GRPC_CHANNEL_INTERNAL_REF(f->master, "subchannel_factory");
resolver = grpc_resolver_create(target, &f->base);
if (!resolver) {
GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, f->master, "subchannel_factory");
grpc_subchannel_factory_unref(&exec_ctx, &f->base);
grpc_exec_ctx_finish(&exec_ctx);
return NULL;
}
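
The added unref makes the early return balanced: when no resolver accepts the target, the partially built channel is released instead of leaked, and the caller still just sees NULL. A caller-side sketch, assuming the public three-argument signature (target, args, reserved) of this era; the helper name is hypothetical.

#include <grpc/grpc.h>
#include <stdio.h>

/* Returns NULL when no resolver accepts `target` (e.g. an unknown scheme);
   with the hunk above that early return no longer leaks the channel. */
static grpc_channel *make_channel(const char *target) {
  grpc_channel *channel = grpc_insecure_channel_create(target, NULL, NULL);
  if (channel == NULL) {
    fprintf(stderr, "could not create a channel for %s\n", target);
  }
  return channel;
}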

@ -0,0 +1,79 @@
/*
*
* Copyright 2015, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "src/core/surface/channel.h"
#include <string.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include "src/core/surface/api_trace.h"
#include "src/core/surface/completion_queue.h"
typedef struct {
grpc_closure closure;
void *tag;
grpc_completion_queue *cq;
grpc_cq_completion completion_storage;
} ping_result;
static void ping_destroy(grpc_exec_ctx *exec_ctx, void *arg,
grpc_cq_completion *storage) {
gpr_free(arg);
}
static void ping_done(grpc_exec_ctx *exec_ctx, void *arg, int success) {
ping_result *pr = arg;
grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, success, ping_destroy, pr,
&pr->completion_storage);
}
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
void *tag, void *reserved) {
grpc_transport_op op;
ping_result *pr = gpr_malloc(sizeof(*pr));
grpc_channel_element *top_elem =
grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
GPR_ASSERT(reserved == NULL);
memset(&op, 0, sizeof(op));
pr->tag = tag;
pr->cq = cq;
grpc_closure_init(&pr->closure, ping_done, pr);
op.send_ping = &pr->closure;
op.bind_pollset = grpc_cq_pollset(cq);
grpc_cq_begin_op(cq);
top_elem->filter->start_transport_op(&exec_ctx, top_elem, &op);
grpc_exec_ctx_finish(&exec_ctx);
}
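
The new channel_ping.c routes grpc_channel_ping through a transport op whose send_ping closure completes a completion-queue event. A caller-side sketch, assuming the (channel, cq, tag, reserved) signature shown above is exposed through grpc.h:

#include <grpc/grpc.h>
#include <grpc/support/time.h>

/* Issue a ping and block until the transport acknowledges it. */
static int ping_channel(grpc_channel *channel, grpc_completion_queue *cq) {
  grpc_event ev;
  void *tag = (void *)0x2;
  grpc_channel_ping(channel, cq, tag, NULL);
  ev = grpc_completion_queue_pluck(cq, tag,
                                   gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL);
  return ev.type == GRPC_OP_COMPLETE && ev.success;
}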

@ -58,6 +58,10 @@
#include "src/core/transport/chttp2_transport.h"
#include "src/core/transport/connectivity_state.h"
#ifndef GRPC_DEFAULT_NAME_PREFIX
#define GRPC_DEFAULT_NAME_PREFIX "dns:///"
#endif
#define MAX_PLUGINS 128
static gpr_once g_basic_init = GPR_ONCE_INIT;
@ -97,7 +101,7 @@ void grpc_init(void) {
grpc_lb_policy_registry_init(grpc_pick_first_lb_factory_create());
grpc_register_lb_policy(grpc_pick_first_lb_factory_create());
grpc_register_lb_policy(grpc_round_robin_lb_factory_create());
grpc_resolver_registry_init("dns:///");
grpc_resolver_registry_init(GRPC_DEFAULT_NAME_PREFIX);
grpc_register_resolver_type(grpc_dns_resolver_factory_create());
grpc_register_resolver_type(grpc_ipv4_resolver_factory_create());
grpc_register_resolver_type(grpc_ipv6_resolver_factory_create());
@ -143,6 +147,7 @@ void grpc_shutdown(void) {
gpr_timers_global_destroy();
grpc_tracer_shutdown();
grpc_resolver_registry_shutdown();
grpc_lb_policy_registry_shutdown();
for (i = 0; i < g_number_of_plugins; i++) {
if (g_all_of_the_plugins[i].destroy != NULL) {
g_all_of_the_plugins[i].destroy();

@ -49,7 +49,6 @@ typedef struct {
} call_data;
typedef struct {
grpc_channel *master;
grpc_status_code error_code;
const char *error_message;
} channel_data;
@ -84,8 +83,7 @@ static void lame_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
}
static char *lame_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
channel_data *chand = elem->channel_data;
return grpc_channel_get_target(chand->master);
return NULL;
}
static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
@ -103,8 +101,7 @@ static void lame_start_transport_op(grpc_exec_ctx *exec_ctx,
}
static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
grpc_call_element_args *args) {
}
grpc_call_element_args *args) {}
static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
grpc_call_element *elem) {}
@ -112,10 +109,8 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
static void init_channel_elem(grpc_exec_ctx *exec_ctx,
grpc_channel_element *elem,
grpc_channel_element_args *args) {
channel_data *chand = elem->channel_data;
GPR_ASSERT(args->is_first);
GPR_ASSERT(args->is_last);
chand->master = args->master;
}
static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,

@ -88,7 +88,6 @@ static void connector_unref(grpc_exec_ctx *exec_ctx, grpc_connector *con) {
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
grpc_security_status status,
grpc_endpoint *wrapped_endpoint,
grpc_endpoint *secure_endpoint) {
connector *c = arg;
grpc_closure *notify;
@ -97,13 +96,11 @@ static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
memset(c->result, 0, sizeof(*c->result));
gpr_mu_unlock(&c->mu);
} else if (status != GRPC_SECURITY_OK) {
GPR_ASSERT(c->connecting_endpoint == wrapped_endpoint);
gpr_log(GPR_ERROR, "Secure handshake failed with error %d.", status);
memset(c->result, 0, sizeof(*c->result));
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
} else {
GPR_ASSERT(c->connecting_endpoint == wrapped_endpoint);
c->connecting_endpoint = NULL;
gpr_mu_unlock(&c->mu);
c->result->transport = grpc_create_chttp2_transport(
@ -231,7 +228,6 @@ static grpc_subchannel *subchannel_factory_create_subchannel(
gpr_mu_init(&c->mu);
gpr_ref_init(&c->refs, 1);
args->args = final_args;
args->master = f->master;
s = grpc_subchannel_create(&c->base, args);
grpc_connector_unref(exec_ctx, &c->base);
grpc_channel_args_destroy(final_args);
@ -308,22 +304,22 @@ grpc_channel *grpc_secure_channel_create(grpc_channel_credentials *creds,
f->master = channel;
GRPC_CHANNEL_INTERNAL_REF(channel, "subchannel_factory");
resolver = grpc_resolver_create(target, &f->base);
if (!resolver) {
grpc_exec_ctx_finish(&exec_ctx);
return NULL;
if (resolver) {
grpc_client_channel_set_resolver(
&exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
}
grpc_client_channel_set_resolver(
&exec_ctx, grpc_channel_get_channel_stack(channel), resolver);
GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "create");
grpc_subchannel_factory_unref(&exec_ctx, &f->base);
GRPC_SECURITY_CONNECTOR_UNREF(&security_connector->base, "channel_create");
grpc_channel_args_destroy(args_copy);
if (new_args_from_connector != NULL) {
grpc_channel_args_destroy(new_args_from_connector);
}
if (!resolver) {
GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "subchannel_factory");
channel = NULL;
}
grpc_exec_ctx_finish(&exec_ctx);
return channel;

@ -101,9 +101,7 @@ int grpc_server_add_insecure_http2_port(grpc_server *server, const char *addr) {
}
tcp = grpc_tcp_server_create();
if (!tcp) {
goto error;
}
GPR_ASSERT(tcp);
for (i = 0; i < resolved->naddrs; i++) {
grpc_tcp_listener *listener;

@ -32,14 +32,21 @@
*/
#include <grpc/grpc.h>
#include "src/core/census/grpc_filter.h"
#include "src/core/channel/channel_args.h"
#include "src/core/channel/compress_filter.h"
#include "src/core/surface/api_trace.h"
#include "src/core/surface/completion_queue.h"
#include "src/core/surface/server.h"
#include "src/core/channel/compress_filter.h"
grpc_server *grpc_server_create(const grpc_channel_args *args, void *reserved) {
const grpc_channel_filter *filters[] = {&grpc_compress_filter};
const grpc_channel_filter *filters[3];
size_t num_filters = 0;
filters[num_filters++] = &grpc_compress_filter;
if (grpc_channel_args_is_census_enabled(args)) {
filters[num_filters++] = &grpc_server_census_filter;
}
GRPC_API_TRACE("grpc_server_create(%p, %p)", 2, (args, reserved));
return grpc_server_create_from_filters(filters, GPR_ARRAY_SIZE(filters),
return grpc_server_create_from_filters(filters, num_filters,
args);
}
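
Because the filter array is now sized for the worst case and only partially populated, GPR_ARRAY_SIZE would report the capacity rather than the number of filters actually installed, so the hunk passes an explicit count instead. A hypothetical, self-contained sketch of that pattern (the types and names below are stand-ins, not part of the tree):

#include <stddef.h>

typedef struct { const char *name; } fake_filter;

static const fake_filter fake_compress = {"compress"};
static const fake_filter fake_census = {"census"};

/* Fill a fixed-capacity array and report how many slots were used. */
static size_t select_filters(const fake_filter **filters, int census_enabled) {
  size_t num_filters = 0;
  filters[num_filters++] = &fake_compress;
  if (census_enabled) {
    filters[num_filters++] = &fake_census;
  }
  return num_filters; /* pass this count, not the array capacity */
}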

@ -118,7 +118,7 @@ void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
hdr = gpr_slice_malloc(9);
p = GPR_SLICE_START_PTR(hdr);
GPR_ASSERT(write_bytes < 16777316);
GPR_ASSERT(write_bytes < (1<<24));
*p++ = (gpr_uint8)(write_bytes >> 16);
*p++ = (gpr_uint8)(write_bytes >> 8);
*p++ = (gpr_uint8)(write_bytes);
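
The tightened assertion matches the HTTP/2 framing layer: the frame length field is 24 bits wide, so a payload must be strictly less than 1<<24 = 16,777,216 bytes (the old literal 16777316 was 100 too large and therefore too permissive). The three shift-and-mask stores that follow are the big-endian encoding of that field; a standalone sketch of just that step:

#include <grpc/support/log.h>
#include <grpc/support/port_platform.h>

/* Write the 24-bit big-endian HTTP/2 frame length into hdr[0..2]. */
static void write_frame_length(gpr_uint8 *hdr, gpr_uint32 len) {
  GPR_ASSERT(len < (1u << 24)); /* the length field is only 3 bytes wide */
  hdr[0] = (gpr_uint8)(len >> 16);
  hdr[1] = (gpr_uint8)(len >> 8);
  hdr[2] = (gpr_uint8)(len);
}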

@ -94,9 +94,6 @@ grpc_chttp2_parse_error grpc_chttp2_data_parser_parse(
grpc_chttp2_transport_parsing *transport_parsing,
grpc_chttp2_stream_parsing *stream_parsing, gpr_slice slice, int is_last);
/* create a slice with an empty data frame and is_last set */
gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id);
void grpc_chttp2_encode_data(gpr_uint32 id, gpr_slice_buffer *inbuf,
gpr_uint32 write_bytes, int is_eof,
gpr_slice_buffer *outbuf);

@ -76,7 +76,6 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
gpr_uint8 *const end = GPR_SLICE_END_PTR(slice);
gpr_uint8 *cur = beg;
grpc_chttp2_ping_parser *p = parser;
grpc_chttp2_outstanding_ping *ping;
while (p->byte != 8 && cur != end) {
p->opaque_8bytes[p->byte] = *cur;
@ -87,15 +86,7 @@ grpc_chttp2_parse_error grpc_chttp2_ping_parser_parse(
if (p->byte == 8) {
GPR_ASSERT(is_last);
if (p->is_ack) {
for (ping = transport_parsing->pings.next;
ping != &transport_parsing->pings; ping = ping->next) {
if (0 == memcmp(p->opaque_8bytes, ping->id, 8)) {
grpc_exec_ctx_enqueue(exec_ctx, ping->on_recv, 1);
}
ping->next->prev = ping->prev;
ping->prev->next = ping->next;
gpr_free(ping);
}
grpc_chttp2_ack_ping(exec_ctx, transport_parsing, p->opaque_8bytes);
} else {
gpr_slice_buffer_add(&transport_parsing->qbuf,
grpc_chttp2_ping_create(1, p->opaque_8bytes));

@ -44,6 +44,8 @@
#include "src/core/transport/chttp2/http2_errors.h"
#include "src/core/transport/chttp2_transport.h"
#define MAX_MAX_HEADER_LIST_SIZE (1024*1024*1024)
/* HTTP/2 mandated initial connection settings */
const grpc_chttp2_setting_parameters
grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {
@ -60,7 +62,7 @@ const grpc_chttp2_setting_parameters
GRPC_CHTTP2_FLOW_CONTROL_ERROR},
{"MAX_FRAME_SIZE", 16384, 16384, 16777215,
GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
{"MAX_HEADER_LIST_SIZE", 0xffffffffu, 0, 0xffffffffu,
{"MAX_HEADER_LIST_SIZE", MAX_MAX_HEADER_LIST_SIZE, 0, MAX_MAX_HEADER_LIST_SIZE,
GRPC_CHTTP2_CLAMP_INVALID_VALUE, GRPC_CHTTP2_PROTOCOL_ERROR},
};

@ -365,10 +365,9 @@ static void hpack_enc(grpc_chttp2_hpack_compressor *c, grpc_mdelem *elem,
GPR_ASSERT(GPR_SLICE_LENGTH(elem->key->slice) > 0);
if (GPR_SLICE_START_PTR(elem->key->slice)[0] != ':') { /* regular header */
st->seen_regular_header = 1;
} else if (st->seen_regular_header != 0) { /* reserved header */
gpr_log(GPR_ERROR,
"Reserved header (colon-prefixed) happening after regular ones.");
abort();
} else {
GPR_ASSERT(st->seen_regular_header == 0 &&
"Reserved header (colon-prefixed) happening after regular ones.");
}
inc_filter(HASH_FRAGMENT_1(elem_hash), &c->filter_elems_sum, c->filter_elems);
@ -458,12 +457,6 @@ static void deadline_enc(grpc_chttp2_hpack_compressor *c, gpr_timespec deadline,
GRPC_MDELEM_UNREF(mdelem);
}
gpr_slice grpc_chttp2_data_frame_create_empty_close(gpr_uint32 id) {
gpr_slice slice = gpr_slice_malloc(9);
fill_header(GPR_SLICE_START_PTR(slice), GRPC_CHTTP2_FRAME_DATA, id, 0, 1);
return slice;
}
static gpr_uint32 elems_for_bytes(gpr_uint32 bytes) {
return (bytes + 31) / 32;
}

@ -728,6 +728,7 @@ static int finish_indexed_field(grpc_chttp2_hpack_parser *p,
/* parse an indexed field with index < 127 */
static int parse_indexed_field(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
p->dynamic_table_update_allowed = 0;
p->index = (*cur) & 0x7f;
return finish_indexed_field(p, cur + 1, end);
}
@ -737,6 +738,7 @@ static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_indexed_field};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x7f;
p->parsing.value = &p->index;
@ -748,6 +750,7 @@ static int parse_indexed_field_x(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_incidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
take_string(p, &p->value)),
1) &&
@ -768,6 +771,7 @@ static int parse_lithdr_incidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_incidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0x3f;
return parse_string_prefix(p, cur + 1, end);
@ -779,6 +783,7 @@ static int parse_lithdr_incidx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_incidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0x3f;
p->parsing.value = &p->index;
@ -791,6 +796,7 @@ static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_incidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@ -799,6 +805,7 @@ static int parse_lithdr_incidx_v(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_notidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
take_string(p, &p->value)),
0) &&
@ -819,6 +826,7 @@ static int parse_lithdr_notidx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_notidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@ -830,6 +838,7 @@ static int parse_lithdr_notidx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_notidx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@ -842,6 +851,7 @@ static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_notidx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@ -850,6 +860,7 @@ static int parse_lithdr_notidx_v(grpc_chttp2_hpack_parser *p,
static int finish_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
grpc_mdelem *md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
GPR_ASSERT(md != NULL); /* handled in string parsing */
return on_hdr(p, grpc_mdelem_from_metadata_strings(GRPC_MDSTR_REF(md->key),
take_string(p, &p->value)),
0) &&
@ -870,6 +881,7 @@ static int parse_lithdr_nvridx(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_value_string_with_indexed_key, finish_lithdr_nvridx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = (*cur) & 0xf;
return parse_string_prefix(p, cur + 1, end);
@ -881,6 +893,7 @@ static int parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_string_prefix, parse_value_string_with_indexed_key,
finish_lithdr_nvridx};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
p->index = 0xf;
p->parsing.value = &p->index;
@ -893,6 +906,7 @@ static int parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser *p,
static const grpc_chttp2_hpack_parser_state and_then[] = {
parse_key_string, parse_string_prefix,
parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
p->dynamic_table_update_allowed = 0;
p->next_state = and_then;
return parse_string_prefix(p, cur + 1, end);
}
@ -908,6 +922,10 @@ static int finish_max_tbl_size(grpc_chttp2_hpack_parser *p,
/* parse a max table size change, max size < 15 */
static int parse_max_tbl_size(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
const gpr_uint8 *end) {
if (p->dynamic_table_update_allowed == 0) {
return 0;
}
p->dynamic_table_update_allowed--;
p->index = (*cur) & 0x1f;
return finish_max_tbl_size(p, cur + 1, end);
}
@ -917,6 +935,10 @@ static int parse_max_tbl_size_x(grpc_chttp2_hpack_parser *p,
const gpr_uint8 *cur, const gpr_uint8 *end) {
static const grpc_chttp2_hpack_parser_state and_then[] = {
finish_max_tbl_size};
if (p->dynamic_table_update_allowed == 0) {
return 0;
}
p->dynamic_table_update_allowed--;
p->next_state = and_then;
p->index = 0x1f;
p->parsing.value = &p->index;
@ -1044,7 +1066,7 @@ static int parse_value4(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
error:
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x",
"got byte 0x%02x on byte 5",
*p->parsing.value, *cur);
return parse_error(p, cur, end);
}
@ -1069,7 +1091,8 @@ static int parse_value5up(grpc_chttp2_hpack_parser *p, const gpr_uint8 *cur,
gpr_log(GPR_ERROR,
"integer overflow in hpack integer decoding: have 0x%08x, "
"got byte 0x%02x sometime after byte 4");
"got byte 0x%02x sometime after byte 5",
*p->parsing.value, *cur);
return parse_error(p, cur, end);
}
@ -1300,7 +1323,10 @@ static is_binary_header is_binary_literal_header(grpc_chttp2_hpack_parser *p) {
static is_binary_header is_binary_indexed_header(grpc_chttp2_hpack_parser *p) {
grpc_mdelem *elem = grpc_chttp2_hptbl_lookup(&p->table, p->index);
if (!elem) return ERROR_HEADER;
if (!elem) {
gpr_log(GPR_ERROR, "Invalid HPACK index received: %d", p->index);
return ERROR_HEADER;
}
return grpc_is_binary_header(
(const char *)GPR_SLICE_START_PTR(elem->key->slice),
GPR_SLICE_LENGTH(elem->key->slice))
@ -1338,15 +1364,7 @@ static int parse_value_string_with_literal_key(grpc_chttp2_hpack_parser *p,
/* PUBLIC INTERFACE */
static void on_header_not_set(void *user_data, grpc_mdelem *md) {
char *keyhex = gpr_dump_slice(md->key->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
char *valuehex =
gpr_dump_slice(md->value->slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_ERROR, "on_header callback not set; key=%s value=%s", keyhex,
valuehex);
gpr_free(keyhex);
gpr_free(valuehex);
GRPC_MDELEM_UNREF(md);
abort();
GPR_UNREACHABLE_CODE(return );
}
void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p) {
@ -1359,6 +1377,7 @@ void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser *p) {
p->value.str = NULL;
p->value.capacity = 0;
p->value.length = 0;
p->dynamic_table_update_allowed = 2;
grpc_chttp2_hptbl_init(&p->table);
}
@ -1400,20 +1419,25 @@ grpc_chttp2_parse_error grpc_chttp2_header_parser_parse(
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_CONNECTION_ERROR;
}
if (parser->is_boundary) {
stream_parsing
->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
stream_parsing->header_frames_received++;
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
if (parser->is_eof) {
stream_parsing->received_close = 1;
/* need to check for null stream: this can occur if we receive an invalid
stream id on a header */
if (stream_parsing != NULL) {
if (parser->is_boundary) {
stream_parsing
->got_metadata_on_parse[stream_parsing->header_frames_received] = 1;
stream_parsing->header_frames_received++;
grpc_chttp2_list_add_parsing_seen_stream(transport_parsing,
stream_parsing);
}
if (parser->is_eof) {
stream_parsing->received_close = 1;
}
}
parser->on_header = on_header_not_set;
parser->on_header_user_data = NULL;
parser->is_boundary = 0xde;
parser->is_eof = 0xde;
parser->dynamic_table_update_allowed = 2;
}
GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
return GRPC_CHTTP2_PARSE_OK;
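
The new dynamic_table_update_allowed counter enforces the HPACK rule that dynamic table size updates may only appear at the start of a header block: the parser begins each block with an allowance of 2, each size update consumes one, and parsing any ordinary header field zeroes the allowance so a later update fails the parse. A condensed, self-contained model of that guard; the names are illustrative, not the parser's own.

typedef struct {
  unsigned char updates_allowed; /* reset to 2 at each header-block boundary */
} hpack_parser_model;

static int on_table_size_update(hpack_parser_model *p) {
  if (p->updates_allowed == 0) return 0; /* update after a header field: error */
  p->updates_allowed--;
  return 1;
}

static int on_header_field(hpack_parser_model *p) {
  p->updates_allowed = 0; /* any field closes the window for size updates */
  return 1;
}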

@ -85,6 +85,8 @@ struct grpc_chttp2_hpack_parser {
gpr_uint8 binary;
/* is the current string huffman encoded? */
gpr_uint8 huff;
/* is a dynamic table update allowed? */
gpr_uint8 dynamic_table_update_allowed;
/* set by higher layers, used by grpc_chttp2_header_parser_parse to signal
it should append a metadata boundary at the end of frame */
gpr_uint8 is_boundary;

@ -56,16 +56,6 @@ void grpc_chttp2_incoming_metadata_buffer_destroy(
gpr_free(buffer->elems);
}
void grpc_chttp2_incoming_metadata_buffer_reset(
grpc_chttp2_incoming_metadata_buffer *buffer) {
size_t i;
GPR_ASSERT(!buffer->published);
for (i = 0; i < buffer->count; i++) {
GRPC_MDELEM_UNREF(buffer->elems[i].md);
}
buffer->count = 0;
}
void grpc_chttp2_incoming_metadata_buffer_add(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_mdelem *elem) {
GPR_ASSERT(!buffer->published);
@ -83,14 +73,6 @@ void grpc_chttp2_incoming_metadata_buffer_set_deadline(
buffer->deadline = deadline;
}
void grpc_chttp2_incoming_metadata_buffer_swap(
grpc_chttp2_incoming_metadata_buffer *a,
grpc_chttp2_incoming_metadata_buffer *b) {
GPR_ASSERT(!a->published);
GPR_ASSERT(!b->published);
GPR_SWAP(grpc_chttp2_incoming_metadata_buffer, *a, *b);
}
void grpc_chttp2_incoming_metadata_buffer_publish(
grpc_chttp2_incoming_metadata_buffer *buffer, grpc_metadata_batch *batch) {
GPR_ASSERT(!buffer->published);

Some files were not shown because too many files have changed in this diff.