Merge github.com:grpc/grpc into lfe3

pull/13097/head
Craig Tiller 7 years ago
commit 0be9ad4408
  1. .clang-format (2)
  2. BUILD (2)
  3. WORKSPACE (5)
  4. examples/python/helloworld/helloworld_pb2.py (139)
  5. examples/python/helloworld/helloworld_pb2_grpc.py (3)
  6. examples/python/route_guide/route_guide_pb2.py (300)
  7. examples/python/route_guide/route_guide_pb2_grpc.py (3)
  8. grpc.def (1)
  9. include/grpc++/impl/codegen/async_unary_call.h (2)
  10. include/grpc++/impl/codegen/completion_queue.h (3)
  11. include/grpc/impl/codegen/sync_generic.h (12)
  12. include/grpc/slice.h (1)
  13. src/compiler/cpp_generator.cc (3)
  14. src/compiler/csharp_generator.cc (13)
  15. src/compiler/node_generator.cc (6)
  16. src/compiler/objective_c_generator.cc (4)
  17. src/compiler/objective_c_generator.h (2)
  18. src/compiler/objective_c_generator_helpers.h (2)
  19. src/compiler/objective_c_plugin.cc (6)
  20. src/compiler/php_generator.cc (6)
  21. src/compiler/php_plugin.cc (2)
  22. src/compiler/python_generator.cc (2)
  23. src/compiler/python_generator_helpers.h (6)
  24. src/compiler/ruby_generator.cc (22)
  25. src/core/ext/filters/client_channel/channel_connectivity.cc (3)
  26. src/core/ext/filters/client_channel/client_channel.cc (44)
  27. src/core/ext/filters/client_channel/lb_policy.cc (5)
  28. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (3)
  29. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (34)
  30. src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc (8)
  31. src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc (18)
  32. src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc (26)
  33. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (9)
  34. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc (10)
  35. src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc (9)
  36. src/core/ext/filters/client_channel/subchannel.cc (20)
  37. src/core/ext/filters/client_channel/subchannel_index.cc (3)
  38. src/core/ext/filters/http/message_compress/message_compress_filter.cc (3)
  39. src/core/ext/filters/http/server/http_server_filter.cc (10)
  40. src/core/ext/filters/message_size/message_size_filter.cc (58)
  41. src/core/ext/transport/chttp2/client/insecure/channel_create.cc (3)
  42. src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc (3)
  43. src/core/ext/transport/chttp2/server/chttp2_server.cc (5)
  44. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (21)
  45. src/core/ext/transport/chttp2/transport/flow_control.cc (4)
  46. src/core/ext/transport/chttp2/transport/hpack_encoder.cc (5)
  47. src/core/ext/transport/chttp2/transport/incoming_metadata.cc (5)
  48. src/core/ext/transport/chttp2/transport/parsing.cc (10)
  49. src/core/ext/transport/cronet/transport/cronet_transport.cc (15)
  50. src/core/ext/transport/inproc/inproc_transport.cc (3)
  51. src/core/lib/backoff/backoff.cc (73)
  52. src/core/lib/backoff/backoff.h (55)
  53. src/core/lib/channel/channel_stack.cc (5)
  54. src/core/lib/debug/stats.cc (4)
  55. src/core/lib/debug/stats_data.cc (9)
  56. src/core/lib/http/httpcli.cc (5)
  57. src/core/lib/iomgr/endpoint_pair_windows.cc (4)
  58. src/core/lib/iomgr/error.cc (4)
  59. src/core/lib/iomgr/ev_epoll1_linux.cc (10)
  60. src/core/lib/iomgr/ev_epollex_linux.cc (32)
  61. src/core/lib/iomgr/ev_epollsig_linux.cc (9)
  62. src/core/lib/iomgr/ev_poll_posix.cc (3)
  63. src/core/lib/iomgr/iomgr.cc (6)
  64. src/core/lib/iomgr/load_file.cc (4)
  65. src/core/lib/iomgr/pollset_windows.cc (2)
  66. src/core/lib/iomgr/resource_quota.cc (12)
  67. src/core/lib/iomgr/sockaddr_utils.cc (3)
  68. src/core/lib/iomgr/tcp_client_posix.cc (8)
  69. src/core/lib/iomgr/tcp_client_uv.cc (3)
  70. src/core/lib/iomgr/tcp_client_windows.cc (8)
  71. src/core/lib/iomgr/tcp_server_posix.cc (5)
  72. src/core/lib/iomgr/tcp_server_uv.cc (4)
  73. src/core/lib/iomgr/tcp_server_windows.cc (12)
  74. src/core/lib/iomgr/tcp_uv.cc (5)
  75. src/core/lib/iomgr/timer_generic.cc (8)
  76. src/core/lib/iomgr/timer_heap.cc (5)
  77. src/core/lib/iomgr/udp_server.cc (7)
  78. src/core/lib/security/context/security_context.cc (3)
  79. src/core/lib/security/credentials/fake/fake_credentials.cc (3)
  80. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (3)
  81. src/core/lib/security/credentials/plugin/plugin_credentials.cc (3)
  82. src/core/lib/security/credentials/ssl/ssl_credentials.cc (3)
  83. src/core/lib/security/transport/auth_filters.h (8)
  84. src/core/lib/security/transport/client_auth_filter.cc (46)
  85. src/core/lib/security/transport/secure_endpoint.cc (3)
  86. src/core/lib/security/transport/security_connector.cc (11)
  87. src/core/lib/security/transport/security_handshaker.cc (6)
  88. src/core/lib/support/avl.cc (3)
  89. src/core/lib/support/histogram.cc (3)
  90. src/core/lib/support/log_posix.cc (1)
  91. src/core/lib/support/mpscq.h (4)
  92. src/core/lib/support/spinlock.h (4)
  93. src/core/lib/surface/call.cc (4)
  94. src/core/lib/surface/channel.cc (3)
  95. src/core/lib/surface/completion_queue.cc (8)
  96. src/core/lib/surface/server.cc (14)
  97. src/core/lib/transport/bdp_estimator.cc (3)
  98. src/core/lib/transport/metadata.h (6)
  99. src/core/lib/transport/service_config.cc (32)
  100. src/core/lib/transport/service_config.h (5)
  101. Some files were not shown because too many files have changed in this diff.

@@ -1,5 +1,7 @@
---
Language: Cpp
BasedOnStyle: Google
DerivePointerAlignment: false
PointerAlignment: Left
...
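The two keys added above drive most of the mechanical churn in the C/C++ hunks below: clang-format no longer derives pointer alignment per file and instead pins the '*' to the type. A minimal illustration, using hypothetical declarations rather than lines from this commit:

/* before: derived alignment let "type *name" survive */
int *old_style;
/* after: DerivePointerAlignment: false plus PointerAlignment: Left enforce "type* name" */
int* new_style;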

@@ -1545,7 +1545,7 @@ grpc_cc_library(
grpc_cc_library(
name = "grpc++_config_proto",
external_deps = [
"protobuf",
"protobuf_headers",
],
language = "c++",
public_hdrs = [

@@ -23,6 +23,11 @@ bind(
actual = "@com_google_protobuf//:protoc_lib",
)
bind(
name = "protobuf_headers",
actual = "@com_google_protobuf//:protobuf_headers",
)
bind(
name = "protocol_compiler",
actual = "@com_google_protobuf//:protoc",

@@ -21,7 +21,6 @@ DESCRIPTOR = _descriptor.FileDescriptor(
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -89,6 +88,7 @@ _HELLOREPLY = _descriptor.Descriptor(
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
@@ -107,123 +107,28 @@ _sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class GreeterStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SayHello = channel.unary_unary(
'/helloworld.Greeter/SayHello',
request_serializer=HelloRequest.SerializeToString,
response_deserializer=HelloReply.FromString,
)
class GreeterServicer(object):
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
_GREETER = _descriptor.ServiceDescriptor(
name='Greeter',
full_name='helloworld.Greeter',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=93,
serialized_end=166,
methods=[
_descriptor.MethodDescriptor(
name='SayHello',
full_name='helloworld.Greeter.SayHello',
index=0,
containing_service=None,
input_type=_HELLOREQUEST,
output_type=_HELLOREPLY,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_GREETER)
DESCRIPTOR.services_by_name['Greeter'] = _GREETER
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello,
request_deserializer=HelloRequest.FromString,
response_serializer=HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'helloworld.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaGreeterServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaGreeterStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""The greeting service definition.
"""
def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Sends a greeting
"""
raise NotImplementedError()
SayHello.future = None
def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
}
response_serializers = {
('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
}
method_implementations = {
('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
}
response_deserializers = {
('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
}
cardinalities = {
'SayHello': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)

@@ -1,6 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import helloworld_pb2 as helloworld__pb2

@@ -21,7 +21,6 @@ DESCRIPTOR = _descriptor.FileDescriptor(
syntax='proto3',
serialized_pb=_b('\n\x11route_guide.proto\x12\nrouteguide\",\n\x05Point\x12\x10\n\x08latitude\x18\x01 \x01(\x05\x12\x11\n\tlongitude\x18\x02 \x01(\x05\"I\n\tRectangle\x12\x1d\n\x02lo\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x1d\n\x02hi\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"<\n\x07\x46\x65\x61ture\x12\x0c\n\x04name\x18\x01 \x01(\t\x12#\n\x08location\x18\x02 \x01(\x0b\x32\x11.routeguide.Point\"A\n\tRouteNote\x12#\n\x08location\x18\x01 \x01(\x0b\x32\x11.routeguide.Point\x12\x0f\n\x07message\x18\x02 \x01(\t\"b\n\x0cRouteSummary\x12\x13\n\x0bpoint_count\x18\x01 \x01(\x05\x12\x15\n\rfeature_count\x18\x02 \x01(\x05\x12\x10\n\x08\x64istance\x18\x03 \x01(\x05\x12\x14\n\x0c\x65lapsed_time\x18\x04 \x01(\x05\x32\x85\x02\n\nRouteGuide\x12\x36\n\nGetFeature\x12\x11.routeguide.Point\x1a\x13.routeguide.Feature\"\x00\x12>\n\x0cListFeatures\x12\x15.routeguide.Rectangle\x1a\x13.routeguide.Feature\"\x00\x30\x01\x12>\n\x0bRecordRoute\x12\x11.routeguide.Point\x1a\x18.routeguide.RouteSummary\"\x00(\x01\x12?\n\tRouteChat\x12\x15.routeguide.RouteNote\x1a\x15.routeguide.RouteNote\"\x00(\x01\x30\x01\x42\x36\n\x1bio.grpc.examples.routeguideB\x0fRouteGuideProtoP\x01\xa2\x02\x03RTGb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -238,6 +237,7 @@ DESCRIPTOR.message_types_by_name['Rectangle'] = _RECTANGLE
DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE
DESCRIPTOR.message_types_by_name['RouteNote'] = _ROUTENOTE
DESCRIPTOR.message_types_by_name['RouteSummary'] = _ROUTESUMMARY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Point = _reflection.GeneratedProtocolMessageType('Point', (_message.Message,), dict(
DESCRIPTOR = _POINT,
@@ -277,265 +277,55 @@ _sym_db.RegisterMessage(RouteSummary)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
class RouteGuideStub(object):
"""Interface exported by the server.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeature = channel.unary_unary(
'/routeguide.RouteGuide/GetFeature',
request_serializer=Point.SerializeToString,
response_deserializer=Feature.FromString,
)
self.ListFeatures = channel.unary_stream(
'/routeguide.RouteGuide/ListFeatures',
request_serializer=Rectangle.SerializeToString,
response_deserializer=Feature.FromString,
)
self.RecordRoute = channel.stream_unary(
'/routeguide.RouteGuide/RecordRoute',
request_serializer=Point.SerializeToString,
response_deserializer=RouteSummary.FromString,
)
self.RouteChat = channel.stream_stream(
'/routeguide.RouteGuide/RouteChat',
request_serializer=RouteNote.SerializeToString,
response_deserializer=RouteNote.FromString,
)
class RouteGuideServicer(object):
"""Interface exported by the server.
"""
def GetFeature(self, request, context):
"""A simple RPC.
Obtains the feature at a given position.
A feature with an empty name is returned if there's no feature at the given
position.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListFeatures(self, request, context):
"""A server-to-client streaming RPC.
Obtains the Features available within the given Rectangle. Results are
streamed rather than returned at once (e.g. in a response message with a
repeated field), as the rectangle may cover a large area and contain a
huge number of features.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RecordRoute(self, request_iterator, context):
"""A client-to-server streaming RPC.
Accepts a stream of Points on a route being traversed, returning a
RouteSummary when traversal is completed.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RouteChat(self, request_iterator, context):
"""A Bidirectional streaming RPC.
Accepts a stream of RouteNotes sent while a route is being traversed,
while receiving other RouteNotes (e.g. from other users).
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RouteGuideServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeature': grpc.unary_unary_rpc_method_handler(
servicer.GetFeature,
request_deserializer=Point.FromString,
response_serializer=Feature.SerializeToString,
_ROUTEGUIDE = _descriptor.ServiceDescriptor(
name='RouteGuide',
full_name='routeguide.RouteGuide',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=384,
serialized_end=645,
methods=[
_descriptor.MethodDescriptor(
name='GetFeature',
full_name='routeguide.RouteGuide.GetFeature',
index=0,
containing_service=None,
input_type=_POINT,
output_type=_FEATURE,
options=None,
),
'ListFeatures': grpc.unary_stream_rpc_method_handler(
servicer.ListFeatures,
request_deserializer=Rectangle.FromString,
response_serializer=Feature.SerializeToString,
_descriptor.MethodDescriptor(
name='ListFeatures',
full_name='routeguide.RouteGuide.ListFeatures',
index=1,
containing_service=None,
input_type=_RECTANGLE,
output_type=_FEATURE,
options=None,
),
'RecordRoute': grpc.stream_unary_rpc_method_handler(
servicer.RecordRoute,
request_deserializer=Point.FromString,
response_serializer=RouteSummary.SerializeToString,
_descriptor.MethodDescriptor(
name='RecordRoute',
full_name='routeguide.RouteGuide.RecordRoute',
index=2,
containing_service=None,
input_type=_POINT,
output_type=_ROUTESUMMARY,
options=None,
),
'RouteChat': grpc.stream_stream_rpc_method_handler(
servicer.RouteChat,
request_deserializer=RouteNote.FromString,
response_serializer=RouteNote.SerializeToString,
_descriptor.MethodDescriptor(
name='RouteChat',
full_name='routeguide.RouteGuide.RouteChat',
index=3,
containing_service=None,
input_type=_ROUTENOTE,
output_type=_ROUTENOTE,
options=None,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'routeguide.RouteGuide', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaRouteGuideServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetFeature(self, request, context):
"""A simple RPC.
Obtains the feature at a given position.
A feature with an empty name is returned if there's no feature at the given
position.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def ListFeatures(self, request, context):
"""A server-to-client streaming RPC.
Obtains the Features available within the given Rectangle. Results are
streamed rather than returned at once (e.g. in a response message with a
repeated field), as the rectangle may cover a large area and contain a
huge number of features.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RecordRoute(self, request_iterator, context):
"""A client-to-server streaming RPC.
Accepts a stream of Points on a route being traversed, returning a
RouteSummary when traversal is completed.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
def RouteChat(self, request_iterator, context):
"""A Bidirectional streaming RPC.
Accepts a stream of RouteNotes sent while a route is being traversed,
while receiving other RouteNotes (e.g. from other users).
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaRouteGuideStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Interface exported by the server.
"""
def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""A simple RPC.
Obtains the feature at a given position.
A feature with an empty name is returned if there's no feature at the given
position.
"""
raise NotImplementedError()
GetFeature.future = None
def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""A server-to-client streaming RPC.
Obtains the Features available within the given Rectangle. Results are
streamed rather than returned at once (e.g. in a response message with a
repeated field), as the rectangle may cover a large area and contain a
huge number of features.
"""
raise NotImplementedError()
def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""A client-to-server streaming RPC.
Accepts a stream of Points on a route being traversed, returning a
RouteSummary when traversal is completed.
"""
raise NotImplementedError()
RecordRoute.future = None
def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
"""A Bidirectional streaming RPC.
Accepts a stream of RouteNotes sent while a route is being traversed,
while receiving other RouteNotes (e.g. from other users).
"""
raise NotImplementedError()
def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('routeguide.RouteGuide', 'GetFeature'): Point.FromString,
('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString,
('routeguide.RouteGuide', 'RecordRoute'): Point.FromString,
('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
}
response_serializers = {
('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString,
('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString,
('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString,
('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
}
method_implementations = {
('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature),
('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures),
('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute),
('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
])
_sym_db.RegisterServiceDescriptor(_ROUTEGUIDE)
def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
DESCRIPTOR.services_by_name['RouteGuide'] = _ROUTEGUIDE
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString,
('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString,
('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString,
('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
}
response_deserializers = {
('routeguide.RouteGuide', 'GetFeature'): Feature.FromString,
('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString,
('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString,
('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
}
cardinalities = {
'GetFeature': cardinality.Cardinality.UNARY_UNARY,
'ListFeatures': cardinality.Cardinality.UNARY_STREAM,
'RecordRoute': cardinality.Cardinality.STREAM_UNARY,
'RouteChat': cardinality.Cardinality.STREAM_STREAM,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)

@@ -1,6 +1,5 @@
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import route_guide_pb2 as route__guide__pb2

@@ -149,7 +149,6 @@ EXPORTS
grpc_slice_eq
grpc_slice_cmp
grpc_slice_str_cmp
grpc_slice_buf_cmp
grpc_slice_buf_start_eq
grpc_slice_rchr
grpc_slice_chr

@@ -297,6 +297,6 @@ class default_delete<grpc::ClientAsyncResponseReaderInterface<R>> {
public:
void operator()(void* p) {}
};
}
} // namespace std
#endif // GRPCXX_IMPL_CODEGEN_ASYNC_UNARY_CALL_H

@@ -164,7 +164,8 @@ class CompletionQueue : private GrpcLibraryCodegen {
///
/// \return true if read a regular event, false if the queue is shutting down.
bool Next(void** tag, bool* ok) {
return (AsyncNextInternal(tag, ok, g_core_codegen_interface->gpr_inf_future(
return (AsyncNextInternal(tag, ok,
g_core_codegen_interface->gpr_inf_future(
GPR_CLOCK_REALTIME)) != SHUTDOWN);
}

@@ -23,16 +23,22 @@
#include <grpc/impl/codegen/atm.h>
/* gpr_event */
typedef struct { gpr_atm state; } gpr_event;
typedef struct {
gpr_atm state;
} gpr_event;
#define GPR_EVENT_INIT \
{ 0 }
/* gpr_refcount */
typedef struct { gpr_atm count; } gpr_refcount;
typedef struct {
gpr_atm count;
} gpr_refcount;
/* gpr_stats_counter */
typedef struct { gpr_atm value; } gpr_stats_counter;
typedef struct {
gpr_atm value;
} gpr_stats_counter;
#define GPR_STATS_INIT \
{ 0 }

@@ -137,7 +137,6 @@ GPRAPI int grpc_slice_eq(grpc_slice a, grpc_slice b);
versions of the API. */
GPRAPI int grpc_slice_cmp(grpc_slice a, grpc_slice b);
GPRAPI int grpc_slice_str_cmp(grpc_slice a, const char* b);
GPRAPI int grpc_slice_buf_cmp(grpc_slice a, const void *b, size_t blen);
/** return non-zero if the first blen bytes of a are equal to b */
GPRAPI int grpc_slice_buf_start_eq(grpc_slice a, const void* b, size_t blen);
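grpc_slice_buf_cmp disappears from both grpc.def and this header, leaving grpc_slice_buf_start_eq as the prefix predicate. A hedged usage sketch against the signature shown above; starts_with_grpc is a hypothetical helper, not part of the API:

#include <grpc/slice.h>

/* Non-zero iff the first 4 bytes of the slice equal "grpc". */
static int starts_with_grpc(grpc_slice s) {
  return grpc_slice_buf_start_eq(s, "grpc", 4);
}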

@@ -1566,7 +1566,8 @@ grpc::string GetMockIncludes(grpc_generator::File *file,
static const char* headers_strs[] = {
"grpc++/impl/codegen/async_stream.h",
"grpc++/impl/codegen/sync_stream.h", "gmock/gmock.h",
"grpc++/impl/codegen/sync_stream.h",
"gmock/gmock.h",
};
std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
PrintIncludes(printer.get(), headers, params);

@@ -23,24 +23,23 @@
#include "src/compiler/config.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator.h"
#include "src/compiler/csharp_generator_helpers.h"
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetClassName;
using google::protobuf::compiler::csharp::GetFileNamespace;
using google::protobuf::compiler::csharp::GetReflectionClassName;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc_generator::MethodType;
using grpc_generator::GetMethodType;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::METHODTYPE_CLIENT_STREAMING;
using grpc_generator::METHODTYPE_NO_STREAMING;
using grpc_generator::METHODTYPE_SERVER_STREAMING;
using grpc_generator::METHODTYPE_BIDI_STREAMING;
using grpc_generator::MethodType;
using grpc_generator::StringReplace;
using std::map;
using std::vector;

@@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/node_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -234,7 +234,7 @@ void PrintServices(const FileDescriptor *file, Printer *out) {
PrintService(file->service(i), out);
}
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor* file) {
grpc::string output;

@@ -27,10 +27,10 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ClassName;
using ::grpc::protobuf::io::Printer;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::MethodDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::io::Printer;
using ::std::map;
using ::std::set;

@@ -23,8 +23,8 @@
namespace grpc_objective_c_generator {
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::protobuf::FileDescriptor;
using ::grpc::protobuf::ServiceDescriptor;
using ::grpc::string;
// Returns forward declaration of classes in the generated header file.

@@ -40,5 +40,5 @@ inline string ServiceClassName(const ServiceDescriptor *service) {
string prefix = file->options().objc_class_prefix();
return prefix + service->name();
}
}
} // namespace grpc_objective_c_generator
#endif // GRPC_INTERNAL_COMPILER_OBJECTIVE_C_GENERATOR_HELPERS_H

@@ -26,9 +26,9 @@
#include <google/protobuf/compiler/objectivec/objectivec_helpers.h>
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
using ::google::protobuf::compiler::objectivec::
IsProtobufLibraryBundledProtoFile;
using ::google::protobuf::compiler::objectivec::ProtobufLibraryFrameworkName;
class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:
@@ -96,8 +96,8 @@ class ObjectiveCGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
"\nNS_ASSUME_NONNULL_BEGIN\n\n";
static const ::grpc::string kNonNullEnd = "\nNS_ASSUME_NONNULL_END\n";
Write(context, file_name + ".pbrpc.h", imports + '\n' + proto_imports +
'\n' + kNonNullBegin +
Write(context, file_name + ".pbrpc.h",
imports + '\n' + proto_imports + '\n' + kNonNullBegin +
declarations + kNonNullEnd);
}

@@ -22,10 +22,10 @@
#include "src/compiler/generator_helpers.h"
#include "src/compiler/php_generator_helpers.h"
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::Descriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -148,7 +148,7 @@ void PrintService(const ServiceDescriptor *service,
out->Outdent();
out->Print("}\n");
}
}
} // namespace
grpc::string GenerateFile(const FileDescriptor* file,
const ServiceDescriptor* service,

@@ -24,9 +24,9 @@
#include "src/compiler/php_generator.h"
#include "src/compiler/php_generator_helpers.h"
using google::protobuf::compiler::ParseGeneratorParameter;
using grpc_php_generator::GenerateFile;
using grpc_php_generator::GetPHPServiceFilename;
using google::protobuf::compiler::ParseGeneratorParameter;
class PHPGrpcGenerator : public grpc::protobuf::compiler::CodeGenerator {
public:

@@ -45,9 +45,9 @@ using std::make_pair;
using std::map;
using std::pair;
using std::replace;
using std::set;
using std::tuple;
using std::vector;
using std::set;
namespace grpc_python_generator {

@@ -29,9 +29,6 @@
#include "src/compiler/python_generator.h"
#include "src/compiler/python_private_generator.h"
using std::vector;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using grpc::protobuf::Descriptor;
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::MethodDescriptor;
@@ -41,6 +38,9 @@ using grpc::protobuf::io::CodedOutputStream;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using grpc::protobuf::io::ZeroCopyOutputStream;
using grpc_generator::StringReplace;
using grpc_generator::StripProto;
using std::vector;
namespace grpc_python_generator {

@@ -27,8 +27,8 @@
#include "src/compiler/ruby_generator_string-inl.h"
using grpc::protobuf::FileDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::MethodDescriptor;
using grpc::protobuf::ServiceDescriptor;
using grpc::protobuf::io::Printer;
using grpc::protobuf::io::StringOutputStream;
using std::map;
@@ -51,7 +51,11 @@ void PrintMethod(const MethodDescriptor *method, const grpc::string &package,
output_type = "stream(" + output_type + ")";
}
std::map<grpc::string, grpc::string> method_vars = ListToDict({
"mth.name", method->name(), "input.type", input_type, "output.type",
"mth.name",
method->name(),
"input.type",
input_type,
"output.type",
output_type,
});
out->Print(GetRubyComments(method, true).c_str());
@@ -68,7 +72,8 @@ void PrintService(const ServiceDescriptor *service, const grpc::string &package,
// Begin the service module
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", CapitalizeFirst(service->name()),
"module.name",
CapitalizeFirst(service->name()),
});
out->Print(module_vars, "module $module.name$\n");
out->Indent();
@@ -157,7 +162,10 @@ grpc::string GetServices(const FileDescriptor *file) {
// Write out a file header.
std::map<grpc::string, grpc::string> header_comment_vars = ListToDict({
"file.name", file->name(), "file.package", file->package(),
"file.name",
file->name(),
"file.package",
file->package(),
});
out.Print("# Generated by the protocol buffer compiler. DO NOT EDIT!\n");
out.Print(header_comment_vars,
@@ -175,7 +183,8 @@ grpc::string GetServices(const FileDescriptor *file) {
// that defines the messages used by the service. This is generated by the
// main ruby plugin.
std::map<grpc::string, grpc::string> dep_vars = ListToDict({
"dep.name", MessagesRequireName(file),
"dep.name",
MessagesRequireName(file),
});
out.Print(dep_vars, "require '$dep.name$'\n");
@@ -184,7 +193,8 @@ grpc::string GetServices(const FileDescriptor *file) {
std::vector<grpc::string> modules = Split(file->package(), '.');
for (size_t i = 0; i < modules.size(); ++i) {
std::map<grpc::string, grpc::string> module_vars = ListToDict({
"module.name", PackageToModule(modules[i]),
"module.name",
PackageToModule(modules[i]),
});
out.Print(module_vars, "module $module.name$\n");
out.Indent();

@@ -213,7 +213,8 @@ void grpc_channel_watch_connectivity_state(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"cq=%p, tag=%p)",
7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
7,
(channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, cq, tag));
GPR_ASSERT(grpc_cq_begin_op(cq, tag));

@@ -88,7 +88,12 @@ static void method_parameters_unref(method_parameters *method_params) {
}
}
static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
// Wrappers to pass to grpc_service_config_create_method_config_table().
static void* method_parameters_ref_wrapper(void* value) {
return method_parameters_ref((method_parameters*)value);
}
static void method_parameters_unref_wrapper(grpc_exec_ctx* exec_ctx,
void* value) {
method_parameters_unref((method_parameters*)value);
}
@@ -117,24 +122,16 @@ static bool parse_timeout(grpc_json *field, grpc_millis *timeout) {
gpr_free(buf);
return false;
}
// There should always be exactly 3, 6, or 9 fractional digits.
int multiplier = 1;
switch (strlen(decimal_point + 1)) {
case 9:
break;
case 6:
multiplier *= 1000;
break;
case 3:
multiplier *= 1000000;
break;
default: // Unsupported number of digits.
int num_digits = (int)strlen(decimal_point + 1);
if (num_digits > 9) { // We don't accept greater precision than nanos.
gpr_free(buf);
return false;
}
nanos *= multiplier;
for (int i = 0; i < (9 - num_digits); ++i) {
nanos *= 10;
}
}
int seconds = gpr_parse_nonnegative_int(buf);
int seconds = decimal_point == buf ? 0 : gpr_parse_nonnegative_int(buf);
gpr_free(buf);
if (seconds == -1) return false;
*timeout = seconds * GPR_MS_PER_SEC + nanos / GPR_NS_PER_MS;
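The rewritten hunk above generalizes fraction handling: instead of accepting exactly 3, 6, or 9 fractional digits, any count up to 9 is scaled to nanoseconds, and a value that starts at the decimal point yields zero whole seconds. A self-contained sketch of the same logic in plain C; parse_duration_ms is a hypothetical helper, not the gRPC function:

#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>

/* Parse "<seconds>[.<1-9 digits>]" into milliseconds.
 * (Real service-config timeouts carry a trailing "s", omitted here.) */
static bool parse_duration_ms(const char* s, int64_t* ms) {
  int64_t seconds = 0;
  int64_t nanos = 0;
  const char* p = s;
  while (isdigit((unsigned char)*p)) seconds = seconds * 10 + (*p++ - '0');
  if (*p == '.') {
    int num_digits = 0;
    for (++p; isdigit((unsigned char)*p); ++p, ++num_digits) {
      if (num_digits >= 9) return false; /* finer than nanos: reject */
      nanos = nanos * 10 + (*p - '0');
    }
    /* Scale whatever digits were present up to nanoseconds, as the diff does. */
    for (int i = 0; i < 9 - num_digits; ++i) nanos *= 10;
  }
  if (*p != '\0') return false;
  *ms = seconds * 1000 + nanos / 1000000;
  return true;
}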
@@ -473,7 +470,7 @@ static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
retry_throttle_data = parsing_state.retry_throttle_data;
method_params_table = grpc_service_config_create_method_config_table(
exec_ctx, service_config, method_parameters_create_from_json,
method_parameters_free);
method_parameters_ref_wrapper, method_parameters_unref_wrapper);
grpc_service_config_destroy(service_config);
}
}
@@ -771,7 +768,8 @@ static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
if (chand->resolver != NULL) {
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}
@@ -943,7 +941,8 @@ static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
channel_data* chand = (channel_data*)elem->channel_data;
call_data* calld = (call_data*)elem->call_data;
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: sending %" PRIuPTR
gpr_log(GPR_DEBUG,
"chand=%p calld=%p: sending %" PRIuPTR
" pending batches to subchannel_call=%p",
chand, calld, calld->waiting_for_pick_batches_count,
calld->subchannel_call);
@@ -1176,8 +1175,7 @@ typedef struct {
static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
gpr_free(args);
return;
@@ -1214,8 +1212,7 @@ static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
void* arg,
grpc_error* error) {
pick_after_resolver_result_args *args =
(pick_after_resolver_result_args *)arg;
pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
if (args->finished) {
/* cancelled, do nothing */
if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1549,7 +1546,8 @@ grpc_connectivity_state grpc_client_channel_check_connectivity_state(
if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
GRPC_CLOSURE_SCHED(
exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
exec_ctx,
GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
grpc_combiner_scheduler(chand->combiner)),
GRPC_ERROR_NONE);
}

@@ -80,8 +80,9 @@ void grpc_lb_policy_unref(grpc_exec_ctx *exec_ctx,
gpr_atm mask = ~(gpr_atm)((1 << WEAK_REF_BITS) - 1);
gpr_atm check = 1 << WEAK_REF_BITS;
if ((old_val & mask) == check) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
shutdown_locked, policy,
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(shutdown_locked, policy,
grpc_combiner_scheduler(policy->combiner)),
GRPC_ERROR_NONE);
} else {

@@ -75,8 +75,7 @@ static grpc_error *init_call_elem(grpc_exec_ctx *exec_ctx,
GPR_ASSERT(args->context != NULL);
GPR_ASSERT(args->context[GRPC_GRPCLB_CLIENT_STATS].value != NULL);
calld->client_stats = grpc_grpclb_client_stats_ref(
(grpc_grpclb_client_stats *)args->context[GRPC_GRPCLB_CLIENT_STATS]
.value);
(grpc_grpclb_client_stats*)args->context[GRPC_GRPCLB_CLIENT_STATS].value);
// Record call started.
grpc_grpclb_client_stats_add_call_started(calld->client_stats);
return GRPC_ERROR_NONE;

@@ -1163,20 +1163,35 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
"won't work without it. Failing"));
return 0;
}
glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
bool pick_done;
bool pick_done = false;
if (glb_policy->rr_policy != NULL) {
const grpc_connectivity_state rr_connectivity_state =
grpc_lb_policy_check_connectivity_locked(exec_ctx,
glb_policy->rr_policy, NULL);
// The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
// callback registered to capture this event
// (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
// need to make sure we aren't trying to pick from a RR policy instance
// that's in shutdown.
if (rr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO,
"grpclb %p NOT picking from from RR %p: RR conn state=%s",
(void*)glb_policy, (void*)glb_policy->rr_policy,
grpc_connectivity_state_name(rr_connectivity_state));
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
pick_done = false;
} else { // RR not in shutdown
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "grpclb %p about to PICK from RR %p",
(void*)glb_policy, (void*)glb_policy->rr_policy);
}
GRPC_LB_POLICY_REF(glb_policy->rr_policy, "glb_pick");
wrapped_rr_closure_arg* wc_arg =
(wrapped_rr_closure_arg*)gpr_zalloc(sizeof(wrapped_rr_closure_arg));
GRPC_CLOSURE_INIT(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
grpc_schedule_on_exec_ctx);
wc_arg->rr_policy = glb_policy->rr_policy;
@@ -1192,7 +1207,8 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
pick_done =
pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
false /* force_async */, target, wc_arg);
} else {
}
} else { // glb_policy->rr_policy == NULL
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG,
"No RR policy in grpclb instance %p. Adding to grpclb's pending "
@@ -1201,7 +1217,6 @@ static int glb_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
}
add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
on_complete);
if (!glb_policy->started_picking) {
start_picking_locked(exec_ctx, glb_policy);
}
@@ -1266,7 +1281,8 @@ static void maybe_restart_lb_call(grpc_exec_ctx *exec_ctx,
} else if (!glb_policy->shutting_down) {
/* if we aren't shutting down, restart the LB client call after some time */
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state);
grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
.next_attempt_start_time;
if (GRPC_TRACER_ON(grpc_lb_glb_trace)) {
gpr_log(GPR_DEBUG, "Connection to LB server lost (grpclb: %p)...",
(void*)glb_policy);
@@ -1431,7 +1447,7 @@ static void lb_call_init_locked(grpc_exec_ctx *exec_ctx,
grpc_combiner_scheduler(glb_policy->base.combiner));
grpc_backoff_init(&glb_policy->lb_call_backoff_state,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_GRPCLB_RECONNECT_JITTER,
GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,

@@ -200,8 +200,8 @@ grpc_grpclb_serverlist *grpc_grpclb_response_parse_serverlist(
}
// Second pass: populate servers.
if (sl->num_servers > 0) {
sl->servers = (grpc_grpclb_server **)gpr_zalloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
sl->servers = (grpc_grpclb_server**)gpr_zalloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
decode_serverlist_arg decode_arg;
memset(&decode_arg, 0, sizeof(decode_arg));
decode_arg.serverlist = sl;
@@ -239,8 +239,8 @@ grpc_grpclb_serverlist *grpc_grpclb_serverlist_copy(
copy->num_servers = sl->num_servers;
memcpy(&copy->expiration_interval, &sl->expiration_interval,
sizeof(grpc_grpclb_duration));
copy->servers = (grpc_grpclb_server **)gpr_malloc(
sizeof(grpc_grpclb_server *) * sl->num_servers);
copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
sl->num_servers);
for (size_t i = 0; i < sl->num_servers; i++) {
copy->servers[i] =
(grpc_grpclb_server*)gpr_malloc(sizeof(grpc_grpclb_server));

@@ -146,8 +146,7 @@ static void update_last_ready_subchannel_index_locked(round_robin_lb_policy *p,
GPR_ASSERT(last_ready_index < p->subchannel_list->num_subchannels);
p->last_ready_subchannel_index = last_ready_index;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(
GPR_DEBUG,
gpr_log(GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%lu (SC %p, CSC %p)",
(void*)p, (unsigned long)last_ready_index,
(void*)p->subchannel_list->subchannels[last_ready_index].subchannel,
@@ -277,10 +276,11 @@ static int rr_pick_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_call_context_element* context, void** user_data,
grpc_closure* on_complete) {
round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
GPR_ASSERT(!p->shutdown);
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO, "[RR %p] Trying to pick", (void *)pol);
gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", (void*)pol,
p->shutdown);
}
GPR_ASSERT(!p->shutdown);
if (p->subchannel_list != NULL) {
const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
if (next_ready_index < p->subchannel_list->num_subchannels) {
@@ -393,6 +393,11 @@ static grpc_connectivity_state update_lb_connectivity_status_locked(
"rr_shutdown");
p->shutdown = true;
new_state = GRPC_CHANNEL_SHUTDOWN;
if (GRPC_TRACER_ON(grpc_lb_round_robin_trace)) {
gpr_log(GPR_INFO,
"[RR %p] Shutting down: all subchannels have gone into shutdown",
(void*)p);
}
} else if (subchannel_list->num_transient_failures ==
p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
@@ -561,8 +566,9 @@ static void rr_ping_one_locked(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
grpc_connected_subchannel_ping(exec_ctx, target, closure);
GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
} else {
GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Round Robin not connected"));
GRPC_CLOSURE_SCHED(
exec_ctx, closure,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
}
}

@@ -33,11 +33,12 @@ void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx *exec_ctx,
const char* reason) {
if (sd->subchannel != NULL) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
" of %" PRIuPTR " (subchannel %p): unreffing subchannel",
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
@@ -76,11 +77,12 @@ void grpc_lb_subchannel_data_start_connectivity_watch(
void grpc_lb_subchannel_data_stop_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel);
}
GPR_ASSERT(sd->connectivity_notification_pending);
@@ -140,7 +142,8 @@ grpc_lb_subchannel_list *grpc_lb_subchannel_list_create(
if (GRPC_TRACER_ON(*tracer)) {
char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s",
tracer->name, p, subchannel_list, subchannel_index, subchannel,
address_uri);
@@ -229,11 +232,12 @@ void grpc_lb_subchannel_list_unref_for_connectivity_watch(
static void subchannel_data_cancel_connectivity_watch(
grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
if (GRPC_TRACER_ON(*sd->subchannel_list->tracer)) {
gpr_log(
GPR_DEBUG, "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
gpr_log(GPR_DEBUG,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)",
sd->subchannel_list->tracer->name, sd->subchannel_list->policy,
sd->subchannel_list, (size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list,
(size_t)(sd - sd->subchannel_list->subchannels),
sd->subchannel_list->num_subchannels, sd->subchannel, reason);
}
grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,

@@ -271,7 +271,8 @@ static void dns_ares_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
} else {
const char* msg = grpc_error_string(error);
gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
@@ -379,9 +380,9 @@ static grpc_resolver *dns_ares_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
grpc_backoff_init(
&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,

@@ -118,8 +118,9 @@ static void fd_node_shutdown(grpc_exec_ctx *exec_ctx, fd_node *fdn) {
gpr_mu_unlock(&fdn->mu);
fd_node_destroy(exec_ctx, fdn);
} else {
grpc_fd_shutdown(exec_ctx, fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"c-ares fd shutdown"));
grpc_fd_shutdown(
exec_ctx, fdn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
gpr_mu_unlock(&fdn->mu);
}
}
@@ -165,8 +166,9 @@ void grpc_ares_ev_driver_shutdown(grpc_exec_ctx *exec_ctx,
ev_driver->shutting_down = true;
fd_node* fn = ev_driver->fds;
while (fn != NULL) {
grpc_fd_shutdown(exec_ctx, fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"grpc_ares_ev_driver_shutdown"));
grpc_fd_shutdown(
exec_ctx, fn->fd,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
fn = fn->next;
}
gpr_mu_unlock(&ev_driver->mu);

@@ -170,7 +170,8 @@ static void dns_on_resolved_locked(grpc_exec_ctx *exec_ctx, void *arg,
grpc_resolved_addresses_destroy(r->addresses);
grpc_lb_addresses_destroy(exec_ctx, addresses);
} else {
grpc_millis next_try = grpc_backoff_step(exec_ctx, &r->backoff_state);
grpc_millis next_try =
grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
grpc_error_string(error));
@@ -256,9 +257,9 @@ static grpc_resolver *dns_create(grpc_exec_ctx *exec_ctx,
grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
args->pollset_set);
}
grpc_backoff_init(&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER,
GRPC_DNS_RECONNECT_JITTER,
grpc_backoff_init(
&r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER,
GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
return &r->base;

@@ -117,10 +117,10 @@ struct grpc_subchannel {
external_state_watcher root_external_state_watcher;
/** next connect attempt time */
grpc_millis next_attempt;
/** backoff state */
grpc_backoff backoff_state;
grpc_backoff_result backoff_result;
/** do we have an active alarm? */
bool have_alarm;
/** have we started the backoff loop */
@@ -276,8 +276,9 @@ void grpc_subchannel_weak_unref(grpc_exec_ctx *exec_ctx,
gpr_atm old_refs;
old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
if (old_refs == 1) {
GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(subchannel_destroy, c,
grpc_schedule_on_exec_ctx),
GRPC_CLOSURE_SCHED(
exec_ctx,
GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
}
@@ -380,7 +381,7 @@ static void continue_connect_locked(grpc_exec_ctx *exec_ctx,
grpc_connect_in_args args;
args.interested_parties = c->pollset_set;
args.deadline = c->next_attempt;
args.deadline = c->backoff_result.current_deadline;
args.channel_args = c->args;
grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
@@ -428,7 +429,7 @@ static void on_alarm(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
}
if (error == GRPC_ERROR_NONE) {
gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
c->next_attempt = grpc_backoff_step(exec_ctx, &c->backoff_state);
c->backoff_result = grpc_backoff_step(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
gpr_mu_unlock(&c->mu);
} else {
@@ -465,20 +466,21 @@ static void maybe_start_connecting_locked(grpc_exec_ctx *exec_ctx,
if (!c->backoff_begun) {
c->backoff_begun = true;
c->next_attempt = grpc_backoff_begin(exec_ctx, &c->backoff_state);
c->backoff_result = grpc_backoff_begin(exec_ctx, &c->backoff_state);
continue_connect_locked(exec_ctx, c);
} else {
GPR_ASSERT(!c->have_alarm);
c->have_alarm = true;
const grpc_millis time_til_next =
c->next_attempt - grpc_exec_ctx_now(exec_ctx);
c->backoff_result.next_attempt_start_time - grpc_exec_ctx_now(exec_ctx);
if (time_til_next <= 0) {
gpr_log(GPR_INFO, "Retry immediately");
} else {
gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
}
GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &c->alarm, c->next_attempt, &c->on_alarm);
grpc_timer_init(exec_ctx, &c->alarm,
c->backoff_result.next_attempt_start_time, &c->on_alarm);
}
}
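subchannel.cc above, like grpclb.cc and the two DNS resolvers, now consumes a struct returned by grpc_backoff_begin()/grpc_backoff_step() instead of a bare grpc_millis, and the "* 1000" added at the grpc_backoff_init() call sites suggests the initial backoff argument is now taken in milliseconds. The backoff.cc/backoff.h hunks fall past this page's truncation point, so the sketch below records only the two fields these call sites prove out; the grpc_millis typedef is a stand-in:

#include <stdint.h>

typedef int64_t grpc_millis; /* stand-in for gRPC's internal clock type */

/* Shape implied by the callers: .current_deadline feeds the connect
 * deadline, .next_attempt_start_time feeds the retry timer. */
typedef struct {
  grpc_millis current_deadline;
  grpc_millis next_attempt_start_time;
} grpc_backoff_result;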

@@ -104,8 +104,7 @@ static long sck_avl_compare(void *a, void *b, void *unused) {
static void scv_avl_destroy(void* p, void* user_data) {
grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel *)p,
"subchannel_index");
GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel*)p, "subchannel_index");
}
static void* scv_avl_copy(void* p, void* unused) {

@@ -250,7 +250,8 @@ static void finish_send_message(grpc_exec_ctx *exec_ctx,
const float savings_ratio = 1.0f - (float)after_size / (float)before_size;
GPR_ASSERT(grpc_compression_algorithm_name(calld->compression_algorithm,
&algo_name));
gpr_log(GPR_DEBUG, "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
gpr_log(GPR_DEBUG,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio);
}

@@ -62,7 +62,9 @@ typedef struct call_data {
grpc_closure hs_recv_message_ready;
} call_data;
typedef struct channel_data { uint8_t unused; } channel_data;
typedef struct channel_data {
uint8_t unused;
} channel_data;
static grpc_error* server_filter_outgoing_metadata(grpc_exec_ctx* exec_ctx,
grpc_call_element* elem,
@@ -241,10 +243,10 @@ static grpc_error *server_filter_incoming_metadata(grpc_exec_ctx *exec_ctx,
grpc_linked_mdelem* el = b->idx.named.host;
grpc_mdelem md = GRPC_MDELEM_REF(el->md);
grpc_metadata_batch_remove(exec_ctx, b, el);
add_error(
error_name, &error,
add_error(error_name, &error,
grpc_metadata_batch_add_head(
exec_ctx, b, el, grpc_mdelem_from_slices(
exec_ctx, b, el,
grpc_mdelem_from_slices(
exec_ctx, GRPC_MDSTR_AUTHORITY,
grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
GRPC_MDELEM_UNREF(exec_ctx, md);

@@ -30,16 +30,34 @@
#include "src/core/lib/surface/channel_init.h"
#include "src/core/lib/transport/service_config.h"
-  typedef struct message_size_limits {
+  typedef struct {
int max_send_size;
int max_recv_size;
} message_size_limits;
-  static void message_size_limits_free(grpc_exec_ctx* exec_ctx, void* value) {
-    gpr_free(value);
-  }
+  typedef struct {
+    gpr_refcount refs;
+    message_size_limits limits;
+  } refcounted_message_size_limits;
+
+  static void* refcounted_message_size_limits_ref(void* value) {
+    refcounted_message_size_limits* limits =
+        (refcounted_message_size_limits*)value;
+    gpr_ref(&limits->refs);
+    return value;
+  }
+
+  static void refcounted_message_size_limits_unref(grpc_exec_ctx* exec_ctx,
+                                                   void* value) {
+    refcounted_message_size_limits* limits =
+        (refcounted_message_size_limits*)value;
+    if (gpr_unref(&limits->refs)) {
+      gpr_free(value);
+    }
+  }
-  static void* message_size_limits_create_from_json(const grpc_json* json) {
+  static void* refcounted_message_size_limits_create_from_json(
+      const grpc_json* json) {
int max_request_message_bytes = -1;
int max_response_message_bytes = -1;
for (grpc_json* field = json->child; field != NULL; field = field->next) {
@@ -60,10 +78,12 @@ static void* message_size_limits_create_from_json(const grpc_json* json) {
if (max_response_message_bytes == -1) return NULL;
}
}
-  message_size_limits* value =
-      (message_size_limits*)gpr_malloc(sizeof(message_size_limits));
-  value->max_send_size = max_request_message_bytes;
-  value->max_recv_size = max_response_message_bytes;
+  refcounted_message_size_limits* value =
+      (refcounted_message_size_limits*)gpr_malloc(
+          sizeof(refcounted_message_size_limits));
+  gpr_ref_init(&value->refs, 1);
+  value->limits.max_send_size = max_request_message_bytes;
+  value->limits.max_recv_size = max_response_message_bytes;
return value;
}
@@ -82,7 +102,7 @@ typedef struct call_data {
typedef struct channel_data {
message_size_limits limits;
-  // Maps path names to message_size_limits structs.
+  // Maps path names to refcounted_message_size_limits structs.
grpc_slice_hash_table* method_limit_table;
} channel_data;
@@ -164,19 +184,19 @@ static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
// size to the receive limit.
calld->limits = chand->limits;
if (chand->method_limit_table != NULL) {
-    message_size_limits* limits =
-        (message_size_limits*)grpc_method_config_table_get(
+    refcounted_message_size_limits* limits =
+        (refcounted_message_size_limits*)grpc_method_config_table_get(
exec_ctx, chand->method_limit_table, args->path);
if (limits != NULL) {
-      if (limits->max_send_size >= 0 &&
-          (limits->max_send_size < calld->limits.max_send_size ||
+      if (limits->limits.max_send_size >= 0 &&
+          (limits->limits.max_send_size < calld->limits.max_send_size ||
calld->limits.max_send_size < 0)) {
-        calld->limits.max_send_size = limits->max_send_size;
+        calld->limits.max_send_size = limits->limits.max_send_size;
}
-      if (limits->max_recv_size >= 0 &&
-          (limits->max_recv_size < calld->limits.max_recv_size ||
+      if (limits->limits.max_recv_size >= 0 &&
+          (limits->limits.max_recv_size < calld->limits.max_recv_size ||
calld->limits.max_recv_size < 0)) {
-        calld->limits.max_recv_size = limits->max_recv_size;
+        calld->limits.max_recv_size = limits->limits.max_recv_size;
}
}
}
@@ -237,8 +257,10 @@ static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
if (service_config != NULL) {
chand->method_limit_table =
grpc_service_config_create_method_config_table(
-            exec_ctx, service_config, message_size_limits_create_from_json,
-            message_size_limits_free);
+            exec_ctx, service_config,
+            refcounted_message_size_limits_create_from_json,
+            refcounted_message_size_limits_ref,
+            refcounted_message_size_limits_unref);
grpc_service_config_destroy(service_config);
}
}
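
With this change one refcounted limits object is shared by every path entry that names it, and the table's destroy hook is simply the unref. A hedged sketch of the resulting ownership pattern, assuming an exec_ctx in scope (the driver sequence is invented for illustration):

    /* Sketch: two table entries sharing one refcounted_message_size_limits. */
    refcounted_message_size_limits* v =
        (refcounted_message_size_limits*)gpr_malloc(sizeof(*v));
    gpr_ref_init(&v->refs, 1);                          /* creator's ref */
    void* e1 = refcounted_message_size_limits_ref(v);   /* refs == 2 */
    void* e2 = refcounted_message_size_limits_ref(v);   /* refs == 3 */
    refcounted_message_size_limits_unref(exec_ctx, v);  /* creator done: 2 */
    refcounted_message_size_limits_unref(exec_ctx, e1); /* entry 1 gone: 1 */
    refcounted_message_size_limits_unref(exec_ctx, e2); /* last ref: freed */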

@@ -98,7 +98,8 @@ grpc_channel *grpc_insecure_channel_create(const char *target,
// Clean up.
grpc_channel_args_destroy(&exec_ctx, new_args);
grpc_exec_ctx_finish(&exec_ctx);
-  return channel != NULL ? channel : grpc_lame_client_channel_create(
+  return channel != NULL ? channel
+                         : grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
}

@@ -62,7 +62,8 @@ grpc_channel *grpc_insecure_channel_create_from_fd(
grpc_exec_ctx_finish(&exec_ctx);
-  return channel != NULL ? channel : grpc_lame_client_channel_create(
+  return channel != NULL ? channel
+                         : grpc_lame_client_channel_create(
target, GRPC_STATUS_INTERNAL,
"Failed to create client channel");
}

@@ -250,8 +250,9 @@ grpc_error *grpc_chttp2_server_add_port(grpc_exec_ctx *exec_ctx,
goto error;
} else if (count != naddrs) {
char* msg;
-    gpr_asprintf(&msg, "Only %" PRIuPTR
-                 " addresses added out of total %" PRIuPTR " resolved",
+    gpr_asprintf(&msg,
+                 "Only %" PRIuPTR " addresses added out of total %" PRIuPTR
+                 " resolved",
count, naddrs);
err = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(msg, errors, naddrs);
gpr_free(msg);

@@ -789,7 +789,8 @@ static void destroy_stream(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
s->destroy_stream_arg = then_schedule_closure;
GRPC_CLOSURE_SCHED(
-      exec_ctx, GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
+      exec_ctx,
+      GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
grpc_combiner_scheduler(t->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("destroy_stream", 0);
@@ -1025,11 +1026,13 @@ static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *gt,
GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
}
set_write_state(
-        exec_ctx, t, r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
+        exec_ctx, t,
+        r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
: GRPC_CHTTP2_WRITE_STATE_WRITING,
begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
-    GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_INIT(&t->write_action,
-                                                   write_action, t, scheduler),
+    GRPC_CLOSURE_SCHED(
+        exec_ctx,
+        GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
GRPC_ERROR_NONE);
} else {
GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
@@ -1772,7 +1775,8 @@ void grpc_chttp2_add_ping_strike(grpc_exec_ctx *exec_ctx,
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
/*The transport will be closed after the write is done */
close_transport_locked(
-        exec_ctx, t, grpc_error_set_int(
+        exec_ctx, t,
+        grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
}
@@ -2955,9 +2959,10 @@ static void incoming_byte_stream_destroy(grpc_exec_ctx *exec_ctx,
grpc_chttp2_incoming_byte_stream* bs =
(grpc_chttp2_incoming_byte_stream*)byte_stream;
GRPC_CLOSURE_SCHED(
-      exec_ctx, GRPC_CLOSURE_INIT(
-                    &bs->destroy_action, incoming_byte_stream_destroy_locked,
-                    bs, grpc_combiner_scheduler(bs->transport->combiner)),
+      exec_ctx,
+      GRPC_CLOSURE_INIT(&bs->destroy_action,
+                        incoming_byte_stream_destroy_locked, bs,
+                        grpc_combiner_scheduler(bs->transport->combiner)),
GRPC_ERROR_NONE);
GPR_TIMER_END("incoming_byte_stream_destroy", 0);
}

@@ -224,8 +224,8 @@ grpc_error* StreamFlowControl::RecvData(int64_t incoming_frame_size) {
incoming_frame_size, acked_stream_window, sent_stream_window);
} else {
char* msg;
-    gpr_asprintf(&msg, "frame of size %" PRId64
-                 " overflows local window of %" PRId64,
+    gpr_asprintf(
+        &msg, "frame of size %" PRId64 " overflows local window of %" PRId64,
incoming_frame_size, acked_stream_window);
grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
gpr_free(msg);

@@ -540,9 +540,8 @@ static void hpack_enc(grpc_exec_ctx *exec_ctx, grpc_chttp2_hpack_compressor *c,
decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
-  void (*maybe_add)(grpc_exec_ctx *, grpc_chttp2_hpack_compressor *,
-                    grpc_mdelem, size_t) =
-      should_add_elem ? add_elem : add_nothing;
+  void (*maybe_add)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, grpc_mdelem,
+                    size_t) = should_add_elem ? add_elem : add_nothing;
void (*emit)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, uint32_t,
grpc_mdelem, framer_state*) =
should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;

@@ -42,8 +42,9 @@ grpc_error *grpc_chttp2_incoming_metadata_buffer_add(
grpc_mdelem elem) {
buffer->size += GRPC_MDELEM_LENGTH(elem);
return grpc_metadata_batch_add_tail(
-      exec_ctx, &buffer->batch, (grpc_linked_mdelem *)gpr_arena_alloc(
-                                    buffer->arena, sizeof(grpc_linked_mdelem)),
+      exec_ctx, &buffer->batch,
+      (grpc_linked_mdelem*)gpr_arena_alloc(buffer->arena,
+                                           sizeof(grpc_linked_mdelem)),
elem);
}

@@ -216,8 +216,9 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
case GRPC_DTS_FRAME:
GPR_ASSERT(cur < end);
if ((uint32_t)(end - cur) == t->incoming_frame_size) {
-        err = parse_frame_slice(
-            exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+        err =
+            parse_frame_slice(exec_ctx, t,
+                              grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
1);
if (err != GRPC_ERROR_NONE) {
@@ -240,8 +241,9 @@ grpc_error *grpc_chttp2_perform_read(grpc_exec_ctx *exec_ctx,
t->incoming_stream = NULL;
goto dts_fh_0; /* loop */
} else {
-        err = parse_frame_slice(
-            exec_ctx, t, grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
+        err =
+            parse_frame_slice(exec_ctx, t,
+                              grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
(size_t)(end - beg)),
0);
if (err != GRPC_ERROR_NONE) {

@@ -526,12 +526,12 @@ static void on_response_headers_received(
grpc_chttp2_incoming_metadata_buffer_init(&s->state.rs.initial_metadata,
s->arena);
for (size_t i = 0; i < headers->count; i++) {
-    GRPC_LOG_IF_ERROR(
-        "on_response_headers_received",
+    GRPC_LOG_IF_ERROR("on_response_headers_received",
grpc_chttp2_incoming_metadata_buffer_add(
&exec_ctx, &s->state.rs.initial_metadata,
grpc_mdelem_from_slices(
-                &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+                &exec_ctx,
+                grpc_slice_intern(grpc_slice_from_static_string(
headers->headers[i].key)),
grpc_slice_intern(grpc_slice_from_static_string(
headers->headers[i].value)))));
@@ -636,12 +636,12 @@ static void on_response_trailers_received(
for (size_t i = 0; i < trailers->count; i++) {
CRONET_LOG(GPR_DEBUG, "trailer key=%s, value=%s", trailers->headers[i].key,
trailers->headers[i].value);
-    GRPC_LOG_IF_ERROR(
-        "on_response_trailers_received",
+    GRPC_LOG_IF_ERROR("on_response_trailers_received",
grpc_chttp2_incoming_metadata_buffer_add(
&exec_ctx, &s->state.rs.trailing_metadata,
grpc_mdelem_from_slices(
-                &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(
+                &exec_ctx,
+                grpc_slice_intern(grpc_slice_from_static_string(
trailers->headers[i].key)),
grpc_slice_intern(grpc_slice_from_static_string(
trailers->headers[i].value)))));
@@ -1207,8 +1207,7 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
if (stream_state->rs.compressed) {
stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
}
-      *((grpc_byte_buffer **)
-            stream_op->payload->recv_message.recv_message) =
+      *((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
(grpc_byte_buffer*)&stream_state->rs.sbs;
GRPC_CLOSURE_SCHED(
exec_ctx, stream_op->payload->recv_message.recv_message_ready,

@@ -972,7 +972,8 @@ static void perform_stream_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
// 4. We want to receive a message and there is a message ready
// 5. There is trailing metadata, even if nothing specifically wants
// that because that can shut down the receive message as well
-  if ((op->send_message && other && ((other->recv_message_op != NULL) ||
+  if ((op->send_message && other &&
+       ((other->recv_message_op != NULL) ||
(other->recv_trailing_md_op != NULL))) ||
(op->send_trailing_metadata && !op->send_message) ||
(op->recv_initial_metadata && s->to_read_initial_md_filled) ||

@@ -20,23 +20,27 @@
#include <grpc/support/useful.h>
-  void grpc_backoff_init(grpc_backoff *backoff,
-                         grpc_millis initial_connect_timeout, double multiplier,
-                         double jitter, grpc_millis min_timeout_millis,
-                         grpc_millis max_timeout_millis) {
-    backoff->initial_connect_timeout = initial_connect_timeout;
+  void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
+                         double multiplier, double jitter,
+                         grpc_millis min_connect_timeout,
+                         grpc_millis max_backoff) {
+    backoff->initial_backoff = initial_backoff;
backoff->multiplier = multiplier;
backoff->jitter = jitter;
-  backoff->min_timeout_millis = min_timeout_millis;
-  backoff->max_timeout_millis = max_timeout_millis;
+  backoff->min_connect_timeout = min_connect_timeout;
+  backoff->max_backoff = max_backoff;
backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
}
-  grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
-    backoff->current_timeout_millis = backoff->initial_connect_timeout;
-    const grpc_millis first_timeout =
-        GPR_MAX(backoff->current_timeout_millis, backoff->min_timeout_millis);
-    return grpc_exec_ctx_now(exec_ctx) + first_timeout;
+  grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
+                                         grpc_backoff* backoff) {
+    backoff->current_backoff = backoff->initial_backoff;
+    const grpc_millis initial_timeout =
+        GPR_MAX(backoff->initial_backoff, backoff->min_connect_timeout);
+    const grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+    const grpc_backoff_result result = {now + initial_timeout,
+                                        now + backoff->current_backoff};
+    return result;
}
/* Generate a random number between 0 and 1. */
@@ -45,29 +49,32 @@ static double generate_uniform_random_number(uint32_t *rng_state) {
return *rng_state / (double)((uint32_t)1 << 31);
}
-  grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff) {
-    const double new_timeout_millis =
-        backoff->multiplier * (double)backoff->current_timeout_millis;
-    backoff->current_timeout_millis =
-        GPR_MIN((grpc_millis)new_timeout_millis, backoff->max_timeout_millis);
-    const double jitter_range_width = backoff->jitter * new_timeout_millis;
-    const double jitter =
-        (2 * generate_uniform_random_number(&backoff->rng_state) - 1) *
-        jitter_range_width;
-    backoff->current_timeout_millis =
-        (grpc_millis)((double)(backoff->current_timeout_millis) + jitter);
-    const grpc_millis current_deadline =
-        grpc_exec_ctx_now(exec_ctx) + backoff->current_timeout_millis;
-    const grpc_millis min_deadline =
-        grpc_exec_ctx_now(exec_ctx) + backoff->min_timeout_millis;
-    return GPR_MAX(current_deadline, min_deadline);
+  static double generate_uniform_random_number_between(uint32_t* rng_state,
+                                                       double a, double b) {
+    if (a == b) return a;
+    if (a > b) GPR_SWAP(double, a, b);  // make sure a < b
+    const double range = b - a;
+    return a + generate_uniform_random_number(rng_state) * range;
+  }
+
+  grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
+                                        grpc_backoff* backoff) {
+    backoff->current_backoff = (grpc_millis)(GPR_MIN(
+        backoff->current_backoff * backoff->multiplier, backoff->max_backoff));
+    const double jitter = generate_uniform_random_number_between(
+        &backoff->rng_state, -backoff->jitter * backoff->current_backoff,
+        backoff->jitter * backoff->current_backoff);
+    const grpc_millis current_timeout =
+        GPR_MAX((grpc_millis)(backoff->current_backoff + jitter),
+                backoff->min_connect_timeout);
+    const grpc_millis next_timeout = GPR_MIN(
+        (grpc_millis)(backoff->current_backoff + jitter), backoff->max_backoff);
+    const grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+    const grpc_backoff_result result = {now + current_timeout,
+                                        now + next_timeout};
+    return result;
}
void grpc_backoff_reset(grpc_backoff* backoff) {
-  backoff->current_timeout_millis = backoff->initial_connect_timeout;
+  backoff->current_backoff = backoff->initial_backoff;
}
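
As a concrete check of the new step logic (illustrative numbers, not values taken from this diff): with initial_backoff = 1000 ms, multiplier = 1.6, jitter = 0.2, min_connect_timeout = 20000 ms, and max_backoff = 120000 ms, the first grpc_backoff_step sets current_backoff = min(1000 * 1.6, 120000) = 1600 ms and draws jitter uniformly from [-320, 320] ms. The returned next_attempt_start_time therefore lands between now + 1280 ms and now + 1920 ms, while current_deadline is clamped up to now + 20000 ms by min_connect_timeout, which stops mattering only once the backoff grows past 20 s.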

@@ -27,36 +27,53 @@ extern "C" {
typedef struct {
/// const: how long to wait after the first failure before retrying
-  grpc_millis initial_connect_timeout;
+  grpc_millis initial_backoff;
/// const: factor with which to multiply backoff after a failed retry
double multiplier;
/// const: amount to randomize backoffs
double jitter;
-  /// const: minimum time between retries in milliseconds
-  grpc_millis min_timeout_millis;
-  /// const: maximum time between retries in milliseconds
-  grpc_millis max_timeout_millis;
+  /// const: minimum time between retries
+  grpc_millis min_connect_timeout;
+  /// const: maximum time between retries
+  grpc_millis max_backoff;
+
+  /// current delay before retries
+  grpc_millis current_backoff;
/// random number generator
uint32_t rng_state;
-  /// current retry timeout in milliseconds
-  grpc_millis current_timeout_millis;
} grpc_backoff;
+ typedef struct {
+   /// Deadline to be used for the current attempt.
+   grpc_millis current_deadline;
+   /// Deadline to be used for the next attempt, following the backoff strategy.
+   grpc_millis next_attempt_start_time;
+ } grpc_backoff_result;
/// Initialize backoff machinery - does not need to be destroyed
-  void grpc_backoff_init(grpc_backoff *backoff,
-                         grpc_millis initial_connect_timeout, double multiplier,
-                         double jitter, grpc_millis min_timeout_millis,
-                         grpc_millis max_timeout_millis);
-  /// Begin retry loop: returns a timespec for the NEXT retry
-  grpc_millis grpc_backoff_begin(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
-  /// Step a retry loop: returns a timespec for the NEXT retry
-  grpc_millis grpc_backoff_step(grpc_exec_ctx *exec_ctx, grpc_backoff *backoff);
+  void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
+                         double multiplier, double jitter,
+                         grpc_millis min_connect_timeout,
+                         grpc_millis max_backoff);
+  /// Begin retry loop: returns the deadlines to be used for the current attempt
+  /// and the subsequent retry, if any.
+  grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
+                                         grpc_backoff* backoff);
+  /// Step a retry loop: returns the deadlines to be used for the current attempt
+  /// and the subsequent retry, if any.
+  grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
+                                        grpc_backoff* backoff);
/// Reset the backoff, so the next grpc_backoff_step will be a
-  /// grpc_backoff_begin
-  /// instead
+  /// grpc_backoff_begin.
void grpc_backoff_reset(grpc_backoff* backoff);
#ifdef __cplusplus

@@ -104,9 +104,8 @@ grpc_error *grpc_channel_stack_init(
GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
name);
elems = CHANNEL_ELEMS_FROM_STACK(stack);
-  user_data =
-      ((char *)elems) +
-      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));
+  user_data = ((char*)elems) + ROUND_UP_TO_ALIGNMENT_SIZE(
+                                   filter_count * sizeof(grpc_channel_element));
/* init per-filter data */
grpc_error* first_error = GRPC_ERROR_NONE;

@@ -119,8 +119,8 @@ static double threshold_for_count_below(const gpr_atm *bucket_counts,
should lie */
lower_bound = bucket_boundaries[lower_idx];
upper_bound = bucket_boundaries[lower_idx + 1];
-    return upper_bound -
-           (upper_bound - lower_bound) * (count_so_far - count_below) /
+    return upper_bound - (upper_bound - lower_bound) *
+                             (count_so_far - count_below) /
(double)bucket_counts[lower_idx];
}
}

@@ -123,8 +123,10 @@ const char *grpc_stats_counter_name[GRPC_STATS_COUNTER_COUNT] = {
const char* grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of client side calls created by this process",
"Number of server side calls created by this process",
-    "Number of completion queues created", "Number of client channels created",
-    "Number of client subchannels created", "Number of server channels created",
+    "Number of completion queues created",
+    "Number of client channels created",
+    "Number of client subchannels created",
+    "Number of server channels created",
"Number of polling syscalls (epoll_wait, poll, etc) made by this process",
"Number of sleeping syscalls made by this process",
"How many polling wakeups were performed by the process (only valid for "
@@ -154,7 +156,8 @@ const char *grpc_stats_counter_doc[GRPC_STATS_COUNTER_COUNT] = {
"Number of batches containing receive initial metadata",
"Number of batches containing receive message",
"Number of batches containing receive trailing metadata",
-    "Number of settings frames sent", "Number of HTTP2 pings sent by process",
+    "Number of settings frames sent",
+    "Number of HTTP2 pings sent by process",
"Number of HTTP2 writes initiated",
"Number of HTTP2 writes offloaded to the executor from application threads",
"Number of HTTP2 writes that finished seeing more data needed to be "

@@ -178,8 +178,9 @@ static void on_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
internal_request* req = (internal_request*)arg;
if (!ep) {
-    next_address(exec_ctx, req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                    "Unexplained handshake failure"));
+    next_address(
+        exec_ctx, req,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexplained handshake failure"));
return;
}

@@ -54,8 +54,8 @@ static void create_sockets(SOCKET sv[2]) {
WSA_FLAG_OVERLAPPED);
GPR_ASSERT(cli_sock != INVALID_SOCKET);
-  GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr *)&addr, addr_len, NULL,
-                        NULL, NULL, NULL) == 0);
+  GPR_ASSERT(WSAConnect(cli_sock, (struct sockaddr*)&addr, addr_len, NULL, NULL,
+                        NULL, NULL) == 0);
svr_sock = accept(lst_sock, (struct sockaddr*)&addr, &addr_len);
GPR_ASSERT(svr_sock != INVALID_SOCKET);

@@ -712,8 +712,8 @@ static char *finish_kvs(kv_pairs *kvs) {
append_chr('{', &s, &sz, &cap);
for (size_t i = 0; i < kvs->num_kvs; i++) {
if (i != 0) append_chr(',', &s, &sz, &cap);
-    append_esc_str((const uint8_t *)kvs->kvs[i].key, strlen(kvs->kvs[i].key),
-                   &s, &sz, &cap);
+    append_esc_str((const uint8_t*)kvs->kvs[i].key, strlen(kvs->kvs[i].key), &s,
+                   &sz, &cap);
gpr_free(kvs->kvs[i].key);
append_chr(':', &s, &sz, &cap);
append_str(kvs->kvs[i].value, &s, &sz, &cap);

@@ -444,8 +444,8 @@ static grpc_error *pollset_global_init(void) {
return GRPC_OS_ERROR(errno, "epoll_ctl");
}
g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
-  g_neighborhoods = (pollset_neighborhood *)gpr_zalloc(
-      sizeof(*g_neighborhoods) * g_num_neighborhoods);
+  g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
+                                                      g_num_neighborhoods);
for (size_t i = 0; i < g_num_neighborhoods; i++) {
gpr_mu_init(&g_neighborhoods[i].mu);
}
@@ -996,10 +996,10 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_strvec log;
gpr_strvec_init(&log);
char* tmp;
-    gpr_asprintf(
-        &tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
+    gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
-        (void *)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
+        (void*)gpr_tls_get(&g_current_thread_worker),
+        pollset->root_worker);
gpr_strvec_add(&log, tmp);
if (pollset->root_worker != NULL) {
gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",

@@ -307,8 +307,9 @@ static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
#endif
gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
if (old == n) {
-    GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
-                                                     grpc_schedule_on_exec_ctx),
+    GRPC_CLOSURE_SCHED(
+        exec_ctx,
+        GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
} else {
GPR_ASSERT(old > n);
@@ -624,8 +625,7 @@ static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset),
-        (void *)gpr_tls_get(&g_current_thread_worker),
-        pollset->root_worker);
+        (void*)gpr_tls_get(&g_current_thread_worker), pollset->root_worker);
}
if (specific_worker == NULL) {
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
@@ -984,8 +984,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
WORKER_PTR->originator = gettid();
#endif
if (GRPC_TRACER_ON(grpc_polling_trace)) {
-    gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
-            " deadline=%" PRIdPTR " kwp=%d pollable=%p",
+    gpr_log(GPR_DEBUG,
+            "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
+            " kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
deadline, pollset->kicked_without_poller, pollset->active_pollable);
}
@@ -999,8 +1000,9 @@ static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
if (WORKER_PTR->pollable_obj->event_cursor ==
WORKER_PTR->pollable_obj->event_count) {
-      append_error(&error, pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj,
-                                          deadline),
+      append_error(
+          &error,
+          pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj, deadline),
err_desc);
}
append_error(&error,
@@ -1368,13 +1370,15 @@ static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
}
size_t initial_a_fd_count = a->fd_count;
a->fd_count = 0;
-  append_error(&error, add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count,
-                                           b->pollsets, b->pollset_count,
-                                           "merge_a2b", a->fds, &a->fd_count),
+  append_error(
+      &error,
+      add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
+                          b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
err_desc);
-  append_error(&error, add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count,
-                                           a->pollsets, a->pollset_count,
-                                           "merge_b2a", a->fds, &a->fd_count),
+  append_error(
+      &error,
+      add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count, a->pollsets,
+                          a->pollset_count, "merge_b2a", a->fds, &a->fd_count),
err_desc);
if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
a->pollset_capacity =

@@ -291,7 +291,8 @@ static void pi_add_ref_dbg(polling_island *pi, const char *reason,
const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
-    gpr_log(GPR_DEBUG, "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+    gpr_log(GPR_DEBUG,
+            "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line);
}
@@ -302,7 +303,8 @@ static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
const char* reason, const char* file, int line) {
if (GRPC_TRACER_ON(grpc_polling_trace)) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
-    gpr_log(GPR_DEBUG, "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
+    gpr_log(GPR_DEBUG,
+            "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line);
}
@@ -1486,8 +1488,7 @@ retry:
} else {
GRPC_POLLING_TRACE(
"add_poll_object: Same polling island. pi: %p (%s, %s)",
-          (void *)pi_new, poll_obj_string(item_type),
-          poll_obj_string(bag_type));
+          (void*)pi_new, poll_obj_string(item_type), poll_obj_string(bag_type));
}
} else if (item->pi == NULL) {
/* GPR_ASSERT(bag->pi != NULL) */

@@ -1623,8 +1623,7 @@ static void global_cv_fd_table_init() {
gpr_cv_init(&g_cvfds.shutdown_cv);
gpr_ref_init(&g_cvfds.pollcount, 1);
g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
-  g_cvfds.cvfds =
-      (fd_node *)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+  g_cvfds.cvfds = (fd_node*)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
g_cvfds.free_fds = NULL;
thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {

@@ -109,7 +109,8 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
}
if (g_root_object.next != &g_root_object) {
if (grpc_iomgr_abort_on_leaks()) {
-      gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
+      gpr_log(GPR_DEBUG,
+              "Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());
@@ -121,7 +122,8 @@ void grpc_iomgr_shutdown(grpc_exec_ctx *exec_ctx) {
if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
if (g_root_object.next != &g_root_object) {
-          gpr_log(GPR_DEBUG, "Failed to free %" PRIuPTR
+          gpr_log(GPR_DEBUG,
+                  "Failed to free %" PRIuPTR
" iomgr objects before shutdown deadline: "
"memory leaks are likely",
count_objects());

@@ -47,8 +47,8 @@ grpc_error *grpc_load_file(const char *filename, int add_null_terminator,
/* Converting to size_t on the assumption that it will not fail */
contents_size = (size_t)ftell(file);
fseek(file, 0, SEEK_SET);
-  contents = (unsigned char *)gpr_malloc(contents_size +
-                                         (add_null_terminator ? 1 : 0));
+  contents =
+      (unsigned char*)gpr_malloc(contents_size + (add_null_terminator ? 1 : 0));
bytes_read = fread(contents, 1, contents_size, file);
if (bytes_read < contents_size) {
error = GRPC_OS_ERROR(errno, "fread");

@@ -161,8 +161,10 @@ grpc_error *grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
while (!worker.kicked) {
if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
+        grpc_exec_ctx_invalidate_now(exec_ctx);
break;
}
+      grpc_exec_ctx_invalidate_now(exec_ctx);
}
} else {
pollset->kicked_without_pollers = 0;
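
The two added calls matter because gpr_cv_wait can block for a long time while grpc_exec_ctx caches "now"; invalidating the cache after every wakeup forces the next grpc_exec_ctx_now() to re-read the clock. A minimal sketch of the pattern, with a hypothetical wait_for_event() standing in for the blocking call:

    /* Sketch: after any call that can sleep, drop the cached clock so
       deadline math uses fresh time. wait_for_event() is invented. */
    grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 100;
    while (!done) {
      wait_for_event(deadline);                /* may block */
      grpc_exec_ctx_invalidate_now(exec_ctx);  /* un-cache "now" */
      if (grpc_exec_ctx_now(exec_ctx) >= deadline) break; /* fresh read */
    }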

@@ -277,8 +277,7 @@ static void rq_update_estimate(grpc_resource_quota *resource_quota) {
gpr_atm memory_usage_estimation = MEMORY_USAGE_ESTIMATION_MAX;
if (resource_quota->size != 0) {
memory_usage_estimation =
-        GPR_CLAMP((gpr_atm)((1.0 -
-                             ((double)resource_quota->free_pool) /
+        GPR_CLAMP((gpr_atm)((1.0 - ((double)resource_quota->free_pool) /
((double)resource_quota->size)) *
MEMORY_USAGE_ESTIMATION_MAX),
0, MEMORY_USAGE_ESTIMATION_MAX);
@@ -295,7 +294,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
-      gpr_log(GPR_DEBUG, "RQ: check allocation for user %p shutdown=%" PRIdPTR
+      gpr_log(GPR_DEBUG,
+              "RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
resource_user->free_pool);
@@ -320,7 +320,8 @@ static bool rq_alloc(grpc_exec_ctx *exec_ctx,
resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
-        gpr_log(GPR_DEBUG, "RQ %s %s: grant alloc %" PRId64
+        gpr_log(GPR_DEBUG,
+                "RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);
@@ -357,7 +358,8 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt;
rq_update_estimate(resource_quota);
if (GRPC_TRACER_ON(grpc_resource_quota_trace)) {
-      gpr_log(GPR_DEBUG, "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
+      gpr_log(GPR_DEBUG,
+              "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt,
resource_quota->free_pool);

@@ -132,8 +132,7 @@ void grpc_sockaddr_make_wildcard4(int port,
void grpc_sockaddr_make_wildcard6(int port,
grpc_resolved_address* resolved_wild_out) {
-  struct sockaddr_in6 *wild_out =
-      (struct sockaddr_in6 *)resolved_wild_out->addr;
+  struct sockaddr_in6* wild_out = (struct sockaddr_in6*)resolved_wild_out->addr;
GPR_ASSERT(port >= 0 && port < 65536);
memset(resolved_wild_out, 0, sizeof(*resolved_wild_out));
wild_out->sin6_family = AF_INET6;

@@ -106,8 +106,9 @@ static void tc_on_alarm(grpc_exec_ctx *exec_ctx, void *acp, grpc_error *error) {
}
gpr_mu_lock(&ac->mu);
if (ac->fd != NULL) {
-    grpc_fd_shutdown(exec_ctx, ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                           "connect() timed out"));
+    grpc_fd_shutdown(
+        exec_ctx, ac->fd,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
@@ -279,8 +280,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
do {
GPR_ASSERT(addr->len < ~(socklen_t)0);
-    err =
-        connect(fd, (const struct sockaddr *)addr->addr, (socklen_t)addr->len);
+    err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
} while (err < 0 && errno == EINTR);
addr_str = grpc_sockaddr_to_uri(addr);

@@ -154,8 +154,7 @@ static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
// TODO(murgatroid99): figure out what the return value here means
uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
-                 (const struct sockaddr *)resolved_addr->addr,
-                 uv_tc_on_connect);
+                 (const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
grpc_schedule_on_exec_ctx);
grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);

@@ -172,8 +172,8 @@ static void tcp_client_connect_impl(
grpc_sockaddr_make_wildcard6(0, &local_address);
-  status = bind(sock, (struct sockaddr *)&local_address.addr,
-                (int)local_address.len);
+  status =
+      bind(sock, (struct sockaddr*)&local_address.addr, (int)local_address.len);
if (status != 0) {
error = GRPC_WSA_ERROR(WSAGetLastError(), "bind");
goto failure;
@@ -181,8 +181,8 @@ static void tcp_client_connect_impl(
socket = grpc_winsocket_create(sock, "client");
info = &socket->write_info;
-  success = ConnectEx(sock, (struct sockaddr *)&addr->addr, (int)addr->len,
-                      NULL, 0, NULL, &info->overlapped);
+  success = ConnectEx(sock, (struct sockaddr*)&addr->addr, (int)addr->len, NULL,
+                      0, NULL, &info->overlapped);
/* It wouldn't be unusual to get a success immediately. But we'll still get
an IOCP notification, so let's ignore it. */

@@ -185,8 +185,9 @@ static void tcp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
if (s->active_ports) {
grpc_tcp_listener* sp;
for (sp = s->head; sp; sp = sp->next) {
-      grpc_fd_shutdown(exec_ctx, sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                               "Server destroyed"));
+      grpc_fd_shutdown(
+          exec_ctx, sp->emfd,
+          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
}
gpr_mu_unlock(&s->mu);
} else {

@@ -356,8 +356,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
(int*)&sockname_temp.len)) {
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
-        allocated_addr = (grpc_resolved_address *)gpr_malloc(
-            sizeof(grpc_resolved_address));
+        allocated_addr =
+            (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;

@@ -138,8 +138,9 @@ static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
}
-  GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(destroy_server, s,
-                                                   grpc_schedule_on_exec_ctx),
+  GRPC_CLOSURE_SCHED(
+      exec_ctx,
+      GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx),
GRPC_ERROR_NONE);
}
@@ -346,8 +347,7 @@ static void on_accept(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
gpr_free(utf8_message);
}
int peer_name_len = (int)peer_name.len;
-    err =
-        getpeername(sock, (struct sockaddr *)peer_name.addr, &peer_name_len);
+    err = getpeername(sock, (struct sockaddr*)peer_name.addr, &peer_name_len);
peer_name.len = (size_t)peer_name_len;
if (!err) {
peer_name_string = grpc_sockaddr_to_uri(&peer_name);
@@ -475,8 +475,8 @@ grpc_error *grpc_tcp_server_add_port(grpc_tcp_server *s,
sockname_temp.len = (size_t)sockname_temp_len;
*port = grpc_sockaddr_get_port(&sockname_temp);
if (*port > 0) {
-        allocated_addr = (grpc_resolved_address *)gpr_malloc(
-            sizeof(grpc_resolved_address));
+        allocated_addr =
+            (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, *port);
addr = allocated_addr;

@@ -252,8 +252,9 @@ static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
}
if (tcp->shutting_down) {
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                                         "TCP socket is shutting down"));
+    GRPC_CLOSURE_SCHED(
+        exec_ctx, cb,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP socket is shutting down"));
return;
}

@@ -368,7 +368,8 @@ void grpc_timer_init(grpc_exec_ctx *exec_ctx, grpc_timer *timer,
list_join(&shard->list, timer);
}
if (GRPC_TRACER_ON(grpc_timer_trace)) {
-    gpr_log(GPR_DEBUG, "  .. add to shard %d with queue_deadline_cap=%" PRIdPTR
+    gpr_log(GPR_DEBUG,
+            "  .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s",
(int)(shard - g_shards), shard->queue_deadline_cap,
is_first_timer ? "true" : "false");
@@ -633,8 +634,9 @@ grpc_timer_check_result grpc_timer_check(grpc_exec_ctx *exec_ctx,
} else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next);
}
-    gpr_log(GPR_DEBUG, "TIMER CHECK BEGIN: now=%" PRIdPTR
-            " next=%s tls_min=%" PRIdPTR " glob_min=%" PRIdPTR,
+    gpr_log(GPR_DEBUG,
+            "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
+            " glob_min=%" PRIdPTR,
now, next_str, gpr_tls_get(&g_last_seen_min_timer),
gpr_atm_no_barrier_load(&g_shared_mutables.min_timer));
gpr_free(next_str);

@@ -53,9 +53,8 @@ static void adjust_downwards(grpc_timer **first, uint32_t i, uint32_t length,
uint32_t left_child = 1u + 2u * i;
if (left_child >= length) break;
uint32_t right_child = left_child + 1;
-    uint32_t next_i =
-        right_child < length &&
-        first[left_child]->deadline > first[right_child]->deadline
+    uint32_t next_i = right_child < length && first[left_child]->deadline >
+                                                  first[right_child]->deadline
? right_child
: left_child;
if (t->deadline <= first[next_i]->deadline) break;

@@ -258,8 +258,7 @@ static int bind_socket(grpc_socket_factory *socket_factory, int sockfd,
const grpc_resolved_address* addr) {
return (socket_factory != NULL)
? grpc_socket_factory_bind(socket_factory, sockfd, addr)
-             : bind(sockfd, (struct sockaddr *)addr->addr,
-                    (socklen_t)addr->len);
+             : bind(sockfd, (struct sockaddr*)addr->addr, (socklen_t)addr->len);
}
/* Prepare a recently-created socket for listening. */
@@ -445,8 +444,8 @@ int grpc_udp_server_add_port(grpc_udp_server *s,
(socklen_t*)&sockname_temp.len)) {
port = grpc_sockaddr_get_port(&sockname_temp);
if (port > 0) {
-        allocated_addr = (grpc_resolved_address *)gpr_malloc(
-            sizeof(grpc_resolved_address));
+        allocated_addr =
+            (grpc_resolved_address*)gpr_malloc(sizeof(grpc_resolved_address));
memcpy(allocated_addr, addr, sizeof(grpc_resolved_address));
grpc_sockaddr_set_port(allocated_addr, port);
addr = allocated_addr;

@@ -273,7 +273,8 @@ void grpc_auth_context_add_property(grpc_auth_context *ctx, const char *name,
GRPC_API_TRACE(
"grpc_auth_context_add_property(ctx=%p, name=%s, value=%*.*s, "
"value_length=%lu)",
-      6, (ctx, name, (int)value_length, (int)value_length, value,
+      6,
+      (ctx, name, (int)value_length, (int)value_length, value,
(unsigned long)value_length));
ensure_auth_context_capacity(ctx);
prop = &ctx->properties.array[ctx->properties.count++];

@@ -131,8 +131,7 @@ static grpc_call_credentials_vtable md_only_test_vtable = {
grpc_call_credentials* grpc_md_only_test_credentials_create(
grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
bool is_async) {
-  grpc_md_only_test_credentials *c =
-      (grpc_md_only_test_credentials *)gpr_zalloc(
+  grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)gpr_zalloc(
sizeof(grpc_md_only_test_credentials));
c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
c->base.vtable = &md_only_test_vtable;

@@ -511,8 +511,7 @@ static grpc_call_credentials_vtable access_token_vtable = {
grpc_call_credentials* grpc_access_token_credentials_create(
const char* access_token, void* reserved) {
-  grpc_access_token_credentials *c =
-      (grpc_access_token_credentials *)gpr_zalloc(
+  grpc_access_token_credentials* c = (grpc_access_token_credentials*)gpr_zalloc(
sizeof(grpc_access_token_credentials));
GRPC_API_TRACE(
"grpc_access_token_credentials_create(access_token=<redacted>, "

@@ -258,8 +258,7 @@ static grpc_call_credentials_vtable plugin_vtable = {
grpc_call_credentials* grpc_metadata_credentials_create_from_plugin(
grpc_metadata_credentials_plugin plugin, void* reserved) {
-  grpc_plugin_credentials *c =
-      (grpc_plugin_credentials *)gpr_zalloc(sizeof(*c));
+  grpc_plugin_credentials* c = (grpc_plugin_credentials*)gpr_zalloc(sizeof(*c));
GRPC_API_TRACE("grpc_metadata_credentials_create_from_plugin(reserved=%p)", 1,
(reserved));
GPR_ASSERT(reserved == NULL);

@@ -274,7 +274,8 @@ grpc_server_credentials *grpc_ssl_server_credentials_create_ex(
"grpc_ssl_server_credentials_create_ex("
"pem_root_certs=%s, pem_key_cert_pairs=%p, num_key_cert_pairs=%lu, "
"client_certificate_request=%d, reserved=%p)",
-      5, (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
+      5,
+      (pem_root_certs, pem_key_cert_pairs, (unsigned long)num_key_cert_pairs,
client_certificate_request, reserved));
GPR_ASSERT(reserved == NULL);

@@ -19,6 +19,7 @@
#ifndef GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H
#define GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H
+ #include <grpc/grpc_security.h>
#include "src/core/lib/channel/channel_stack.h"
#ifdef __cplusplus
@@ -28,6 +29,13 @@ extern "C" {
extern const grpc_channel_filter grpc_client_auth_filter;
extern const grpc_channel_filter grpc_server_auth_filter;
+ void grpc_auth_metadata_context_build(
+     const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
+     grpc_auth_context* auth_context,
+     grpc_auth_metadata_context* auth_md_context);
+
+ void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context);
#ifdef __cplusplus
}
#endif
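
The newly exported pair lets code outside the client auth filter derive the service URL and method name that credentials plugins see. A hedged sketch of a caller; the scheme, host, and method strings here are invented for illustration, and GRPC_SSL_URL_SCHEME is assumed to be "https" as elsewhere in the tree:

    /* Sketch: build the context for a call to foo.test. */
    grpc_auth_metadata_context ctx;
    memset(&ctx, 0, sizeof(ctx)); /* reset() frees non-NULL fields */
    grpc_auth_metadata_context_build(
        "https", grpc_slice_from_static_string("foo.test:443"),
        grpc_slice_from_static_string("/helloworld.Greeter/SayHello"),
        /*auth_context=*/NULL, &ctx);
    /* Expected: ctx.service_url == "https://foo.test/helloworld.Greeter"
       (the :443 port is stripped for the ssl scheme) and
       ctx.method_name == "SayHello". */
    grpc_auth_metadata_context_reset(&ctx);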

@@ -65,7 +65,7 @@ typedef struct {
grpc_auth_context* auth_context;
} channel_data;
-  static void reset_auth_metadata_context(
+  void grpc_auth_metadata_context_reset(
grpc_auth_metadata_context* auth_md_context) {
if (auth_md_context->service_url != NULL) {
gpr_free((char*)auth_md_context->service_url);
@@ -96,7 +96,7 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
grpc_call_element* elem =
(grpc_call_element*)batch->handler_private.extra_arg;
call_data* calld = (call_data*)elem->call_data;
-  reset_auth_metadata_context(&calld->auth_md_context);
+  grpc_auth_metadata_context_reset(&calld->auth_md_context);
grpc_error* error = GRPC_ERROR_REF(input_error);
if (error == GRPC_ERROR_NONE) {
GPR_ASSERT(calld->md_array.size <= MAX_CREDENTIALS_METADATA_COUNT);
@@ -119,34 +119,41 @@ static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *arg,
}
}
-  void build_auth_metadata_context(grpc_security_connector *sc,
-                                   grpc_auth_context *auth_context,
-                                   call_data *calld) {
-    char *service = grpc_slice_to_c_string(calld->method);
+  void grpc_auth_metadata_context_build(
+      const char* url_scheme, grpc_slice call_host, grpc_slice call_method,
+      grpc_auth_context* auth_context,
+      grpc_auth_metadata_context* auth_md_context) {
+    char* service = grpc_slice_to_c_string(call_method);
char* last_slash = strrchr(service, '/');
char* method_name = NULL;
char* service_url = NULL;
-    reset_auth_metadata_context(&calld->auth_md_context);
+    grpc_auth_metadata_context_reset(auth_md_context);
if (last_slash == NULL) {
gpr_log(GPR_ERROR, "No '/' found in fully qualified method name");
service[0] = '\0';
+      method_name = gpr_strdup("");
} else if (last_slash == service) {
/* No service part in fully qualified method name: will just be "/". */
service[1] = '\0';
+      method_name = gpr_strdup("");
} else {
*last_slash = '\0';
method_name = gpr_strdup(last_slash + 1);
}
-    if (method_name == NULL) method_name = gpr_strdup("");
-    char *host = grpc_slice_to_c_string(calld->host);
-    gpr_asprintf(&service_url, "%s://%s%s",
-                 sc->url_scheme == NULL ? "" : sc->url_scheme, host, service);
-    calld->auth_md_context.service_url = service_url;
-    calld->auth_md_context.method_name = method_name;
-    calld->auth_md_context.channel_auth_context =
+    char* host_and_port = grpc_slice_to_c_string(call_host);
+    if (strcmp(url_scheme, GRPC_SSL_URL_SCHEME) == 0) {
+      /* Remove the port if it is 443. */
+      char* port_delimiter = strrchr(host_and_port, ':');
+      if (port_delimiter != NULL && strcmp(port_delimiter + 1, "443") == 0) {
+        *port_delimiter = '\0';
+      }
+    }
+    gpr_asprintf(&service_url, "%s://%s%s", url_scheme == NULL ? "" : url_scheme,
+                 host_and_port, service);
+    auth_md_context->service_url = service_url;
+    auth_md_context->method_name = method_name;
+    auth_md_context->channel_auth_context =
GRPC_AUTH_CONTEXT_REF(auth_context, "grpc_auth_metadata_context");
gpr_free(service);
-    gpr_free(host);
+    gpr_free(host_and_port);
}
static void cancel_get_request_metadata(grpc_exec_ctx* exec_ctx, void* arg,
@@ -198,8 +205,9 @@ static void send_security_metadata(grpc_exec_ctx *exec_ctx,
call_creds_has_md ? ctx->creds : channel_call_creds);
}
-  build_auth_metadata_context(&chand->security_connector->base,
-                              chand->auth_context, calld);
+  grpc_auth_metadata_context_build(
+      chand->security_connector->base.url_scheme, calld->host, calld->method,
+      chand->auth_context, &calld->auth_md_context);
GPR_ASSERT(calld->pollent != NULL);
@@ -369,7 +377,7 @@ static void destroy_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
if (calld->have_method) {
grpc_slice_unref_internal(exec_ctx, calld->method);
}
-  reset_auth_metadata_context(&calld->auth_md_context);
+  grpc_auth_metadata_context_reset(&calld->auth_md_context);
}
/* Constructor for channel_data */

@@ -156,7 +156,8 @@ static void on_read(grpc_exec_ctx *exec_ctx, void *user_data,
if (error != GRPC_ERROR_NONE) {
grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
-    call_read_cb(exec_ctx, ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+    call_read_cb(exec_ctx, ep,
+                 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Secure read failed", &error, 1));
return;
}

@@ -252,8 +252,8 @@ static const grpc_arg_pointer_vtable connector_arg_vtable = {
connector_arg_copy, connector_arg_destroy, connector_cmp};
grpc_arg grpc_security_connector_to_arg(grpc_security_connector* sc) {
-  return grpc_channel_arg_pointer_create((char *)GRPC_ARG_SECURITY_CONNECTOR,
-                                         sc, &connector_arg_vtable);
+  return grpc_channel_arg_pointer_create((char*)GRPC_ARG_SECURITY_CONNECTOR, sc,
+                                         &connector_arg_vtable);
}
grpc_security_connector* grpc_security_connector_from_arg(const grpc_arg* arg) {
@@ -812,7 +812,8 @@ static void ssl_channel_check_peer(grpc_exec_ctx *exec_ctx,
grpc_closure* on_peer_checked) {
grpc_ssl_channel_security_connector* c =
(grpc_ssl_channel_security_connector*)sc;
-  grpc_error *error = ssl_check_peer(sc, c->overridden_target_name != NULL
+  grpc_error* error = ssl_check_peer(sc,
+                                     c->overridden_target_name != NULL
? c->overridden_target_name
: c->target_name,
&peer, auth_context);
@@ -873,8 +874,8 @@ tsi_peer tsi_shallow_peer_from_ssl_auth_context(
while (grpc_auth_property_iterator_next(&it) != NULL) max_num_props++;
if (max_num_props > 0) {
-    peer.properties = (tsi_peer_property *)gpr_malloc(
-        max_num_props * sizeof(tsi_peer_property));
+    peer.properties = (tsi_peer_property*)gpr_malloc(max_num_props *
+                                                     sizeof(tsi_peer_property));
it = grpc_auth_context_property_iterator(auth_context);
while ((prop = grpc_auth_property_iterator_next(&it)) != NULL) {
if (strcmp(prop->name, GRPC_X509_SAN_PROPERTY_NAME) == 0) {

@@ -304,7 +304,8 @@ static void on_handshake_data_received_from_peer(grpc_exec_ctx *exec_ctx,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
-        exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+        exec_ctx, h,
+        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Handshake read failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);
@@ -347,7 +348,8 @@ static void on_handshake_data_sent_to_peer(grpc_exec_ctx *exec_ctx, void *arg,
gpr_mu_lock(&h->mu);
if (error != GRPC_ERROR_NONE || h->shutdown) {
security_handshake_failed_locked(
-        exec_ctx, h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+        exec_ctx, h,
+        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Handshake write failed", &error, 1));
gpr_mu_unlock(&h->mu);
security_handshaker_unref(exec_ctx, h);

@@ -59,7 +59,8 @@ static long node_height(gpr_avl_node *node) {
#ifndef NDEBUG
static long calculate_height(gpr_avl_node* node) {
-  return node == NULL ? 0 : 1 + GPR_MAX(calculate_height(node->left),
+  return node == NULL ? 0
+                      : 1 + GPR_MAX(calculate_height(node->left),
calculate_height(node->right));
}

@@ -183,8 +183,7 @@ static double threshold_for_count_below(gpr_histogram *h, double count_below) {
should lie */
lower_bound = bucket_start(h, (double)lower_idx);
upper_bound = bucket_start(h, (double)(lower_idx + 1));
-    return GPR_CLAMP(upper_bound -
-                         (upper_bound - lower_bound) *
+    return GPR_CLAMP(upper_bound - (upper_bound - lower_bound) *
(count_so_far - count_below) /
h->buckets[lower_idx],
h->min_seen, h->max_seen);

@@ -27,7 +27,6 @@
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
- #include <stdio.h>
#include <string.h>
#include <time.h>

@@ -33,7 +33,9 @@ extern "C" {
// List node (include this in a data structure at the top, and add application
// fields after it - to simulate inheritance)
-  typedef struct gpr_mpscq_node { gpr_atm next; } gpr_mpscq_node;
+  typedef struct gpr_mpscq_node {
+    gpr_atm next;
+  } gpr_mpscq_node;
// Actual queue type
typedef struct gpr_mpscq {

@@ -23,7 +23,9 @@
/* Simple spinlock. No backoff strategy, gpr_spinlock_lock is almost always
a concurrency code smell. */
-  typedef struct { gpr_atm atm; } gpr_spinlock;
+  typedef struct {
+    gpr_atm atm;
+  } gpr_spinlock;
#ifdef __cplusplus
#define GPR_SPINLOCK_INITIALIZER (gpr_spinlock{0})

@@ -1639,8 +1639,8 @@ static void receiving_initial_metadata_ready(grpc_exec_ctx *exec_ctx,
}
} else {
/* Already received messages */
-      saved_rsr_closure = GRPC_CLOSURE_CREATE(receiving_stream_ready,
-                                              (batch_control *)rsr_bctlp,
+      saved_rsr_closure =
+          GRPC_CLOSURE_CREATE(receiving_stream_ready, (batch_control*)rsr_bctlp,
grpc_schedule_on_exec_ctx);
/* No need to modify recv_state */
break;

@@ -365,7 +365,8 @@ grpc_call *grpc_channel_create_registered_call(
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-      9, (channel, parent_call, (unsigned)propagation_mask, completion_queue,
+      9,
+      (channel, parent_call, (unsigned)propagation_mask, completion_queue,
registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
(int)deadline.clock_type, reserved));
GPR_ASSERT(!reserved);

@@ -879,7 +879,8 @@ static grpc_event cq_next(grpc_completion_queue *cq, gpr_timespec deadline,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-      5, (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+      5,
+      (cq, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
reserved));
GPR_ASSERT(!reserved);
@@ -1115,8 +1116,9 @@ static grpc_event cq_pluck(grpc_completion_queue *cq, void *tag,
"deadline=gpr_timespec { tv_sec: %" PRId64
", tv_nsec: %d, clock_type: %d }, "
"reserved=%p)",
-        6, (cq, tag, deadline.tv_sec, deadline.tv_nsec,
-            (int)deadline.clock_type, reserved));
+        6,
+        (cq, tag, deadline.tv_sec, deadline.tv_nsec, (int)deadline.clock_type,
+         reserved));
}
GPR_ASSERT(!reserved);

@@ -1109,8 +1109,8 @@ void grpc_server_start(grpc_server *server) {
for (int j = 0; j < server->max_requested_calls_per_cq; j++) {
gpr_stack_lockfree_push(server->request_freelist_per_cq[i], j);
}
-    server->requested_calls_per_cq[i] = (requested_call *)gpr_malloc(
-        (size_t)server->max_requested_calls_per_cq *
+    server->requested_calls_per_cq[i] =
+        (requested_call*)gpr_malloc((size_t)server->max_requested_calls_per_cq *
sizeof(*server->requested_calls_per_cq[i]));
}
request_matcher_init(&server->unregistered_request_matcher,
@@ -1269,8 +1269,8 @@ void grpc_server_shutdown_and_notify(grpc_server *server,
/* stay locked, and gather up some stuff to do */
GPR_ASSERT(grpc_cq_begin_op(cq, tag));
if (server->shutdown_published) {
-    grpc_cq_end_op(
-        &exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown, NULL,
+    grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
+                   NULL,
(grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
gpr_mu_unlock(&server->mu_global);
goto done;
@@ -1443,7 +1443,8 @@ grpc_call_error grpc_server_request_call(
"grpc_server_request_call("
"server=%p, call=%p, details=%p, initial_metadata=%p, "
"cq_bound_to_call=%p, cq_for_notification=%p, tag=%p)",
-      7, (server, call, details, initial_metadata, cq_bound_to_call,
+      7,
+      (server, call, details, initial_metadata, cq_bound_to_call,
cq_for_notification, tag));
size_t cq_idx;
for (cq_idx = 0; cq_idx < server->cq_count; cq_idx++) {
@@ -1491,7 +1492,8 @@ grpc_call_error grpc_server_request_registered_call(
"server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
"optional_payload=%p, cq_bound_to_call=%p, cq_for_notification=%p, "
"tag=%p)",
-      9, (server, rmp, call, deadline, initial_metadata, optional_payload,
+      9,
+      (server, rmp, call, deadline, initial_metadata, optional_payload,
cq_bound_to_call, cq_for_notification, tag));
size_t cq_idx;

@@ -45,7 +45,8 @@ grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx *exec_ctx) {
double bw = dt > 0 ? ((double)accumulator_ / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_;
if (GRPC_TRACER_ON(grpc_bdp_estimator_trace)) {
-    gpr_log(GPR_DEBUG, "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
+    gpr_log(GPR_DEBUG,
+            "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0,
bw_est_ / 125000.0);

@@ -98,8 +98,7 @@ struct grpc_mdelem {
uintptr_t payload;
};
-  #define GRPC_MDELEM_DATA(md) \
-    ((grpc_mdelem_data *)((md).payload & ~(uintptr_t)3))
+  #define GRPC_MDELEM_DATA(md) ((grpc_mdelem_data*)((md).payload & ~(uintptr_t)3))
#define GRPC_MDELEM_STORAGE(md) \
((grpc_mdelem_data_storage)((md).payload & (uintptr_t)3))
#ifdef __cplusplus
@@ -137,8 +136,7 @@ size_t grpc_mdelem_get_size_in_hpack_table(grpc_mdelem elem,
/* Mutator and accessor for grpc_mdelem user data. The destructor function
is used as a type tag and is checked during user_data fetch. */
-  void *grpc_mdelem_get_user_data(grpc_mdelem md,
-                                  void (*if_destroy_func)(void *));
+  void* grpc_mdelem_get_user_data(grpc_mdelem md, void (*if_destroy_func)(void*));
void* grpc_mdelem_set_user_data(grpc_mdelem md, void (*destroy_func)(void*),
void* user_data);

@@ -111,7 +111,13 @@ const char* grpc_service_config_get_lb_policy_name(
static size_t count_names_in_method_config_json(grpc_json* json) {
size_t num_names = 0;
for (grpc_json* field = json->child; field != NULL; field = field->next) {
-    if (field->key != NULL && strcmp(field->key, "name") == 0) ++num_names;
+    if (field->key != NULL && strcmp(field->key, "name") == 0) {
+      if (field->type != GRPC_JSON_ARRAY) return -1;
+      for (grpc_json* name = field->child; name != NULL; name = name->next) {
+        if (name->type != GRPC_JSON_OBJECT) return -1;
+        ++num_names;
+      }
+    }
}
return num_names;
}
@@ -148,6 +154,8 @@ static char* parse_json_method_name(grpc_json* json) {
static bool parse_json_method_config(
grpc_exec_ctx* exec_ctx, grpc_json* json,
void* (*create_value)(const grpc_json* method_config_json),
+    void* (*ref_value)(void* value),
+    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value),
grpc_slice_hash_table_entry* entries, size_t* idx) {
// Construct value.
void* method_config = create_value(json);
@@ -162,6 +170,7 @@ static bool parse_json_method_config(
if (child->type != GRPC_JSON_ARRAY) goto done;
for (grpc_json* name = child->child; name != NULL; name = name->next) {
char* path = parse_json_method_name(name);
+      if (path == NULL) goto done;
gpr_strvec_add(&paths, path);
}
}
@@ -170,11 +179,12 @@ static bool parse_json_method_config(
// Add entry for each path.
for (size_t i = 0; i < paths.count; ++i) {
entries[*idx].key = grpc_slice_from_copied_string(paths.strs[i]);
-    entries[*idx].value = method_config;
+    entries[*idx].value = ref_value(method_config);
++*idx;
}
success = true;
done:
+  unref_value(exec_ctx, method_config);
gpr_strvec_destroy(&paths);
return success;
}
@@ -182,7 +192,8 @@ done:
grpc_slice_hash_table* grpc_service_config_create_method_config_table(
grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
void* (*create_value)(const grpc_json* method_config_json),
-    void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value)) {
+    void* (*ref_value)(void* value),
+    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value)) {
const grpc_json* json = service_config->json_tree;
// Traverse parsed JSON tree.
if (json->type != GRPC_JSON_OBJECT || json->key != NULL) return NULL;
@@ -196,7 +207,9 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
// Find number of entries.
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
-      num_entries += count_names_in_method_config_json(method);
+      size_t count = count_names_in_method_config_json(method);
+      if (count <= 0) return NULL;
+      num_entries += count;
}
// Populate method config table entries.
entries = (grpc_slice_hash_table_entry*)gpr_malloc(
@@ -204,8 +217,13 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
size_t idx = 0;
for (grpc_json* method = field->child; method != NULL;
method = method->next) {
-      if (!parse_json_method_config(exec_ctx, method, create_value, entries,
-                                    &idx)) {
+      if (!parse_json_method_config(exec_ctx, method, create_value, ref_value,
+                                    unref_value, entries, &idx)) {
+        for (size_t i = 0; i < idx; ++i) {
+          grpc_slice_unref_internal(exec_ctx, entries[i].key);
+          unref_value(exec_ctx, entries[i].value);
+        }
+        gpr_free(entries);
return NULL;
}
}
@@ -216,7 +234,7 @@ grpc_slice_hash_table* grpc_service_config_create_method_config_table(
grpc_slice_hash_table* method_config_table = NULL;
if (entries != NULL) {
method_config_table =
-        grpc_slice_hash_table_create(num_entries, entries, destroy_value, NULL);
+        grpc_slice_hash_table_create(num_entries, entries, unref_value, NULL);
gpr_free(entries);
}
return method_config_table;
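
Under the new contract the table takes one ref per path entry via ref_value() and the creator's initial ref is dropped at the end of parsing, so a config named by N paths is freed only after all N entries are destroyed through unref_value(). A hedged sketch of a conforming value type a caller might supply; my_config is invented here, and the diff's real instance is refcounted_message_size_limits in message_size_filter.cc:

    /* Sketch: value type plus the ref/unref hooks the table expects. */
    typedef struct {
      gpr_refcount refs;
      int some_setting;
    } my_config;

    static void* my_config_ref(void* v) {
      gpr_ref(&((my_config*)v)->refs);
      return v;
    }

    static void my_config_unref(grpc_exec_ctx* exec_ctx, void* v) {
      if (gpr_unref(&((my_config*)v)->refs)) gpr_free(v);
    }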

@@ -46,12 +46,13 @@ const char* grpc_service_config_get_lb_policy_name(
/// Creates a method config table based on the data in \a json.
/// The table's keys are request paths. The table's value type is
/// returned by \a create_value(), based on data parsed from the JSON tree.
-  /// \a destroy_value is used to clean up values.
+  /// \a ref_value() and \a unref_value() are used to ref and unref values.
/// Returns NULL on error.
grpc_slice_hash_table* grpc_service_config_create_method_config_table(
grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
void* (*create_value)(const grpc_json* method_config_json),
-    void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value));
+    void* (*ref_value)(void* value),
+    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value));
/// A helper function for looking up values in the table returned by
/// \a grpc_service_config_create_method_config_table().
