Merge remote-tracking branch 'upstream/master' into c++_subchannel_list

reviewable/pr14886/r6
Mark D. Roth 7 years ago
commit 0839ac6d18
  1. 1
      README.md
  2. 5
      examples/csharp/helloworld-from-cli/global.json
  3. 151
      src/core/ext/filters/client_channel/client_channel.cc
  4. 4
      src/core/ext/filters/client_channel/lb_policy.cc
  5. 17
      src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
  6. 10
      src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
  7. 33
      src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
  8. 20
      src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
  9. 4
      src/core/ext/filters/http/message_compress/message_compress_filter.cc
  10. 25
      src/core/ext/transport/chttp2/transport/chttp2_transport.cc
  11. 6
      src/core/ext/transport/chttp2/transport/frame_settings.cc
  12. 4
      src/core/ext/transport/chttp2/transport/hpack_encoder.cc
  13. 2
      src/core/ext/transport/chttp2/transport/hpack_parser.cc
  14. 4
      src/core/ext/transport/chttp2/transport/hpack_table.cc
  15. 6
      src/core/ext/transport/chttp2/transport/stream_lists.cc
  16. 10
      src/core/ext/transport/chttp2/transport/writing.cc
  17. 84
      src/core/ext/transport/inproc/inproc_transport.cc
  18. 8
      src/core/lib/channel/handshaker.cc
  19. 4
      src/core/lib/gprpp/orphanable.h
  20. 4
      src/core/lib/gprpp/ref_counted.h
  21. 26
      src/core/lib/iomgr/call_combiner.cc
  22. 22
      src/core/lib/iomgr/combiner.cc
  23. 48
      src/core/lib/iomgr/ev_epoll1_linux.cc
  24. 54
      src/core/lib/iomgr/ev_epollex_linux.cc
  25. 4
      src/core/lib/iomgr/ev_epollsig_linux.cc
  26. 6
      src/core/lib/iomgr/ev_poll_posix.cc
  27. 2
      src/core/lib/iomgr/ev_posix.cc
  28. 12
      src/core/lib/iomgr/executor.cc
  29. 21
      src/core/lib/iomgr/resource_quota.cc
  30. 4
      src/core/lib/iomgr/tcp_client_custom.cc
  31. 8
      src/core/lib/iomgr/tcp_client_posix.cc
  32. 20
      src/core/lib/iomgr/tcp_custom.cc
  33. 48
      src/core/lib/iomgr/tcp_posix.cc
  34. 10
      src/core/lib/iomgr/tcp_server_custom.cc
  35. 2
      src/core/lib/iomgr/tcp_server_posix.cc
  36. 3
      src/core/lib/iomgr/tcp_uv.cc
  37. 34
      src/core/lib/iomgr/timer_generic.cc
  38. 23
      src/core/lib/iomgr/timer_manager.cc
  39. 3
      src/core/lib/iomgr/timer_uv.cc
  40. 4
      src/core/lib/security/transport/secure_endpoint.cc
  41. 4
      src/core/lib/surface/call.cc
  42. 16
      src/core/lib/surface/server.cc
  43. 6
      src/core/lib/transport/bdp_estimator.cc
  44. 4
      src/core/lib/transport/bdp_estimator.h
  45. 13
      src/core/lib/transport/connectivity_state.cc
  46. 8
      src/csharp/Grpc.Core/NativeDeps.Mac.csproj.include
  47. 11
      src/csharp/Grpc.Core/RpcException.cs
  48. 5
      src/csharp/global.json
  49. 9
      src/objective-c/BoringSSL.podspec
  50. 59
      src/php/ext/grpc/channel.c
  51. 1
      src/php/ext/grpc/channel.h
  52. 3
      src/php/tests/unit_tests/CallCredentials2Test.php
  53. 3
      src/php/tests/unit_tests/CallCredentialsTest.php
  54. 3
      src/php/tests/unit_tests/CallTest.php
  55. 8
      src/php/tests/unit_tests/ChannelTest.php
  56. 3
      src/php/tests/unit_tests/EndToEndTest.php
  57. 115
      src/php/tests/unit_tests/PersistentChannelTest.php
  58. 3
      src/php/tests/unit_tests/SecureEndToEndTest.php
  59. 3
      src/php/tests/unit_tests/ServerTest.php
  60. 3
      src/php/tests/unit_tests/TimevalTest.php
  61. 4
      templates/tools/dockerfile/python_deps.include
  62. 2
      templates/tools/dockerfile/test/cxx_alpine_x64/Dockerfile.template
  63. 57
      test/core/tsi/alts/fake_handshaker/BUILD
  64. 268
      test/core/tsi/alts/fake_handshaker/fake_handshaker_server.cc
  65. 224
      test/core/tsi/alts/fake_handshaker/handshaker.proto
  66. 40
      test/core/tsi/alts/fake_handshaker/transport_security_common.proto
  67. 1
      test/core/tsi/ssl_session_cache_test.cc
  68. 22
      test/core/tsi/ssl_transport_security_test.cc
  69. 2
      test/cpp/interop/client.cc
  70. 5
      test/cpp/interop/client_helper.cc
  71. 2
      test/cpp/interop/http2_client.cc
  72. 2
      test/cpp/interop/interop_server.cc
  73. 6
      test/cpp/interop/reconnect_interop_client.cc
  74. 3
      test/cpp/interop/server_helper.cc
  75. 10
      test/cpp/interop/stress_test.cc
  76. 26
      test/cpp/util/create_test_channel.cc
  77. 16
      test/cpp/util/create_test_channel.h
  78. 6
      test/cpp/util/test_credentials_provider.cc
  79. 2
      test/cpp/util/test_credentials_provider.h
  80. 6075
      third_party/address_sorting/LICENSE
  81. 7
      tools/distrib/pylint_code.sh
  82. 2
      tools/distrib/python/docgen.py
  83. 2
      tools/distrib/yapf_code.sh
  84. 4
      tools/dockerfile/grpc_clang_tidy/Dockerfile
  85. 4
      tools/dockerfile/interoptest/grpc_interop_csharp/Dockerfile
  86. 4
      tools/dockerfile/interoptest/grpc_interop_csharpcoreclr/Dockerfile
  87. 4
      tools/dockerfile/interoptest/grpc_interop_cxx/Dockerfile
  88. 4
      tools/dockerfile/interoptest/grpc_interop_go/Dockerfile
  89. 4
      tools/dockerfile/interoptest/grpc_interop_go1.7/Dockerfile
  90. 4
      tools/dockerfile/interoptest/grpc_interop_go1.8/Dockerfile
  91. 4
      tools/dockerfile/interoptest/grpc_interop_http2/Dockerfile
  92. 4
      tools/dockerfile/interoptest/grpc_interop_java/Dockerfile
  93. 4
      tools/dockerfile/interoptest/grpc_interop_java_oracle8/Dockerfile
  94. 4
      tools/dockerfile/interoptest/grpc_interop_node/Dockerfile
  95. 11
      tools/dockerfile/interoptest/grpc_interop_node/build_interop.sh
  96. 4
      tools/dockerfile/interoptest/grpc_interop_python/Dockerfile
  97. 4
      tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
  98. 4
      tools/dockerfile/test/csharp_jessie_x64/Dockerfile
  99. 2
      tools/dockerfile/test/cxx_alpine_x64/Dockerfile
  100. 4
      tools/dockerfile/test/cxx_jessie_x64/Dockerfile
  101. Some files were not shown because too many files have changed in this diff Show More

@ -39,6 +39,7 @@ Libraries in different languages may be in different states of development. We a
| Java | [grpc-java](http://github.com/grpc/grpc-java) | | Java | [grpc-java](http://github.com/grpc/grpc-java) |
| Go | [grpc-go](http://github.com/grpc/grpc-go) | | Go | [grpc-go](http://github.com/grpc/grpc-go) |
| NodeJS | [grpc-node](https://github.com/grpc/grpc-node) | | NodeJS | [grpc-node](https://github.com/grpc/grpc-node) |
| WebJS | [grpc-web](https://github.com/grpc/grpc-web) |
| Dart | [grpc-dart](https://github.com/grpc/grpc-dart) | | Dart | [grpc-dart](https://github.com/grpc/grpc-dart) |
See [MANIFEST.md](MANIFEST.md) for a listing of top-level items in the See [MANIFEST.md](MANIFEST.md) for a listing of top-level items in the

@ -1,5 +0,0 @@
{
"sdk": {
"version": "1.0.0"
}
}

@ -174,7 +174,7 @@ static void set_channel_connectivity_state_locked(channel_data* chand,
} }
} }
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand, gpr_log(GPR_INFO, "chand=%p: setting connectivity state to %s", chand,
grpc_connectivity_state_name(state)); grpc_connectivity_state_name(state));
} }
grpc_connectivity_state_set(&chand->state_tracker, state, error, reason); grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
@ -186,7 +186,7 @@ static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
/* check if the notification is for the latest policy */ /* check if the notification is for the latest policy */
if (w->lb_policy == w->chand->lb_policy.get()) { if (w->lb_policy == w->chand->lb_policy.get()) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand, gpr_log(GPR_INFO, "chand=%p: lb_policy=%p state changed to %s", w->chand,
w->lb_policy, grpc_connectivity_state_name(w->state)); w->lb_policy, grpc_connectivity_state_name(w->state));
} }
set_channel_connectivity_state_locked(w->chand, w->state, set_channel_connectivity_state_locked(w->chand, w->state,
@ -215,7 +215,7 @@ static void watch_lb_policy_locked(channel_data* chand,
static void start_resolving_locked(channel_data* chand) { static void start_resolving_locked(channel_data* chand) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand); gpr_log(GPR_INFO, "chand=%p: starting name resolution", chand);
} }
GPR_ASSERT(!chand->started_resolving); GPR_ASSERT(!chand->started_resolving);
chand->started_resolving = true; chand->started_resolving = true;
@ -297,7 +297,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
return; return;
} }
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand); gpr_log(GPR_INFO, "chand=%p: started name re-resolving", chand);
} }
chand->resolver->RequestReresolutionLocked(); chand->resolver->RequestReresolutionLocked();
// Give back the closure to the LB policy. // Give back the closure to the LB policy.
@ -311,7 +311,7 @@ static void request_reresolution_locked(void* arg, grpc_error* error) {
static void on_resolver_result_changed_locked(void* arg, grpc_error* error) { static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(arg); channel_data* chand = static_cast<channel_data*>(arg);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p: got resolver result: resolver_result=%p error=%s", chand, "chand=%p: got resolver result: resolver_result=%p error=%s", chand,
chand->resolver_result, grpc_error_string(error)); chand->resolver_result, grpc_error_string(error));
} }
@ -431,7 +431,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
} }
} }
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p: resolver result: lb_policy_name=\"%s\"%s, " "chand=%p: resolver result: lb_policy_name=\"%s\"%s, "
"service_config=\"%s\"", "service_config=\"%s\"",
chand, lb_policy_name_dup, chand, lb_policy_name_dup,
@ -466,7 +466,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
chand->resolver == nullptr) { chand->resolver == nullptr) {
if (chand->lb_policy != nullptr) { if (chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand, gpr_log(GPR_INFO, "chand=%p: unreffing lb_policy=%p", chand,
chand->lb_policy.get()); chand->lb_policy.get());
} }
grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(), grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties(),
@ -480,11 +480,11 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
// error or shutdown. // error or shutdown.
if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) { if (error != GRPC_ERROR_NONE || chand->resolver == nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down", chand); gpr_log(GPR_INFO, "chand=%p: shutting down", chand);
} }
if (chand->resolver != nullptr) { if (chand->resolver != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand); gpr_log(GPR_INFO, "chand=%p: shutting down resolver", chand);
} }
chand->resolver.reset(); chand->resolver.reset();
} }
@ -506,7 +506,7 @@ static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("No load balancing policy");
if (lb_policy_created) { if (lb_policy_created) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand); gpr_log(GPR_INFO, "chand=%p: initializing new LB policy", chand);
} }
GRPC_ERROR_UNREF(state_error); GRPC_ERROR_UNREF(state_error);
state = chand->lb_policy->CheckConnectivityLocked(&state_error); state = chand->lb_policy->CheckConnectivityLocked(&state_error);
@ -999,7 +999,7 @@ static void maybe_cache_send_ops_for_batch(call_data* calld,
static void free_cached_send_initial_metadata(channel_data* chand, static void free_cached_send_initial_metadata(channel_data* chand,
call_data* calld) { call_data* calld) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_initial_metadata", chand, "chand=%p calld=%p: destroying calld->send_initial_metadata", chand,
calld); calld);
} }
@ -1010,7 +1010,7 @@ static void free_cached_send_initial_metadata(channel_data* chand,
static void free_cached_send_message(channel_data* chand, call_data* calld, static void free_cached_send_message(channel_data* chand, call_data* calld,
size_t idx) { size_t idx) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]", "chand=%p calld=%p: destroying calld->send_messages[%" PRIuPTR "]",
chand, calld, idx); chand, calld, idx);
} }
@ -1021,7 +1021,7 @@ static void free_cached_send_message(channel_data* chand, call_data* calld,
static void free_cached_send_trailing_metadata(channel_data* chand, static void free_cached_send_trailing_metadata(channel_data* chand,
call_data* calld) { call_data* calld) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: destroying calld->send_trailing_metadata", "chand=%p calld=%p: destroying calld->send_trailing_metadata",
chand, calld); chand, calld);
} }
@ -1088,7 +1088,7 @@ static void pending_batches_add(grpc_call_element* elem,
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
const size_t idx = get_batch_index(batch); const size_t idx = get_batch_index(batch);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand, "chand=%p calld=%p: adding pending batch at index %" PRIuPTR, chand,
calld, idx); calld, idx);
} }
@ -1116,7 +1116,7 @@ static void pending_batches_add(grpc_call_element* elem,
} }
if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) { if (calld->bytes_buffered_for_retry > chand->per_rpc_retry_buffer_size) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: exceeded retry buffer size, committing", "chand=%p calld=%p: exceeded retry buffer size, committing",
chand, calld); chand, calld);
} }
@ -1131,7 +1131,7 @@ static void pending_batches_add(grpc_call_element* elem,
// retries are disabled so that we don't bother with retry overhead. // retries are disabled so that we don't bother with retry overhead.
if (calld->num_attempts_completed == 0) { if (calld->num_attempts_completed == 0) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: disabling retries before first attempt", "chand=%p calld=%p: disabling retries before first attempt",
chand, calld); chand, calld);
} }
@ -1178,7 +1178,7 @@ static void pending_batches_fail(grpc_call_element* elem, grpc_error* error,
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches; if (calld->pending_batches[i].batch != nullptr) ++num_batches;
} }
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s", "chand=%p calld=%p: failing %" PRIuPTR " pending batches: %s",
elem->channel_data, calld, num_batches, grpc_error_string(error)); elem->channel_data, calld, num_batches, grpc_error_string(error));
} }
@ -1240,7 +1240,7 @@ static void pending_batches_resume(grpc_call_element* elem) {
for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) { for (size_t i = 0; i < GPR_ARRAY_SIZE(calld->pending_batches); ++i) {
if (calld->pending_batches[i].batch != nullptr) ++num_batches; if (calld->pending_batches[i].batch != nullptr) ++num_batches;
} }
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR "chand=%p calld=%p: starting %" PRIuPTR
" pending batches on subchannel_call=%p", " pending batches on subchannel_call=%p",
chand, calld, num_batches, calld->subchannel_call); chand, calld, num_batches, calld->subchannel_call);
@ -1285,7 +1285,7 @@ static void maybe_clear_pending_batch(grpc_call_element* elem,
(!batch->recv_message || (!batch->recv_message ||
batch->payload->recv_message.recv_message_ready == nullptr)) { batch->payload->recv_message.recv_message_ready == nullptr)) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: clearing pending batch", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: clearing pending batch", chand,
calld); calld);
} }
pending_batch_clear(calld, pending); pending_batch_clear(calld, pending);
@ -1375,7 +1375,7 @@ static void retry_commit(grpc_call_element* elem,
if (calld->retry_committed) return; if (calld->retry_committed) return;
calld->retry_committed = true; calld->retry_committed = true;
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: committing retries", chand, calld); gpr_log(GPR_INFO, "chand=%p calld=%p: committing retries", chand, calld);
} }
if (retry_state != nullptr) { if (retry_state != nullptr) {
free_cached_send_op_data_after_commit(elem, retry_state); free_cached_send_op_data_after_commit(elem, retry_state);
@ -1420,7 +1420,7 @@ static void do_retry(grpc_call_element* elem,
next_attempt_time = calld->retry_backoff->NextAttemptTime(); next_attempt_time = calld->retry_backoff->NextAttemptTime();
} }
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand, "chand=%p calld=%p: retrying failed call in %" PRIuPTR " ms", chand,
calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now()); calld, next_attempt_time - grpc_core::ExecCtx::Get()->Now());
} }
@ -1454,7 +1454,7 @@ static bool maybe_retry(grpc_call_element* elem,
batch_data->subchannel_call)); batch_data->subchannel_call));
if (retry_state->retry_dispatched) { if (retry_state->retry_dispatched) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retry already dispatched", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: retry already dispatched", chand,
calld); calld);
} }
return true; return true;
@ -1466,14 +1466,14 @@ static bool maybe_retry(grpc_call_element* elem,
calld->retry_throttle_data->RecordSuccess(); calld->retry_throttle_data->RecordSuccess();
} }
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call succeeded", chand, calld); gpr_log(GPR_INFO, "chand=%p calld=%p: call succeeded", chand, calld);
} }
return false; return false;
} }
// Status is not OK. Check whether the status is retryable. // Status is not OK. Check whether the status is retryable.
if (!retry_policy->retryable_status_codes.Contains(status)) { if (!retry_policy->retryable_status_codes.Contains(status)) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: status %s not configured as retryable", chand, "chand=%p calld=%p: status %s not configured as retryable", chand,
calld, grpc_status_code_to_string(status)); calld, grpc_status_code_to_string(status));
} }
@ -1489,14 +1489,14 @@ static bool maybe_retry(grpc_call_element* elem,
if (calld->retry_throttle_data != nullptr && if (calld->retry_throttle_data != nullptr &&
!calld->retry_throttle_data->RecordFailure()) { !calld->retry_throttle_data->RecordFailure()) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries throttled", chand, calld); gpr_log(GPR_INFO, "chand=%p calld=%p: retries throttled", chand, calld);
} }
return false; return false;
} }
// Check whether the call is committed. // Check whether the call is committed.
if (calld->retry_committed) { if (calld->retry_committed) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: retries already committed", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: retries already committed", chand,
calld); calld);
} }
return false; return false;
@ -1505,7 +1505,7 @@ static bool maybe_retry(grpc_call_element* elem,
++calld->num_attempts_completed; ++calld->num_attempts_completed;
if (calld->num_attempts_completed >= retry_policy->max_attempts) { if (calld->num_attempts_completed >= retry_policy->max_attempts) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: exceeded %d retry attempts", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: exceeded %d retry attempts", chand,
calld, retry_policy->max_attempts); calld, retry_policy->max_attempts);
} }
return false; return false;
@ -1513,7 +1513,7 @@ static bool maybe_retry(grpc_call_element* elem,
// If the call was cancelled from the surface, don't retry. // If the call was cancelled from the surface, don't retry.
if (calld->cancel_error != GRPC_ERROR_NONE) { if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: call cancelled from surface, not retrying", "chand=%p calld=%p: call cancelled from surface, not retrying",
chand, calld); chand, calld);
} }
@ -1526,16 +1526,15 @@ static bool maybe_retry(grpc_call_element* elem,
uint32_t ms; uint32_t ms;
if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) { if (!grpc_parse_slice_to_uint32(GRPC_MDVALUE(*server_pushback_md), &ms)) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: not retrying due to server push-back", "chand=%p calld=%p: not retrying due to server push-back",
chand, calld); chand, calld);
} }
return false; return false;
} else { } else {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO, "chand=%p calld=%p: server push-back: retry in %u ms",
"chand=%p calld=%p: server push-back: retry in %u ms", chand, chand, calld, ms);
calld, ms);
} }
server_pushback_ms = (grpc_millis)ms; server_pushback_ms = (grpc_millis)ms;
} }
@ -1608,7 +1607,7 @@ static void invoke_recv_initial_metadata_callback(void* arg,
batch->payload->recv_initial_metadata.recv_initial_metadata_ready != batch->payload->recv_initial_metadata.recv_initial_metadata_ready !=
nullptr) { nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_initial_metadata_ready for " "chand=%p calld=%p: invoking recv_initial_metadata_ready for "
"pending batch at index %" PRIuPTR, "pending batch at index %" PRIuPTR,
chand, calld, i); chand, calld, i);
@ -1644,7 +1643,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: got recv_initial_metadata_ready, error=%s", "chand=%p calld=%p: got recv_initial_metadata_ready, error=%s",
chand, calld, grpc_error_string(error)); chand, calld, grpc_error_string(error));
} }
@ -1659,7 +1658,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) && if ((batch_data->trailing_metadata_available || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) { !retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_initial_metadata_ready " "chand=%p calld=%p: deferring recv_initial_metadata_ready "
"(Trailers-Only)", "(Trailers-Only)",
chand, calld); chand, calld);
@ -1701,7 +1700,7 @@ static void invoke_recv_message_callback(void* arg, grpc_error* error) {
if (batch != nullptr && batch->recv_message && if (batch != nullptr && batch->recv_message &&
batch->payload->recv_message.recv_message_ready != nullptr) { batch->payload->recv_message.recv_message_ready != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: invoking recv_message_ready for " "chand=%p calld=%p: invoking recv_message_ready for "
"pending batch at index %" PRIuPTR, "pending batch at index %" PRIuPTR,
chand, calld, i); chand, calld, i);
@ -1734,7 +1733,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: got recv_message_ready, error=%s", gpr_log(GPR_INFO, "chand=%p calld=%p: got recv_message_ready, error=%s",
chand, calld, grpc_error_string(error)); chand, calld, grpc_error_string(error));
} }
subchannel_call_retry_state* retry_state = subchannel_call_retry_state* retry_state =
@ -1748,7 +1747,7 @@ static void recv_message_ready(void* arg, grpc_error* error) {
if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) && if ((batch_data->recv_message == nullptr || error != GRPC_ERROR_NONE) &&
!retry_state->completed_recv_trailing_metadata) { !retry_state->completed_recv_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring recv_message_ready (nullptr " "chand=%p calld=%p: deferring recv_message_ready (nullptr "
"message and recv_trailing_metadata pending)", "message and recv_trailing_metadata pending)",
chand, calld); chand, calld);
@ -1796,7 +1795,7 @@ static void execute_closures_in_call_combiner(grpc_call_element* elem,
// have to re-enter the call combiner. // have to re-enter the call combiner.
if (num_closures > 0) { if (num_closures > 0) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: %s starting closure: %s", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: %s starting closure: %s", chand,
calld, caller, closures[0].reason); calld, caller, closures[0].reason);
} }
GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error); GRPC_CLOSURE_SCHED(closures[0].closure, closures[0].error);
@ -1805,7 +1804,7 @@ static void execute_closures_in_call_combiner(grpc_call_element* elem,
} }
for (size_t i = 1; i < num_closures; ++i) { for (size_t i = 1; i < num_closures; ++i) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: %s starting closure in call combiner: %s", "chand=%p calld=%p: %s starting closure in call combiner: %s",
chand, calld, caller, closures[i].reason); chand, calld, caller, closures[i].reason);
} }
@ -1817,7 +1816,7 @@ static void execute_closures_in_call_combiner(grpc_call_element* elem,
} }
} else { } else {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: no closures to run for %s", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: no closures to run for %s", chand,
calld, caller); calld, caller);
} }
GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run"); GRPC_CALL_COMBINER_STOP(calld->call_combiner, "no closures to run");
@ -1912,7 +1911,7 @@ static void add_closures_for_replay_or_pending_send_ops(
} }
if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) { if (have_pending_send_message_ops || have_pending_send_trailing_metadata_op) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: starting next batch for pending send op(s)", "chand=%p calld=%p: starting next batch for pending send op(s)",
chand, calld); chand, calld);
} }
@ -1937,7 +1936,7 @@ static void add_closures_for_completed_pending_batches(
pending_batch* pending = &calld->pending_batches[i]; pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_completed(pending, calld, retry_state)) { if (pending_batch_is_completed(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: pending batch completed at index %" PRIuPTR, "chand=%p calld=%p: pending batch completed at index %" PRIuPTR,
chand, calld, i); chand, calld, i);
} }
@ -1970,7 +1969,7 @@ static void add_closures_to_fail_unstarted_pending_batches(
pending_batch* pending = &calld->pending_batches[i]; pending_batch* pending = &calld->pending_batches[i];
if (pending_batch_is_unstarted(pending, calld, retry_state)) { if (pending_batch_is_unstarted(pending, calld, retry_state)) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: failing unstarted pending batch at index " "chand=%p calld=%p: failing unstarted pending batch at index "
"%" PRIuPTR, "%" PRIuPTR,
chand, calld, i); chand, calld, i);
@ -2014,7 +2013,7 @@ static void on_complete(void* arg, grpc_error* error) {
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch); char* batch_str = grpc_transport_stream_op_batch_string(&batch_data->batch);
gpr_log(GPR_DEBUG, "chand=%p calld=%p: got on_complete, error=%s, batch=%s", gpr_log(GPR_INFO, "chand=%p calld=%p: got on_complete, error=%s, batch=%s",
chand, calld, grpc_error_string(error), batch_str); chand, calld, grpc_error_string(error), batch_str);
gpr_free(batch_str); gpr_free(batch_str);
} }
@ -2031,7 +2030,7 @@ static void on_complete(void* arg, grpc_error* error) {
update_retry_state_for_completed_batch(batch_data, retry_state); update_retry_state_for_completed_batch(batch_data, retry_state);
if (call_finished) { if (call_finished) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call already finished", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: call already finished", chand,
calld); calld);
} }
} else { } else {
@ -2059,7 +2058,7 @@ static void on_complete(void* arg, grpc_error* error) {
// If the call just finished, check if we should retry. // If the call just finished, check if we should retry.
if (call_finished) { if (call_finished) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: call finished, status=%s", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: call finished, status=%s", chand,
calld, grpc_status_code_to_string(status)); calld, grpc_status_code_to_string(status));
} }
if (maybe_retry(elem, batch_data, status, server_pushback_md)) { if (maybe_retry(elem, batch_data, status, server_pushback_md)) {
@ -2224,7 +2223,7 @@ static void add_retriable_send_message_op(
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]", "chand=%p calld=%p: starting calld->send_messages[%" PRIuPTR "]",
chand, calld, retry_state->started_send_message_count); chand, calld, retry_state->started_send_message_count);
} }
@ -2311,7 +2310,7 @@ static void start_internal_recv_trailing_metadata(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: call failed but recv_trailing_metadata not " "chand=%p calld=%p: call failed but recv_trailing_metadata not "
"started; starting it internally", "started; starting it internally",
chand, calld); chand, calld);
@ -2343,7 +2342,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_initial_metadata && !retry_state->started_send_initial_metadata &&
!calld->pending_send_initial_metadata) { !calld->pending_send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed " "chand=%p calld=%p: replaying previously completed "
"send_initial_metadata op", "send_initial_metadata op",
chand, calld); chand, calld);
@ -2359,7 +2358,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
retry_state->completed_send_message_count && retry_state->completed_send_message_count &&
!calld->pending_send_message) { !calld->pending_send_message) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed " "chand=%p calld=%p: replaying previously completed "
"send_message op", "send_message op",
chand, calld); chand, calld);
@ -2378,7 +2377,7 @@ static subchannel_batch_data* maybe_create_subchannel_batch_for_replay(
!retry_state->started_send_trailing_metadata && !retry_state->started_send_trailing_metadata &&
!calld->pending_send_trailing_metadata) { !calld->pending_send_trailing_metadata) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: replaying previously completed " "chand=%p calld=%p: replaying previously completed "
"send_trailing_metadata op", "send_trailing_metadata op",
chand, calld); chand, calld);
@ -2518,7 +2517,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: constructing retriable batches", gpr_log(GPR_INFO, "chand=%p calld=%p: constructing retriable batches",
chand, calld); chand, calld);
} }
subchannel_call_retry_state* retry_state = subchannel_call_retry_state* retry_state =
@ -2541,7 +2540,7 @@ static void start_retriable_subchannel_batches(void* arg, grpc_error* ignored) {
&num_closures); &num_closures);
// Start batches on subchannel call. // Start batches on subchannel call.
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: starting %" PRIuPTR "chand=%p calld=%p: starting %" PRIuPTR
" retriable batches on subchannel_call=%p", " retriable batches on subchannel_call=%p",
chand, calld, num_closures, calld->subchannel_call); chand, calld, num_closures, calld->subchannel_call);
@ -2572,7 +2571,7 @@ static void create_subchannel_call(grpc_call_element* elem, grpc_error* error) {
grpc_error* new_error = calld->pick.connected_subchannel->CreateCall( grpc_error* new_error = calld->pick.connected_subchannel->CreateCall(
call_args, &calld->subchannel_call); call_args, &calld->subchannel_call);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s", gpr_log(GPR_INFO, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
chand, calld, calld->subchannel_call, grpc_error_string(new_error)); chand, calld, calld->subchannel_call, grpc_error_string(new_error));
} }
if (new_error != GRPC_ERROR_NONE) { if (new_error != GRPC_ERROR_NONE) {
@ -2613,7 +2612,7 @@ static void pick_done(void* arg, grpc_error* error) {
: GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING( : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to create subchannel", &error, 1); "Failed to create subchannel", &error, 1);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create subchannel: error=%s", "chand=%p calld=%p: failed to create subchannel: error=%s",
chand, calld, grpc_error_string(new_error)); chand, calld, grpc_error_string(new_error));
} }
@ -2657,7 +2656,7 @@ static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
// the one we started it on. However, this will just be a no-op. // the one we started it on. However, this will just be a no-op.
if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) { if (error != GRPC_ERROR_NONE && chand->lb_policy != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p", gpr_log(GPR_INFO, "chand=%p calld=%p: cancelling pick from LB policy %p",
chand, calld, chand->lb_policy.get()); chand, calld, chand->lb_policy.get());
} }
chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error)); chand->lb_policy->CancelPickLocked(&calld->pick, GRPC_ERROR_REF(error));
@ -2672,8 +2671,8 @@ static void pick_callback_done_locked(void* arg, grpc_error* error) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed asynchronously", gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed asynchronously", chand,
chand, calld); calld);
} }
async_pick_done_locked(elem, GRPC_ERROR_REF(error)); async_pick_done_locked(elem, GRPC_ERROR_REF(error));
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@ -2685,7 +2684,7 @@ static void apply_service_config_to_call_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: applying service config to call", gpr_log(GPR_INFO, "chand=%p calld=%p: applying service config to call",
chand, calld); chand, calld);
} }
if (chand->retry_throttle_data != nullptr) { if (chand->retry_throttle_data != nullptr) {
@ -2723,8 +2722,8 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p", gpr_log(GPR_INFO, "chand=%p calld=%p: starting pick on lb_policy=%p", chand,
chand, calld, chand->lb_policy.get()); calld, chand->lb_policy.get());
} }
// Only get service config data on the first attempt. // Only get service config data on the first attempt.
if (calld->num_attempts_completed == 0) { if (calld->num_attempts_completed == 0) {
@ -2771,7 +2770,7 @@ static bool pick_callback_start_locked(grpc_call_element* elem) {
if (pick_done) { if (pick_done) {
// Pick completed synchronously. // Pick completed synchronously.
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously", gpr_log(GPR_INFO, "chand=%p calld=%p: pick completed synchronously",
chand, calld); chand, calld);
} }
GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback"); GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback");
@ -2815,7 +2814,7 @@ static void pick_after_resolver_result_cancel_locked(void* arg,
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: cancelling pick waiting for resolver result", "chand=%p calld=%p: cancelling pick waiting for resolver result",
chand, calld); chand, calld);
} }
@ -2835,7 +2834,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
if (args->finished) { if (args->finished) {
/* cancelled, do nothing */ /* cancelled, do nothing */
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "call cancelled before resolver result"); gpr_log(GPR_INFO, "call cancelled before resolver result");
} }
gpr_free(args); gpr_free(args);
return; return;
@ -2846,14 +2845,14 @@ static void pick_after_resolver_result_done_locked(void* arg,
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data", gpr_log(GPR_INFO, "chand=%p calld=%p: resolver failed to return data",
chand, calld); chand, calld);
} }
async_pick_done_locked(elem, GRPC_ERROR_REF(error)); async_pick_done_locked(elem, GRPC_ERROR_REF(error));
} else if (chand->resolver == nullptr) { } else if (chand->resolver == nullptr) {
// Shutting down. // Shutting down.
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: resolver disconnected", chand,
calld); calld);
} }
async_pick_done_locked( async_pick_done_locked(
@ -2869,7 +2868,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
.send_initial_metadata_flags; .send_initial_metadata_flags;
if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) { if (send_initial_metadata_flags & GRPC_INITIAL_METADATA_WAIT_FOR_READY) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; " "chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=true; trying again", "wait_for_ready=true; trying again",
chand, calld); chand, calld);
@ -2877,7 +2876,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
pick_after_resolver_result_start_locked(elem); pick_after_resolver_result_start_locked(elem);
} else { } else {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: resolver returned but no LB policy; " "chand=%p calld=%p: resolver returned but no LB policy; "
"wait_for_ready=false; failing", "wait_for_ready=false; failing",
chand, calld); chand, calld);
@ -2890,7 +2889,7 @@ static void pick_after_resolver_result_done_locked(void* arg,
} }
} else { } else {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick", gpr_log(GPR_INFO, "chand=%p calld=%p: resolver returned, doing pick",
chand, calld); chand, calld);
} }
if (pick_callback_start_locked(elem)) { if (pick_callback_start_locked(elem)) {
@ -2908,7 +2907,7 @@ static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
channel_data* chand = static_cast<channel_data*>(elem->channel_data); channel_data* chand = static_cast<channel_data*>(elem->channel_data);
call_data* calld = static_cast<call_data*>(elem->call_data); call_data* calld = static_cast<call_data*>(elem->call_data);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: deferring pick pending resolver result", chand, "chand=%p calld=%p: deferring pick pending resolver result", chand,
calld); calld);
} }
@ -2975,7 +2974,7 @@ static void cc_start_transport_stream_op_batch(
// If we've previously been cancelled, immediately fail any new batches. // If we've previously been cancelled, immediately fail any new batches.
if (calld->cancel_error != GRPC_ERROR_NONE) { if (calld->cancel_error != GRPC_ERROR_NONE) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: failing batch with error: %s", gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld, grpc_error_string(calld->cancel_error)); chand, calld, grpc_error_string(calld->cancel_error));
} }
// Note: This will release the call combiner. // Note: This will release the call combiner.
@ -2994,7 +2993,7 @@ static void cc_start_transport_stream_op_batch(
calld->cancel_error = calld->cancel_error =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error); GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: recording cancel_error=%s", chand, gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_string(calld->cancel_error)); calld, grpc_error_string(calld->cancel_error));
} }
// If we do not have a subchannel call (i.e., a pick has not yet // If we do not have a subchannel call (i.e., a pick has not yet
@ -3020,7 +3019,7 @@ static void cc_start_transport_stream_op_batch(
// streaming calls). // streaming calls).
if (calld->subchannel_call != nullptr) { if (calld->subchannel_call != nullptr) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: starting batch on subchannel_call=%p", chand, "chand=%p calld=%p: starting batch on subchannel_call=%p", chand,
calld, calld->subchannel_call); calld, calld->subchannel_call);
} }
@ -3032,7 +3031,7 @@ static void cc_start_transport_stream_op_batch(
// combiner to start a pick. // combiner to start a pick.
if (batch->send_initial_metadata) { if (batch->send_initial_metadata) {
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, "chand=%p calld=%p: entering client_channel combiner", gpr_log(GPR_INFO, "chand=%p calld=%p: entering client_channel combiner",
chand, calld); chand, calld);
} }
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
@ -3042,7 +3041,7 @@ static void cc_start_transport_stream_op_batch(
} else { } else {
// For all other batches, release the call combiner. // For all other batches, release the call combiner.
if (grpc_client_channel_trace.enabled()) { if (grpc_client_channel_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"chand=%p calld=%p: saved batch, yeilding call combiner", chand, "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
calld); calld);
} }

@ -44,13 +44,13 @@ void LoadBalancingPolicy::TryReresolutionLocked(
GRPC_CLOSURE_SCHED(request_reresolution_, error); GRPC_CLOSURE_SCHED(request_reresolution_, error);
request_reresolution_ = nullptr; request_reresolution_ = nullptr;
if (grpc_lb_trace->enabled()) { if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"%s %p: scheduling re-resolution closure with error=%s.", "%s %p: scheduling re-resolution closure with error=%s.",
grpc_lb_trace->name(), this, grpc_error_string(error)); grpc_lb_trace->name(), this, grpc_error_string(error));
} }
} else { } else {
if (grpc_lb_trace->enabled()) { if (grpc_lb_trace->enabled()) {
gpr_log(GPR_DEBUG, "%s %p: no available re-resolution closure.", gpr_log(GPR_INFO, "%s %p: no available re-resolution closure.",
grpc_lb_trace->name(), this); grpc_lb_trace->name(), this);
} }
} }

@ -1247,7 +1247,7 @@ bool GrpcLb::PickLocked(PickState* pick) {
} }
} else { // rr_policy_ == NULL } else { // rr_policy_ == NULL
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[grpclb %p] No RR policy. Adding to grpclb's pending picks", "[grpclb %p] No RR policy. Adding to grpclb's pending picks",
this); this);
} }
@ -1413,14 +1413,13 @@ void GrpcLb::OnFallbackTimerLocked(void* arg, grpc_error* error) {
void GrpcLb::StartBalancerCallRetryTimerLocked() { void GrpcLb::StartBalancerCallRetryTimerLocked() {
grpc_millis next_try = lb_call_backoff_.NextAttemptTime(); grpc_millis next_try = lb_call_backoff_.NextAttemptTime();
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...", this); gpr_log(GPR_INFO, "[grpclb %p] Connection to LB server lost...", this);
grpc_millis timeout = next_try - ExecCtx::Get()->Now(); grpc_millis timeout = next_try - ExecCtx::Get()->Now();
if (timeout > 0) { if (timeout > 0) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
"[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.", this, this, timeout);
timeout);
} else { } else {
gpr_log(GPR_DEBUG, "[grpclb %p] ... retry_timer_active immediately.", gpr_log(GPR_INFO, "[grpclb %p] ... retry_timer_active immediately.",
this); this);
} }
} }
@ -1728,7 +1727,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
GPR_ASSERT(args != nullptr); GPR_ASSERT(args != nullptr);
if (rr_policy_ != nullptr) { if (rr_policy_ != nullptr) {
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", this, gpr_log(GPR_INFO, "[grpclb %p] Updating RR policy %p", this,
rr_policy_.get()); rr_policy_.get());
} }
rr_policy_->UpdateLocked(*args); rr_policy_->UpdateLocked(*args);
@ -1739,7 +1738,7 @@ void GrpcLb::CreateOrUpdateRoundRobinPolicyLocked() {
lb_policy_args.args = args; lb_policy_args.args = args;
CreateRoundRobinPolicyLocked(lb_policy_args); CreateRoundRobinPolicyLocked(lb_policy_args);
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", this, gpr_log(GPR_INFO, "[grpclb %p] Created new RR policy %p", this,
rr_policy_.get()); rr_policy_.get());
} }
} }
@ -1755,7 +1754,7 @@ void GrpcLb::OnRoundRobinRequestReresolutionLocked(void* arg,
} }
if (grpc_lb_glb_trace.enabled()) { if (grpc_lb_glb_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"[grpclb %p] Re-resolution requested from the internal RR policy (%p).", "[grpclb %p] Re-resolution requested from the internal RR policy (%p).",
grpclb_policy, grpclb_policy->rr_policy_.get()); grpclb_policy, grpclb_policy->rr_policy_.get());
} }

@ -118,7 +118,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE, grpc_connectivity_state_init(&state_tracker_, GRPC_CHANNEL_IDLE,
"pick_first"); "pick_first");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p created.", this); gpr_log(GPR_INFO, "Pick First %p created.", this);
} }
UpdateLocked(*args.args); UpdateLocked(*args.args);
grpc_subchannel_index_ref(); grpc_subchannel_index_ref();
@ -126,7 +126,7 @@ PickFirst::PickFirst(const Args& args) : LoadBalancingPolicy(args) {
PickFirst::~PickFirst() { PickFirst::~PickFirst() {
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Destroying Pick First %p", this); gpr_log(GPR_INFO, "Destroying Pick First %p", this);
} }
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@ -149,7 +149,7 @@ void PickFirst::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void PickFirst::ShutdownLocked() { void PickFirst::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, "Pick First %p Shutting down", this); gpr_log(GPR_INFO, "Pick First %p Shutting down", this);
} }
shutdown_ = true; shutdown_ = true;
PickState* pick; PickState* pick;
@ -351,7 +351,7 @@ void PickFirst::UpdateLocked(const grpc_channel_args& args) {
// subchannel list. // subchannel list.
if (latest_pending_subchannel_list_ != nullptr) { if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Pick First %p Shutting down latest pending subchannel list " "Pick First %p Shutting down latest pending subchannel list "
"%p, about to be replaced by newer latest %p", "%p, about to be replaced by newer latest %p",
this, latest_pending_subchannel_list_.get(), this, latest_pending_subchannel_list_.get(),
@ -372,7 +372,7 @@ void PickFirst::PickFirstSubchannelData::ProcessConnectivityChangeLocked(
grpc_error* error) { grpc_error* error) {
PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy()); PickFirst* p = static_cast<PickFirst*>(subchannel_list()->policy());
if (grpc_lb_pick_first_trace.enabled()) { if (grpc_lb_pick_first_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Pick First %p connectivity changed for subchannel %p (%" PRIuPTR "Pick First %p connectivity changed for subchannel %p (%" PRIuPTR
" of %" PRIuPTR " of %" PRIuPTR
"), subchannel_list %p: state=%s p->shutdown_=%d " "), subchannel_list %p: state=%s p->shutdown_=%d "

@ -199,7 +199,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
"round_robin"); "round_robin");
UpdateLocked(*args.args); UpdateLocked(*args.args);
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Created with %" PRIuPTR " subchannels", this, gpr_log(GPR_INFO, "[RR %p] Created with %" PRIuPTR " subchannels", this,
subchannel_list_->num_subchannels()); subchannel_list_->num_subchannels());
} }
grpc_subchannel_index_ref(); grpc_subchannel_index_ref();
@ -207,7 +207,7 @@ RoundRobin::RoundRobin(const Args& args) : LoadBalancingPolicy(args) {
RoundRobin::~RoundRobin() { RoundRobin::~RoundRobin() {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy", this); gpr_log(GPR_INFO, "[RR %p] Destroying Round Robin policy", this);
} }
GPR_ASSERT(subchannel_list_ == nullptr); GPR_ASSERT(subchannel_list_ == nullptr);
GPR_ASSERT(latest_pending_subchannel_list_ == nullptr); GPR_ASSERT(latest_pending_subchannel_list_ == nullptr);
@ -237,7 +237,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
subchannel_list_->num_subchannels(); subchannel_list_->num_subchannels();
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR "[RR %p] checking subchannel %p, subchannel_list %p, index %" PRIuPTR
": state=%s", ": state=%s",
this, subchannel_list_->subchannel(index)->subchannel(), this, subchannel_list_->subchannel(index)->subchannel(),
@ -248,7 +248,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
if (subchannel_list_->subchannel(index)->connectivity_state() == if (subchannel_list_->subchannel(index)->connectivity_state() ==
GRPC_CHANNEL_READY) { GRPC_CHANNEL_READY) {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[RR %p] found next ready subchannel (%p) at index %" PRIuPTR "[RR %p] found next ready subchannel (%p) at index %" PRIuPTR
" of subchannel_list %p", " of subchannel_list %p",
this, subchannel_list_->subchannel(index)->subchannel(), index, this, subchannel_list_->subchannel(index)->subchannel(), index,
@ -258,7 +258,7 @@ size_t RoundRobin::GetNextReadySubchannelIndexLocked() {
} }
} }
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] no subchannels in ready state", this); gpr_log(GPR_INFO, "[RR %p] no subchannels in ready state", this);
} }
return subchannel_list_->num_subchannels(); return subchannel_list_->num_subchannels();
} }
@ -268,13 +268,13 @@ void RoundRobin::UpdateLastReadySubchannelIndexLocked(size_t last_ready_index) {
GPR_ASSERT(last_ready_index < subchannel_list_->num_subchannels()); GPR_ASSERT(last_ready_index < subchannel_list_->num_subchannels());
last_ready_subchannel_index_ = last_ready_index; last_ready_subchannel_index_ = last_ready_index;
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log( gpr_log(GPR_INFO,
GPR_DEBUG,
"[RR %p] setting last_ready_subchannel_index=%" PRIuPTR "[RR %p] setting last_ready_subchannel_index=%" PRIuPTR
" (SC %p, CSC %p)", " (SC %p, CSC %p)",
this, last_ready_index, this, last_ready_index,
subchannel_list_->subchannel(last_ready_index)->subchannel(), subchannel_list_->subchannel(last_ready_index)->subchannel(),
subchannel_list_->subchannel(last_ready_index)->connected_subchannel()); subchannel_list_->subchannel(last_ready_index)
->connected_subchannel());
} }
} }
@ -292,7 +292,7 @@ void RoundRobin::HandOffPendingPicksLocked(LoadBalancingPolicy* new_policy) {
void RoundRobin::ShutdownLocked() { void RoundRobin::ShutdownLocked() {
grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"); grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Shutting down", this); gpr_log(GPR_INFO, "[RR %p] Shutting down", this);
} }
shutdown_ = true; shutdown_ = true;
PickState* pick; PickState* pick;
@ -373,7 +373,7 @@ bool RoundRobin::DoPickLocked(PickState* pick) {
*pick->user_data = sd->user_data(); *pick->user_data = sd->user_data();
} }
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, " "[RR %p] Picked target <-- Subchannel %p (connected %p) (sl %p, "
"index %" PRIuPTR ")", "index %" PRIuPTR ")",
this, sd->subchannel(), pick->connected_subchannel.get(), this, sd->subchannel(), pick->connected_subchannel.get(),
@ -397,8 +397,7 @@ void RoundRobin::DrainPendingPicksLocked() {
bool RoundRobin::PickLocked(PickState* pick) { bool RoundRobin::PickLocked(PickState* pick) {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] Trying to pick (shutdown: %d)", this, gpr_log(GPR_INFO, "[RR %p] Trying to pick (shutdown: %d)", this, shutdown_);
shutdown_);
} }
GPR_ASSERT(!shutdown_); GPR_ASSERT(!shutdown_);
if (subchannel_list_ != nullptr) { if (subchannel_list_ != nullptr) {
@ -529,7 +528,7 @@ void RoundRobin::RoundRobinSubchannelList::
p->subchannel_list_ != nullptr p->subchannel_list_ != nullptr
? p->subchannel_list_->num_subchannels() ? p->subchannel_list_->num_subchannels()
: 0; : 0;
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[RR %p] phasing out subchannel list %p (size %" PRIuPTR "[RR %p] phasing out subchannel list %p (size %" PRIuPTR
") in favor of %p (size %" PRIuPTR ")", ") in favor of %p (size %" PRIuPTR ")",
p, p->subchannel_list_.get(), old_num_subchannels, this, p, p->subchannel_list_.get(), old_num_subchannels, this,
@ -550,7 +549,7 @@ void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
RoundRobin* p = static_cast<RoundRobin*>(subchannel_list()->policy()); RoundRobin* p = static_cast<RoundRobin*>(subchannel_list()->policy());
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"[RR %p] connectivity changed for subchannel %p, subchannel_list %p " "[RR %p] connectivity changed for subchannel %p, subchannel_list %p "
"(index %" PRIuPTR " of %" PRIuPTR "(index %" PRIuPTR " of %" PRIuPTR
"): prev_state=%s new_state=%s " "): prev_state=%s new_state=%s "
@ -570,7 +569,7 @@ void RoundRobin::RoundRobinSubchannelData::ProcessConnectivityChangeLocked(
if (connectivity_state() == GRPC_CHANNEL_TRANSIENT_FAILURE && if (connectivity_state() == GRPC_CHANNEL_TRANSIENT_FAILURE &&
subchannel_list()->started_watching()) { subchannel_list()->started_watching()) {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. " "[RR %p] Subchannel %p has gone into TRANSIENT_FAILURE. "
"Requesting re-resolution", "Requesting re-resolution",
p, subchannel()); p, subchannel());
@ -632,13 +631,13 @@ void RoundRobin::UpdateLocked(const grpc_channel_args& args) {
grpc_lb_addresses* addresses = grpc_lb_addresses* addresses =
static_cast<grpc_lb_addresses*>(arg->value.pointer.p); static_cast<grpc_lb_addresses*>(arg->value.pointer.p);
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, "[RR %p] received update with %" PRIuPTR " addresses", gpr_log(GPR_INFO, "[RR %p] received update with %" PRIuPTR " addresses",
this, addresses->num_addresses); this, addresses->num_addresses);
} }
// Replace latest_pending_subchannel_list_. // Replace latest_pending_subchannel_list_.
if (latest_pending_subchannel_list_ != nullptr) { if (latest_pending_subchannel_list_ != nullptr) {
if (grpc_lb_round_robin_trace.enabled()) { if (grpc_lb_round_robin_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[RR %p] Shutting down previous pending subchannel list %p", this, "[RR %p] Shutting down previous pending subchannel list %p", this,
latest_pending_subchannel_list_.get()); latest_pending_subchannel_list_.get());
} }

@ -306,7 +306,7 @@ void SubchannelData<SubchannelListType, SubchannelDataType>::
UnrefSubchannelLocked(const char* reason) { UnrefSubchannelLocked(const char* reason) {
if (subchannel_ != nullptr) { if (subchannel_ != nullptr) {
if (subchannel_list_->tracer()->enabled()) { if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): unreffing subchannel", " (subchannel %p): unreffing subchannel",
subchannel_list_->tracer()->name(), subchannel_list_->policy(), subchannel_list_->tracer()->name(), subchannel_list_->policy(),
@ -323,7 +323,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, void SubchannelData<SubchannelListType,
SubchannelDataType>::StartConnectivityWatchLocked() { SubchannelDataType>::StartConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) { if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): starting watch: requesting connectivity change " " (subchannel %p): starting watch: requesting connectivity change "
"notification (from %s)", "notification (from %s)",
@ -344,7 +344,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, void SubchannelData<SubchannelListType,
SubchannelDataType>::RenewConnectivityWatchLocked() { SubchannelDataType>::RenewConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) { if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): renewing watch: requesting connectivity change " " (subchannel %p): renewing watch: requesting connectivity change "
"notification (from %s)", "notification (from %s)",
@ -363,7 +363,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, void SubchannelData<SubchannelListType,
SubchannelDataType>::StopConnectivityWatchLocked() { SubchannelDataType>::StopConnectivityWatchLocked() {
if (subchannel_list_->tracer()->enabled()) { if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): stopping connectivity watch", " (subchannel %p): stopping connectivity watch",
subchannel_list_->tracer()->name(), subchannel_list_->policy(), subchannel_list_->tracer()->name(), subchannel_list_->policy(),
@ -379,7 +379,7 @@ template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelData<SubchannelListType, SubchannelDataType>:: void SubchannelData<SubchannelListType, SubchannelDataType>::
CancelConnectivityWatchLocked(const char* reason) { CancelConnectivityWatchLocked(const char* reason) {
if (subchannel_list_->tracer()->enabled()) { if (subchannel_list_->tracer()->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
" (subchannel %p): canceling connectivity watch (%s)", " (subchannel %p): canceling connectivity watch (%s)",
subchannel_list_->tracer()->name(), subchannel_list_->policy(), subchannel_list_->tracer()->name(), subchannel_list_->policy(),
@ -492,7 +492,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
tracer_(tracer), tracer_(tracer),
combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) { combiner_(GRPC_COMBINER_REF(combiner, "subchannel_list")) {
if (tracer_->enabled()) { if (tracer_->enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels", "[%s %p] Creating subchannel list %p for %" PRIuPTR " subchannels",
tracer_->name(), policy, this, addresses->num_addresses); tracer_->name(), policy, this, addresses->num_addresses);
} }
@ -521,7 +521,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
if (tracer_->enabled()) { if (tracer_->enabled()) {
char* address_uri = char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address); grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] could not create subchannel for address uri %s, " "[%s %p] could not create subchannel for address uri %s, "
"ignoring", "ignoring",
tracer_->name(), policy_, address_uri); tracer_->name(), policy_, address_uri);
@ -532,7 +532,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
if (tracer_->enabled()) { if (tracer_->enabled()) {
char* address_uri = char* address_uri =
grpc_sockaddr_to_uri(&addresses->addresses[i].address); grpc_sockaddr_to_uri(&addresses->addresses[i].address);
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"[%s %p] subchannel list %p index %" PRIuPTR "[%s %p] subchannel list %p index %" PRIuPTR
": Created subchannel %p for address uri %s", ": Created subchannel %p for address uri %s",
tracer_->name(), policy_, this, subchannels_.size(), subchannel, tracer_->name(), policy_, this, subchannels_.size(), subchannel,
@ -548,7 +548,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::SubchannelList(
template <typename SubchannelListType, typename SubchannelDataType> template <typename SubchannelListType, typename SubchannelDataType>
SubchannelList<SubchannelListType, SubchannelDataType>::~SubchannelList() { SubchannelList<SubchannelListType, SubchannelDataType>::~SubchannelList() {
if (tracer_->enabled()) { if (tracer_->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p", tracer_->name(), gpr_log(GPR_INFO, "[%s %p] Destroying subchannel_list %p", tracer_->name(),
policy_, this); policy_, this);
} }
GRPC_COMBINER_UNREF(combiner_, "subchannel_list"); GRPC_COMBINER_UNREF(combiner_, "subchannel_list");
@ -557,7 +557,7 @@ SubchannelList<SubchannelListType, SubchannelDataType>::~SubchannelList() {
template <typename SubchannelListType, typename SubchannelDataType> template <typename SubchannelListType, typename SubchannelDataType>
void SubchannelList<SubchannelListType, SubchannelDataType>::ShutdownLocked() { void SubchannelList<SubchannelListType, SubchannelDataType>::ShutdownLocked() {
if (tracer_->enabled()) { if (tracer_->enabled()) {
gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p", gpr_log(GPR_INFO, "[%s %p] Shutting down subchannel_list %p",
tracer_->name(), policy_, this); tracer_->name(), policy_, this);
} }
GPR_ASSERT(!shutting_down_); GPR_ASSERT(!shutting_down_);

@ -234,7 +234,7 @@ static void finish_send_message(grpc_call_element* elem) {
static_cast<float>(before_size); static_cast<float>(before_size);
GPR_ASSERT(grpc_message_compression_algorithm_name( GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name)); calld->message_compression_algorithm, &algo_name));
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR "Compressed[%s] %" PRIuPTR " bytes vs. %" PRIuPTR
" bytes (%.2f%% savings)", " bytes (%.2f%% savings)",
algo_name, before_size, after_size, 100 * savings_ratio); algo_name, before_size, after_size, 100 * savings_ratio);
@ -246,7 +246,7 @@ static void finish_send_message(grpc_call_element* elem) {
const char* algo_name; const char* algo_name;
GPR_ASSERT(grpc_message_compression_algorithm_name( GPR_ASSERT(grpc_message_compression_algorithm_name(
calld->message_compression_algorithm, &algo_name)); calld->message_compression_algorithm, &algo_name));
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Algorithm '%s' enabled but decided not to compress. Input size: " "Algorithm '%s' enabled but decided not to compress. Input size: "
"%" PRIuPTR, "%" PRIuPTR,
algo_name, calld->slices.length); algo_name, calld->slices.length);

@ -807,7 +807,7 @@ static const char* write_state_name(grpc_chttp2_write_state st) {
static void set_write_state(grpc_chttp2_transport* t, static void set_write_state(grpc_chttp2_transport* t,
grpc_chttp2_write_state st, const char* reason) { grpc_chttp2_write_state st, const char* reason) {
GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t, GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "W:%p %s state %s -> %s [%s]", t,
t->is_client ? "CLIENT" : "SERVER", t->is_client ? "CLIENT" : "SERVER",
write_state_name(t->write_state), write_state_name(t->write_state),
write_state_name(st), reason)); write_state_name(st), reason));
@ -1072,7 +1072,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t goaway_error, uint32_t goaway_error,
grpc_slice goaway_text) { grpc_slice goaway_text) {
// GRPC_CHTTP2_IF_TRACING( // GRPC_CHTTP2_IF_TRACING(
// gpr_log(GPR_DEBUG, "got goaway [%d]: %s", goaway_error, msg)); // gpr_log(GPR_INFO, "got goaway [%d]: %s", goaway_error, msg));
// Discard the error from a previous goaway frame (if any) // Discard the error from a previous goaway frame (if any)
if (t->goaway_error != GRPC_ERROR_NONE) { if (t->goaway_error != GRPC_ERROR_NONE) {
@ -1118,7 +1118,7 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) { grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
/* safe since we can't (legally) be parsing this stream yet */ /* safe since we can't (legally) be parsing this stream yet */
GRPC_CHTTP2_IF_TRACING(gpr_log( GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_DEBUG, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d", GPR_INFO, "HTTP:%s: Allocating new grpc_chttp2_stream %p to id %d",
t->is_client ? "CLI" : "SVR", s, t->next_stream_id)); t->is_client ? "CLI" : "SVR", s, t->next_stream_id));
GPR_ASSERT(s->id == 0); GPR_ASSERT(s->id == 0);
@ -1183,7 +1183,7 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
const char* errstr = grpc_error_string(error); const char* errstr = grpc_error_string(error);
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s " "complete_closure_step: t=%p %p refs=%d flags=0x%04x desc=%s err=%s "
"write_state=%s", "write_state=%s",
t, closure, t, closure,
@ -1336,7 +1336,7 @@ static void perform_stream_op_locked(void* stream_op,
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op); char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op_locked: %s; on_complete = %p", str, gpr_log(GPR_INFO, "perform_stream_op_locked: %s; on_complete = %p", str,
op->on_complete); op->on_complete);
gpr_free(str); gpr_free(str);
if (op->send_initial_metadata) { if (op->send_initial_metadata) {
@ -1638,7 +1638,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
char* str = grpc_transport_stream_op_batch_string(op); char* str = grpc_transport_stream_op_batch_string(op);
gpr_log(GPR_DEBUG, "perform_stream_op[s=%p]: %s", s, str); gpr_log(GPR_INFO, "perform_stream_op[s=%p]: %s", s, str);
gpr_free(str); gpr_free(str);
} }
@ -2529,7 +2529,7 @@ static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
static void start_bdp_ping_locked(void* tp, grpc_error* error) { static void start_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp); grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string, gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string,
grpc_error_string(error)); grpc_error_string(error));
} }
/* Reset the keepalive ping timer */ /* Reset the keepalive ping timer */
@ -2542,7 +2542,7 @@ static void start_bdp_ping_locked(void* tp, grpc_error* error) {
static void finish_bdp_ping_locked(void* tp, grpc_error* error) { static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp); grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string, gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string,
grpc_error_string(error)); grpc_error_string(error));
} }
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
@ -2716,8 +2716,7 @@ static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
static void connectivity_state_set(grpc_chttp2_transport* t, static void connectivity_state_set(grpc_chttp2_transport* t,
grpc_connectivity_state state, grpc_connectivity_state state,
grpc_error* error, const char* reason) { grpc_error* error, const char* reason) {
GRPC_CHTTP2_IF_TRACING( GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_INFO, "set connectivity_state=%d", state));
gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error, grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error,
reason); reason);
} }
@ -2984,7 +2983,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
/* Channel with no active streams: send a goaway to try and make it /* Channel with no active streams: send a goaway to try and make it
* disconnect cleanly */ * disconnect cleanly */
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory", gpr_log(GPR_INFO, "HTTP2: %s - send goaway to free memory",
t->peer_string); t->peer_string);
} }
send_goaway(t, send_goaway(t,
@ -2992,7 +2991,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error* error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM)); GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
} else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) { } else if (error == GRPC_ERROR_NONE && grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR "HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams", " streams",
t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map)); t->peer_string, grpc_chttp2_stream_map_size(&t->stream_map));
@ -3013,7 +3012,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>( grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
grpc_chttp2_stream_map_rand(&t->stream_map)); grpc_chttp2_stream_map_rand(&t->stream_map));
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "HTTP2: %s - abandon stream id %d", t->peer_string, gpr_log(GPR_INFO, "HTTP2: %s - abandon stream id %d", t->peer_string,
s->id); s->id);
} }
grpc_chttp2_cancel_stream( grpc_chttp2_cancel_stream(

@ -217,14 +217,14 @@ grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
t->initial_window_update += static_cast<int64_t>(parser->value) - t->initial_window_update += static_cast<int64_t>(parser->value) -
parser->incoming_settings[id]; parser->incoming_settings[id];
if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) { if (grpc_http_trace.enabled() || grpc_flowctl_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p[%s] adding %d for initial_window change", gpr_log(GPR_INFO, "%p[%s] adding %d for initial_window change", t,
t, t->is_client ? "cli" : "svr", t->is_client ? "cli" : "svr",
static_cast<int>(t->initial_window_update)); static_cast<int>(t->initial_window_update));
} }
} }
parser->incoming_settings[id] = parser->value; parser->incoming_settings[id] = parser->value;
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "CHTTP2:%s:%s: got setting %s = %d", gpr_log(GPR_INFO, "CHTTP2:%s:%s: got setting %s = %d",
t->is_client ? "CLI" : "SVR", t->peer_string, sp->name, t->is_client ? "CLI" : "SVR", t->peer_string, sp->name,
parser->value); parser->value);
} }

@ -470,7 +470,7 @@ static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
v = grpc_slice_to_c_string(GRPC_MDVALUE(elem)); v = grpc_slice_to_c_string(GRPC_MDVALUE(elem));
} }
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d", "Encode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem), k, v, GRPC_MDELEM_IS_INTERNED(elem), GRPC_MDELEM_STORAGE(elem),
grpc_slice_is_interned(GRPC_MDKEY(elem)), grpc_slice_is_interned(GRPC_MDKEY(elem)),
@ -654,7 +654,7 @@ void grpc_chttp2_hpack_compressor_set_max_table_size(
} }
c->advertise_table_size_change = 1; c->advertise_table_size_change = 1;
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "set max table size from encoder to %d", max_table_size); gpr_log(GPR_INFO, "set max table size from encoder to %d", max_table_size);
} }
} }

@ -633,7 +633,7 @@ static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md,
v = grpc_slice_to_c_string(GRPC_MDVALUE(md)); v = grpc_slice_to_c_string(GRPC_MDVALUE(md));
} }
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d", "Decode: '%s: %s', elem_interned=%d [%d], k_interned=%d, v_interned=%d",
k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md), k, v, GRPC_MDELEM_IS_INTERNED(md), GRPC_MDELEM_STORAGE(md),
grpc_slice_is_interned(GRPC_MDKEY(md)), grpc_slice_is_interned(GRPC_MDKEY(md)),

@ -247,7 +247,7 @@ void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
return; return;
} }
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes); gpr_log(GPR_INFO, "Update hpack parser max size to %d", max_bytes);
} }
while (tbl->mem_used > max_bytes) { while (tbl->mem_used > max_bytes) {
evict1(tbl); evict1(tbl);
@ -270,7 +270,7 @@ grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
return err; return err;
} }
if (grpc_http_trace.enabled()) { if (grpc_http_trace.enabled()) {
gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes); gpr_log(GPR_INFO, "Update hpack parser table size to %d", bytes);
} }
while (tbl->mem_used > bytes) { while (tbl->mem_used > bytes) {
evict1(tbl); evict1(tbl);

@ -68,7 +68,7 @@ static bool stream_list_pop(grpc_chttp2_transport* t,
} }
*stream = s; *stream = s;
if (s && grpc_trace_http2_stream_state.enabled()) { if (s && grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: pop from %s", t, s->id, gpr_log(GPR_INFO, "%p[%d][%s]: pop from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id)); t->is_client ? "cli" : "svr", stream_list_id_string(id));
} }
return s != nullptr; return s != nullptr;
@ -90,7 +90,7 @@ static void stream_list_remove(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
t->lists[id].tail = s->links[id].prev; t->lists[id].tail = s->links[id].prev;
} }
if (grpc_trace_http2_stream_state.enabled()) { if (grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: remove from %s", t, s->id, gpr_log(GPR_INFO, "%p[%d][%s]: remove from %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id)); t->is_client ? "cli" : "svr", stream_list_id_string(id));
} }
} }
@ -122,7 +122,7 @@ static void stream_list_add_tail(grpc_chttp2_transport* t,
t->lists[id].tail = s; t->lists[id].tail = s;
s->included[id] = 1; s->included[id] = 1;
if (grpc_trace_http2_stream_state.enabled()) { if (grpc_trace_http2_stream_state.enabled()) {
gpr_log(GPR_DEBUG, "%p[%d][%s]: add to %s", t, s->id, gpr_log(GPR_INFO, "%p[%d][%s]: add to %s", t, s->id,
t->is_client ? "cli" : "svr", stream_list_id_string(id)); t->is_client ? "cli" : "svr", stream_list_id_string(id));
} }
} }

@ -52,7 +52,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) { if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_INFLIGHT])) {
/* ping already in-flight: wait */ /* ping already in-flight: wait */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: already pinging", gpr_log(GPR_INFO, "%s: Ping delayed [%p]: already pinging",
t->is_client ? "CLIENT" : "SERVER", t->peer_string); t->is_client ? "CLIENT" : "SERVER", t->peer_string);
} }
return; return;
@ -61,7 +61,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
t->ping_policy.max_pings_without_data != 0) { t->ping_policy.max_pings_without_data != 0) {
/* need to receive something of substance before sending a ping again */ /* need to receive something of substance before sending a ping again */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping delayed [%p]: too many recent pings: %d/%d", gpr_log(GPR_INFO, "%s: Ping delayed [%p]: too many recent pings: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required, t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data); t->ping_policy.max_pings_without_data);
@ -81,7 +81,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
if (next_allowed_ping > now) { if (next_allowed_ping > now) {
/* not enough elapsed time between successive pings */ /* not enough elapsed time between successive pings */
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"%s: Ping delayed [%p]: not enough time elapsed since last ping. " "%s: Ping delayed [%p]: not enough time elapsed since last ping. "
" Last ping %f: Next ping %f: Now %f", " Last ping %f: Next ping %f: Now %f",
t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->is_client ? "CLIENT" : "SERVER", t->peer_string,
@ -107,7 +107,7 @@ static void maybe_initiate_ping(grpc_chttp2_transport* t) {
GRPC_STATS_INC_HTTP2_PINGS_SENT(); GRPC_STATS_INC_HTTP2_PINGS_SENT();
t->ping_state.last_ping_sent_time = now; t->ping_state.last_ping_sent_time = now;
if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) { if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d", gpr_log(GPR_INFO, "%s: Ping sent [%p]: %d/%d",
t->is_client ? "CLIENT" : "SERVER", t->peer_string, t->is_client ? "CLIENT" : "SERVER", t->peer_string,
t->ping_state.pings_before_data_required, t->ping_state.pings_before_data_required,
t->ping_policy.max_pings_without_data); t->ping_policy.max_pings_without_data);
@ -401,7 +401,7 @@ class StreamWriteContext {
StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s) StreamWriteContext(WriteContext* write_context, grpc_chttp2_stream* s)
: write_context_(write_context), t_(write_context->transport()), s_(s) { : write_context_(write_context), t_(write_context->transport()), s_(s) {
GRPC_CHTTP2_IF_TRACING( GRPC_CHTTP2_IF_TRACING(
gpr_log(GPR_DEBUG, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_, gpr_log(GPR_INFO, "W:%p %s[%d] im-(sent,send)=(%d,%d) announce=%d", t_,
t_->is_client ? "CLIENT" : "SERVER", s->id, t_->is_client ? "CLIENT" : "SERVER", s->id,
s->sent_initial_metadata, s->send_initial_metadata != nullptr, s->sent_initial_metadata, s->send_initial_metadata != nullptr,
(int)(s->flow_control->local_window_delta() - (int)(s->flow_control->local_window_delta() -

@ -125,12 +125,12 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
static void op_state_machine(void* arg, grpc_error* error); static void op_state_machine(void* arg, grpc_error* error);
static void ref_transport(inproc_transport* t) { static void ref_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "ref_transport %p", t); INPROC_LOG(GPR_INFO, "ref_transport %p", t);
gpr_ref(&t->refs); gpr_ref(&t->refs);
} }
static void really_destroy_transport(inproc_transport* t) { static void really_destroy_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t); INPROC_LOG(GPR_INFO, "really_destroy_transport %p", t);
grpc_connectivity_state_destroy(&t->connectivity); grpc_connectivity_state_destroy(&t->connectivity);
if (gpr_unref(&t->mu->refs)) { if (gpr_unref(&t->mu->refs)) {
gpr_free(t->mu); gpr_free(t->mu);
@ -139,7 +139,7 @@ static void really_destroy_transport(inproc_transport* t) {
} }
static void unref_transport(inproc_transport* t) { static void unref_transport(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "unref_transport %p", t); INPROC_LOG(GPR_INFO, "unref_transport %p", t);
if (gpr_unref(&t->refs)) { if (gpr_unref(&t->refs)) {
really_destroy_transport(t); really_destroy_transport(t);
} }
@ -154,17 +154,17 @@ static void unref_transport(inproc_transport* t) {
#endif #endif
static void ref_stream(inproc_stream* s, const char* reason) { static void ref_stream(inproc_stream* s, const char* reason) {
INPROC_LOG(GPR_DEBUG, "ref_stream %p %s", s, reason); INPROC_LOG(GPR_INFO, "ref_stream %p %s", s, reason);
STREAM_REF(s->refs, reason); STREAM_REF(s->refs, reason);
} }
static void unref_stream(inproc_stream* s, const char* reason) { static void unref_stream(inproc_stream* s, const char* reason) {
INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason); INPROC_LOG(GPR_INFO, "unref_stream %p %s", s, reason);
STREAM_UNREF(s->refs, reason); STREAM_UNREF(s->refs, reason);
} }
static void really_destroy_stream(inproc_stream* s) { static void really_destroy_stream(inproc_stream* s) {
INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s); INPROC_LOG(GPR_INFO, "really_destroy_stream %p", s);
GRPC_ERROR_UNREF(s->write_buffer_cancel_error); GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
GRPC_ERROR_UNREF(s->cancel_self_error); GRPC_ERROR_UNREF(s->cancel_self_error);
@ -225,7 +225,7 @@ static grpc_error* fill_in_metadata(inproc_stream* s,
static int init_stream(grpc_transport* gt, grpc_stream* gs, static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount, const void* server_data, grpc_stream_refcount* refcount, const void* server_data,
gpr_arena* arena) { gpr_arena* arena) {
INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data); INPROC_LOG(GPR_INFO, "init_stream %p %p %p", gt, gs, server_data);
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt); inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs); inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->arena = arena; s->arena = arena;
@ -282,8 +282,8 @@ static int init_stream(grpc_transport* gt, grpc_stream* gs,
// Pass the client-side stream address to the server-side for a ref // Pass the client-side stream address to the server-side for a ref
ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server ref_stream(s, "inproc_init_stream:clt"); // ref it now on behalf of server
// side to avoid destruction // side to avoid destruction
INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p", INPROC_LOG(GPR_INFO, "calling accept stream cb %p %p", st->accept_stream_cb,
st->accept_stream_cb, st->accept_stream_data); st->accept_stream_data);
(*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s); (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
} else { } else {
// This is the server-side and is being called through accept_stream_cb // This is the server-side and is being called through accept_stream_cb
@ -378,7 +378,7 @@ static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
int is_rtm = static_cast<int>(op == s->recv_trailing_md_op); int is_rtm = static_cast<int>(op == s->recv_trailing_md_op);
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) { if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error); INPROC_LOG(GPR_INFO, "%s %p %p %p", msg, s, op, error);
GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error));
} }
} }
@ -393,7 +393,7 @@ static void maybe_schedule_op_closure_locked(inproc_stream* s,
} }
static void fail_helper_locked(inproc_stream* s, grpc_error* error) { static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s); INPROC_LOG(GPR_INFO, "op_state_machine %p fail_helper", s);
// If we're failing this side, we need to make sure that // If we're failing this side, we need to make sure that
// we also send or have already sent trailing metadata // we also send or have already sent trailing metadata
if (!s->trailing_md_sent) { if (!s->trailing_md_sent) {
@ -458,7 +458,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
*s->recv_initial_md_op->payload->recv_initial_metadata *s->recv_initial_md_op->payload->recv_initial_metadata
.trailing_metadata_available = true; .trailing_metadata_available = true;
} }
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"fail_helper %p scheduling initial-metadata-ready %p %p", s, "fail_helper %p scheduling initial-metadata-ready %p %p", s,
error, err); error, err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@ -472,7 +472,7 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->recv_initial_md_op = nullptr; s->recv_initial_md_op = nullptr;
} }
if (s->recv_message_op) { if (s->recv_message_op) {
INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s, INPROC_LOG(GPR_INFO, "fail_helper %p scheduling message-ready %p", s,
error); error);
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready, s->recv_message_op->payload->recv_message.recv_message_ready,
@ -496,9 +496,8 @@ static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
s->send_trailing_md_op = nullptr; s->send_trailing_md_op = nullptr;
} }
if (s->recv_trailing_md_op) { if (s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %p",
"fail_helper %p scheduling trailing-md-on-complete %p", s, s, error);
error);
complete_if_batch_end_locked( complete_if_batch_end_locked(
s, error, s->recv_trailing_md_op, s, error, s->recv_trailing_md_op,
"fail_helper scheduling recv-trailing-metadata-on-complete"); "fail_helper scheduling recv-trailing-metadata-on-complete");
@ -549,7 +548,7 @@ static void message_transfer_locked(inproc_stream* sender,
receiver->recv_stream.Init(&receiver->recv_message, 0); receiver->recv_stream.Init(&receiver->recv_message, 0);
receiver->recv_message_op->payload->recv_message.recv_message->reset( receiver->recv_message_op->payload->recv_message.recv_message->reset(
receiver->recv_stream.get()); receiver->recv_stream.get());
INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready", INPROC_LOG(GPR_INFO, "message_transfer_locked %p scheduling message-ready",
receiver); receiver);
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
receiver->recv_message_op->payload->recv_message.recv_message_ready, receiver->recv_message_op->payload->recv_message.recv_message_ready,
@ -577,7 +576,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
bool needs_close = false; bool needs_close = false;
INPROC_LOG(GPR_DEBUG, "op_state_machine %p", arg); INPROC_LOG(GPR_INFO, "op_state_machine %p", arg);
inproc_stream* s = static_cast<inproc_stream*>(arg); inproc_stream* s = static_cast<inproc_stream*>(arg);
gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed gpr_mu* mu = &s->t->mu->mu; // keep aside in case s gets closed
gpr_mu_lock(mu); gpr_mu_lock(mu);
@ -626,7 +625,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
: &other->to_read_trailing_md_filled; : &other->to_read_trailing_md_filled;
if (*destfilled || s->trailing_md_sent) { if (*destfilled || s->trailing_md_sent) {
// The buffer is already in use; that's an error! // The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s); INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata"); new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(s, GRPC_ERROR_REF(new_err)); fail_helper_locked(s, GRPC_ERROR_REF(new_err));
goto done; goto done;
@ -639,7 +638,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
} }
s->trailing_md_sent = true; s->trailing_md_sent = true;
if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) { if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete", s); "op_state_machine %p scheduling trailing-md-on-complete", s);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete, GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -658,7 +657,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err = new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd initial md");
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already " "op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %p", "recvd initial md %p",
s, new_err); s, new_err);
@ -684,7 +683,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
} }
grpc_metadata_batch_clear(&s->to_read_initial_md); grpc_metadata_batch_clear(&s->to_read_initial_md);
s->to_read_initial_md_filled = false; s->to_read_initial_md_filled = false;
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling initial-metadata-ready %p", s, "op_state_machine %p scheduling initial-metadata-ready %p", s,
new_err); new_err);
GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
@ -696,7 +695,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_initial_md_op = nullptr; s->recv_initial_md_op = nullptr;
if (new_err != GRPC_ERROR_NONE) { if (new_err != GRPC_ERROR_NONE) {
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling on_complete errors2 %p", s, "op_state_machine %p scheduling on_complete errors2 %p", s,
new_err); new_err);
fail_helper_locked(s, GRPC_ERROR_REF(new_err)); fail_helper_locked(s, GRPC_ERROR_REF(new_err));
@ -719,7 +718,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
new_err = new_err =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md"); GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already recvd trailing md");
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, GPR_INFO,
"op_state_machine %p scheduling on_complete errors for already " "op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %p", "recvd trailing md %p",
s, new_err); s, new_err);
@ -729,7 +728,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
if (s->recv_message_op != nullptr) { if (s->recv_message_op != nullptr) {
// This message needs to be wrapped up because it will never be // This message needs to be wrapped up because it will never be
// satisfied // satisfied
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s); INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -764,7 +763,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// (If the server hasn't already sent its trailing md, it doesn't have // (If the server hasn't already sent its trailing md, it doesn't have
// a final status, so don't mark this op complete) // a final status, so don't mark this op complete)
if (s->t->is_client || s->trailing_md_sent) { if (s->t->is_client || s->trailing_md_sent) {
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"op_state_machine %p scheduling trailing-md-on-complete %p", "op_state_machine %p scheduling trailing-md-on-complete %p",
s, new_err); s, new_err);
GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete, GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
@ -772,21 +771,21 @@ static void op_state_machine(void* arg, grpc_error* error) {
s->recv_trailing_md_op = nullptr; s->recv_trailing_md_op = nullptr;
needs_close = true; needs_close = true;
} else { } else {
INPROC_LOG(GPR_DEBUG, INPROC_LOG(GPR_INFO,
"op_state_machine %p server needs to delay handling " "op_state_machine %p server needs to delay handling "
"trailing-md-on-complete %p", "trailing-md-on-complete %p",
s, new_err); s, new_err);
} }
} else { } else {
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, GPR_INFO,
"op_state_machine %p has trailing md but not yet waiting for it", s); "op_state_machine %p has trailing md but not yet waiting for it", s);
} }
} }
if (s->trailing_md_recvd && s->recv_message_op) { if (s->trailing_md_recvd && s->recv_message_op) {
// No further message will come on this stream, so finish off the // No further message will come on this stream, so finish off the
// recv_message_op // recv_message_op
INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s); INPROC_LOG(GPR_INFO, "op_state_machine %p scheduling message-ready", s);
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
s->recv_message_op->payload->recv_message.recv_message_ready, s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_NONE); GRPC_ERROR_NONE);
@ -810,7 +809,7 @@ static void op_state_machine(void* arg, grpc_error* error) {
// Didn't get the item we wanted so we still need to get // Didn't get the item we wanted so we still need to get
// rescheduled // rescheduled
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, "op_state_machine %p still needs closure %p %p %p %p %p", s, GPR_INFO, "op_state_machine %p still needs closure %p %p %p %p %p", s,
s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op, s->send_message_op, s->send_trailing_md_op, s->recv_initial_md_op,
s->recv_message_op, s->recv_trailing_md_op); s->recv_message_op, s->recv_trailing_md_op);
s->ops_needed = true; s->ops_needed = true;
@ -826,8 +825,7 @@ done:
static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) { static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
bool ret = false; // was the cancel accepted bool ret = false; // was the cancel accepted
INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s, INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s, grpc_error_string(error));
grpc_error_string(error));
if (s->cancel_self_error == GRPC_ERROR_NONE) { if (s->cancel_self_error == GRPC_ERROR_NONE) {
ret = true; ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error); s->cancel_self_error = GRPC_ERROR_REF(error);
@ -877,7 +875,7 @@ static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
static void perform_stream_op(grpc_transport* gt, grpc_stream* gs, static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_transport_stream_op_batch* op) { grpc_transport_stream_op_batch* op) {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op); INPROC_LOG(GPR_INFO, "perform_stream_op %p %p %p", gt, gs, op);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs); inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed gpr_mu* mu = &s->t->mu->mu; // save aside in case s gets closed
gpr_mu_lock(mu); gpr_mu_lock(mu);
@ -907,7 +905,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
// already self-canceled so still give it an error // already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error); error = GRPC_ERROR_REF(s->cancel_self_error);
} else { } else {
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %s%s%s%s%s%s%s", s, INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
s->t->is_client ? "client" : "server", s->t->is_client ? "client" : "server",
op->send_initial_metadata ? " send_initial_metadata" : "", op->send_initial_metadata ? " send_initial_metadata" : "",
op->send_message ? " send_message" : "", op->send_message ? " send_message" : "",
@ -936,7 +934,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
: &other->to_read_initial_md_filled; : &other->to_read_initial_md_filled;
if (*destfilled || s->initial_md_sent) { if (*destfilled || s->initial_md_sent) {
// The buffer is already in use; that's an error! // The buffer is already in use; that's an error!
INPROC_LOG(GPR_DEBUG, "Extra initial metadata %p", s); INPROC_LOG(GPR_INFO, "Extra initial metadata %p", s);
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata"); error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra initial metadata");
} else { } else {
if (!other || !other->closed) { if (!other || !other->closed) {
@ -1013,7 +1011,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
true; true;
} }
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, GPR_INFO,
"perform_stream_op error %p scheduling initial-metadata-ready %p", "perform_stream_op error %p scheduling initial-metadata-ready %p",
s, error); s, error);
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(
@ -1022,14 +1020,14 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
} }
if (op->recv_message) { if (op->recv_message) {
INPROC_LOG( INPROC_LOG(
GPR_DEBUG, GPR_INFO,
"perform_stream_op error %p scheduling recv message-ready %p", s, "perform_stream_op error %p scheduling recv message-ready %p", s,
error); error);
GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready, GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error)); GRPC_ERROR_REF(error));
} }
} }
INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s, INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %p", s,
error); error);
GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error)); GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error));
} }
@ -1042,7 +1040,7 @@ static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
} }
static void close_transport_locked(inproc_transport* t) { static void close_transport_locked(inproc_transport* t) {
INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed); INPROC_LOG(GPR_INFO, "close_transport %p %d", t, t->is_closed);
grpc_connectivity_state_set( grpc_connectivity_state_set(
&t->connectivity, GRPC_CHANNEL_SHUTDOWN, &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."), GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."),
@ -1063,7 +1061,7 @@ static void close_transport_locked(inproc_transport* t) {
static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) { static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt); inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op); INPROC_LOG(GPR_INFO, "perform_transport_op %p %p", t, op);
gpr_mu_lock(&t->mu->mu); gpr_mu_lock(&t->mu->mu);
if (op->on_connectivity_state_change) { if (op->on_connectivity_state_change) {
grpc_connectivity_state_notify_on_state_change( grpc_connectivity_state_notify_on_state_change(
@ -1096,7 +1094,7 @@ static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
static void destroy_stream(grpc_transport* gt, grpc_stream* gs, static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
grpc_closure* then_schedule_closure) { grpc_closure* then_schedule_closure) {
INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure); INPROC_LOG(GPR_INFO, "destroy_stream %p %p", gs, then_schedule_closure);
inproc_stream* s = reinterpret_cast<inproc_stream*>(gs); inproc_stream* s = reinterpret_cast<inproc_stream*>(gs);
s->closure_at_destroy = then_schedule_closure; s->closure_at_destroy = then_schedule_closure;
really_destroy_stream(s); really_destroy_stream(s);
@ -1104,7 +1102,7 @@ static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
static void destroy_transport(grpc_transport* gt) { static void destroy_transport(grpc_transport* gt) {
inproc_transport* t = reinterpret_cast<inproc_transport*>(gt); inproc_transport* t = reinterpret_cast<inproc_transport*>(gt);
INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t); INPROC_LOG(GPR_INFO, "destroy_transport %p", t);
gpr_mu_lock(&t->mu->mu); gpr_mu_lock(&t->mu->mu);
close_transport_locked(t); close_transport_locked(t);
gpr_mu_unlock(&t->mu->mu); gpr_mu_unlock(&t->mu->mu);
@ -1165,7 +1163,7 @@ static void inproc_transports_create(grpc_transport** server_transport,
const grpc_channel_args* server_args, const grpc_channel_args* server_args,
grpc_transport** client_transport, grpc_transport** client_transport,
const grpc_channel_args* client_args) { const grpc_channel_args* client_args) {
INPROC_LOG(GPR_DEBUG, "inproc_transports_create"); INPROC_LOG(GPR_INFO, "inproc_transports_create");
inproc_transport* st = inproc_transport* st =
static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st))); static_cast<inproc_transport*>(gpr_zalloc(sizeof(*st)));
inproc_transport* ct = inproc_transport* ct =

@ -137,7 +137,7 @@ void grpc_handshake_manager_add(grpc_handshake_manager* mgr,
grpc_handshaker* handshaker) { grpc_handshaker* handshaker) {
if (grpc_handshaker_trace.enabled()) { if (grpc_handshaker_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR, "handshake_manager %p: adding handshaker %s [%p] at index %" PRIuPTR,
mgr, grpc_handshaker_name(handshaker), handshaker, mgr->count); mgr, grpc_handshaker_name(handshaker), handshaker, mgr->count);
} }
@ -208,7 +208,7 @@ static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
grpc_error* error) { grpc_error* error) {
if (grpc_handshaker_trace.enabled()) { if (grpc_handshaker_trace.enabled()) {
char* args_str = handshaker_args_string(&mgr->args); char* args_str = handshaker_args_string(&mgr->args);
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR "handshake_manager %p: error=%s shutdown=%d index=%" PRIuPTR
", args=%s", ", args=%s",
mgr, grpc_error_string(error), mgr->shutdown, mgr->index, args_str); mgr, grpc_error_string(error), mgr->shutdown, mgr->index, args_str);
@ -221,7 +221,7 @@ static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early || if (error != GRPC_ERROR_NONE || mgr->shutdown || mgr->args.exit_early ||
mgr->index == mgr->count) { mgr->index == mgr->count) {
if (grpc_handshaker_trace.enabled()) { if (grpc_handshaker_trace.enabled()) {
gpr_log(GPR_DEBUG, "handshake_manager %p: handshaking complete", mgr); gpr_log(GPR_INFO, "handshake_manager %p: handshaking complete", mgr);
} }
// Cancel deadline timer, since we're invoking the on_handshake_done // Cancel deadline timer, since we're invoking the on_handshake_done
// callback now. // callback now.
@ -231,7 +231,7 @@ static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
} else { } else {
if (grpc_handshaker_trace.enabled()) { if (grpc_handshaker_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR, "handshake_manager %p: calling handshaker %s [%p] at index %" PRIuPTR,
mgr, grpc_handshaker_name(mgr->handshakers[mgr->index]), mgr, grpc_handshaker_name(mgr->handshakers[mgr->index]),
mgr->handshakers[mgr->index], mgr->index); mgr->handshakers[mgr->index], mgr->index);

@ -159,7 +159,7 @@ class InternallyRefCountedWithTracing : public Orphanable {
const char* reason) GRPC_MUST_USE_RESULT { const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s", gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(), trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason); old_refs, old_refs + 1, reason);
} }
@ -180,7 +180,7 @@ class InternallyRefCountedWithTracing : public Orphanable {
void Unref(const DebugLocation& location, const char* reason) { void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s", gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(), trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason); old_refs, old_refs - 1, reason);
} }

@ -100,7 +100,7 @@ class RefCountedWithTracing {
const char* reason) GRPC_MUST_USE_RESULT { const char* reason) GRPC_MUST_USE_RESULT {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s", gpr_log(GPR_INFO, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(), trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs + 1, reason); old_refs, old_refs + 1, reason);
} }
@ -121,7 +121,7 @@ class RefCountedWithTracing {
void Unref(const DebugLocation& location, const char* reason) { void Unref(const DebugLocation& location, const char* reason) {
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) { if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count); gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s", gpr_log(GPR_INFO, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
trace_flag_->name(), this, location.file(), location.line(), trace_flag_->name(), this, location.file(), location.line(),
old_refs, old_refs - 1, reason); old_refs, old_refs - 1, reason);
} }

@ -64,7 +64,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
const char* reason) { const char* reason) {
GPR_TIMER_SCOPE("call_combiner_start", 0); GPR_TIMER_SCOPE("call_combiner_start", 0);
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR "==> grpc_call_combiner_start() [%p] closure=%p [" DEBUG_FMT_STR
"%s] error=%s", "%s] error=%s",
call_combiner, closure DEBUG_FMT_ARGS, reason, call_combiner, closure DEBUG_FMT_ARGS, reason,
@ -73,7 +73,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
size_t prev_size = static_cast<size_t>( size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1)); gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)1));
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size + 1); prev_size + 1);
} }
GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(); GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
@ -82,7 +82,7 @@ void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
GPR_TIMER_MARK("call_combiner_initiate", 0); GPR_TIMER_MARK("call_combiner_initiate", 0);
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING IMMEDIATELY"); gpr_log(GPR_INFO, " EXECUTING IMMEDIATELY");
} }
// Queue was empty, so execute this closure immediately. // Queue was empty, so execute this closure immediately.
GRPC_CLOSURE_SCHED(closure, error); GRPC_CLOSURE_SCHED(closure, error);
@ -101,21 +101,21 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
const char* reason) { const char* reason) {
GPR_TIMER_SCOPE("call_combiner_stop", 0); GPR_TIMER_SCOPE("call_combiner_stop", 0);
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]", "==> grpc_call_combiner_stop() [%p] [" DEBUG_FMT_STR "%s]",
call_combiner DEBUG_FMT_ARGS, reason); call_combiner DEBUG_FMT_ARGS, reason);
} }
size_t prev_size = static_cast<size_t>( size_t prev_size = static_cast<size_t>(
gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1)); gpr_atm_full_fetch_add(&call_combiner->size, (gpr_atm)-1));
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size, gpr_log(GPR_INFO, " size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
prev_size - 1); prev_size - 1);
} }
GPR_ASSERT(prev_size >= 1); GPR_ASSERT(prev_size >= 1);
if (prev_size > 1) { if (prev_size > 1) {
while (true) { while (true) {
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " checking queue"); gpr_log(GPR_INFO, " checking queue");
} }
bool empty; bool empty;
grpc_closure* closure = reinterpret_cast<grpc_closure*>( grpc_closure* closure = reinterpret_cast<grpc_closure*>(
@ -124,19 +124,19 @@ void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
// This can happen either due to a race condition within the mpscq // This can happen either due to a race condition within the mpscq
// code or because of a race with grpc_call_combiner_start(). // code or because of a race with grpc_call_combiner_start().
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue returned no result; checking again"); gpr_log(GPR_INFO, " queue returned no result; checking again");
} }
continue; continue;
} }
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " EXECUTING FROM QUEUE: closure=%p error=%s", gpr_log(GPR_INFO, " EXECUTING FROM QUEUE: closure=%p error=%s",
closure, grpc_error_string(closure->error_data.error)); closure, grpc_error_string(closure->error_data.error));
} }
GRPC_CLOSURE_SCHED(closure, closure->error_data.error); GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
break; break;
} }
} else if (grpc_call_combiner_trace.enabled()) { } else if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, " queue empty"); gpr_log(GPR_INFO, " queue empty");
} }
} }
@ -151,7 +151,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
// Otherwise, store the new closure. // Otherwise, store the new closure.
if (original_error != GRPC_ERROR_NONE) { if (original_error != GRPC_ERROR_NONE) {
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p " "call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation", "for pre-existing cancellation",
call_combiner, closure); call_combiner, closure);
@ -162,7 +162,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state, if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
(gpr_atm)closure)) { (gpr_atm)closure)) {
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, "call_combiner=%p: setting notify_on_cancel=%p", gpr_log(GPR_INFO, "call_combiner=%p: setting notify_on_cancel=%p",
call_combiner, closure); call_combiner, closure);
} }
// If we replaced an earlier closure, invoke the original // If we replaced an earlier closure, invoke the original
@ -171,7 +171,7 @@ void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) { if (original_state != 0) {
closure = (grpc_closure*)original_state; closure = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"call_combiner=%p: scheduling old cancel callback=%p", "call_combiner=%p: scheduling old cancel callback=%p",
call_combiner, closure); call_combiner, closure);
} }
@ -199,7 +199,7 @@ void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
if (original_state != 0) { if (original_state != 0) {
grpc_closure* notify_on_cancel = (grpc_closure*)original_state; grpc_closure* notify_on_cancel = (grpc_closure*)original_state;
if (grpc_call_combiner_trace.enabled()) { if (grpc_call_combiner_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p", "call_combiner=%p: scheduling notify_on_cancel callback=%p",
call_combiner, notify_on_cancel); call_combiner, notify_on_cancel);
} }

@ -83,12 +83,12 @@ grpc_combiner* grpc_combiner_create(void) {
grpc_closure_list_init(&lock->final_list); grpc_closure_list_init(&lock->final_list);
GRPC_CLOSURE_INIT(&lock->offload, offload, lock, GRPC_CLOSURE_INIT(&lock->offload, offload, lock,
grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)); grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock)); GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p create", lock));
return lock; return lock;
} }
static void really_destroy(grpc_combiner* lock) { static void really_destroy(grpc_combiner* lock) {
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock)); GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p really_destroy", lock));
GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0); GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
gpr_mpscq_destroy(&lock->queue); gpr_mpscq_destroy(&lock->queue);
gpr_free(lock); gpr_free(lock);
@ -97,7 +97,7 @@ static void really_destroy(grpc_combiner* lock) {
static void start_destroy(grpc_combiner* lock) { static void start_destroy(grpc_combiner* lock) {
gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED); gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
GRPC_COMBINER_TRACE(gpr_log( GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state)); GPR_INFO, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
if (old_state == 1) { if (old_state == 1) {
really_destroy(lock); really_destroy(lock);
} }
@ -159,7 +159,7 @@ static void combiner_exec(grpc_closure* cl, grpc_error* error) {
GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(); GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler); grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_execute c=%p last=%" PRIdPTR, "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
lock, cl, last)); lock, cl, last));
if (last == 1) { if (last == 1) {
@ -203,7 +203,7 @@ static void offload(void* arg, grpc_error* error) {
static void queue_offload(grpc_combiner* lock) { static void queue_offload(grpc_combiner* lock) {
GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(); GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
move_next(); move_next();
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock)); GRPC_COMBINER_TRACE(gpr_log(GPR_INFO, "C:%p queue_offload", lock));
GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
} }
@ -218,7 +218,7 @@ bool grpc_combiner_continue_exec_ctx() {
bool contended = bool contended =
gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0; gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null) == 0;
GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, GRPC_COMBINER_TRACE(gpr_log(GPR_INFO,
"C:%p grpc_combiner_continue_exec_ctx " "C:%p grpc_combiner_continue_exec_ctx "
"contended=%d " "contended=%d "
"exec_ctx_ready_to_finish=%d " "exec_ctx_ready_to_finish=%d "
@ -242,7 +242,7 @@ bool grpc_combiner_continue_exec_ctx() {
(gpr_atm_acq_load(&lock->state) >> 1) > 1) { (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue); gpr_mpscq_node* n = gpr_mpscq_pop(&lock->queue);
GRPC_COMBINER_TRACE( GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n)); gpr_log(GPR_INFO, "C:%p maybe_finish_one n=%p", lock, n));
if (n == nullptr) { if (n == nullptr) {
// queue is in an inconsistent state: use this as a cue that we should // queue is in an inconsistent state: use this as a cue that we should
// go off and do something else for a while (and come back later) // go off and do something else for a while (and come back later)
@ -266,7 +266,7 @@ bool grpc_combiner_continue_exec_ctx() {
while (c != nullptr) { while (c != nullptr) {
GPR_TIMER_SCOPE("combiner.exec_1final", 0); GPR_TIMER_SCOPE("combiner.exec_1final", 0);
GRPC_COMBINER_TRACE( GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c)); gpr_log(GPR_INFO, "C:%p execute_final[%d] c=%p", lock, loops, c));
grpc_closure* next = c->next_data.next; grpc_closure* next = c->next_data.next;
grpc_error* error = c->error_data.error; grpc_error* error = c->error_data.error;
#ifndef NDEBUG #ifndef NDEBUG
@ -284,7 +284,7 @@ bool grpc_combiner_continue_exec_ctx() {
gpr_atm old_state = gpr_atm old_state =
gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT); gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
GRPC_COMBINER_TRACE( GRPC_COMBINER_TRACE(
gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state)); gpr_log(GPR_INFO, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement. // Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \ #define OLD_STATE_WAS(orphaned, elem_count) \
(((orphaned) ? 0 : STATE_UNORPHANED) | \ (((orphaned) ? 0 : STATE_UNORPHANED) | \
@ -327,8 +327,8 @@ static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
grpc_combiner* lock = grpc_combiner* lock =
COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler); COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
GRPC_COMBINER_TRACE(gpr_log( GRPC_COMBINER_TRACE(gpr_log(
GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, GPR_INFO, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock, closure,
closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner)); grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) { if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
GPR_TIMER_MARK("slowpath", 0); GPR_TIMER_MARK("slowpath", 0);
GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure, GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,

@ -658,7 +658,7 @@ static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
GRPC_STATS_INC_POLL_EVENTS_RETURNED(r); GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r); gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
} }
gpr_atm_rel_store(&g_epoll_set.num_events, r); gpr_atm_rel_store(&g_epoll_set.num_events, r);
@ -678,7 +678,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
pollset->begin_refs++; pollset->begin_refs++;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_STARTS:%p", pollset, worker); gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
} }
if (pollset->seen_inactive) { if (pollset->seen_inactive) {
@ -697,7 +697,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_lock(&neighborhood->mu); gpr_mu_lock(&neighborhood->mu);
gpr_mu_lock(&pollset->mu); gpr_mu_lock(&pollset->mu);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d", gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
pollset, worker, kick_state_string(worker->state), pollset, worker, kick_state_string(worker->state),
is_reassigning); is_reassigning);
} }
@ -749,7 +749,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_init(&worker->cv); gpr_cv_init(&worker->cv);
while (worker->state == UNKICKED && !pollset->shutting_down) { while (worker->state == UNKICKED && !pollset->shutting_down) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d", gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
pollset, worker, kick_state_string(worker->state), pollset, worker, kick_state_string(worker->state),
pollset->shutting_down); pollset->shutting_down);
} }
@ -766,7 +766,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
} }
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d " "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
"kicked_without_poller: %d", "kicked_without_poller: %d",
pollset, worker, kick_state_string(worker->state), pollset, worker, kick_state_string(worker->state),
@ -809,7 +809,7 @@ static bool check_neighborhood_for_available_poller(
if (gpr_atm_no_barrier_cas(&g_active_poller, 0, if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
(gpr_atm)inspect_worker)) { (gpr_atm)inspect_worker)) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be %p", gpr_log(GPR_INFO, " .. choose next poller to be %p",
inspect_worker); inspect_worker);
} }
SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER); SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
@ -820,7 +820,7 @@ static bool check_neighborhood_for_available_poller(
} }
} else { } else {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. beaten to choose next poller"); gpr_log(GPR_INFO, " .. beaten to choose next poller");
} }
} }
// even if we didn't win the cas, there's a worker, we can stop // even if we didn't win the cas, there's a worker, we can stop
@ -838,7 +838,7 @@ static bool check_neighborhood_for_available_poller(
} }
if (!found_worker) { if (!found_worker) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect); gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
} }
inspect->seen_inactive = true; inspect->seen_inactive = true;
if (inspect == neighborhood->active_root) { if (inspect == neighborhood->active_root) {
@ -858,7 +858,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
grpc_pollset_worker** worker_hdl) { grpc_pollset_worker** worker_hdl) {
GPR_TIMER_SCOPE("end_worker", 0); GPR_TIMER_SCOPE("end_worker", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker); gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
} }
if (worker_hdl != nullptr) *worker_hdl = nullptr; if (worker_hdl != nullptr) *worker_hdl = nullptr;
/* Make sure we appear kicked */ /* Make sure we appear kicked */
@ -868,7 +868,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) { if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
if (worker->next != worker && worker->next->state == UNKICKED) { if (worker->next != worker && worker->next->state == UNKICKED) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker); gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
} }
GPR_ASSERT(worker->next->initialized_cv); GPR_ASSERT(worker->next->initialized_cv);
gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next); gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
@ -920,7 +920,7 @@ static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_cv_destroy(&worker->cv); gpr_cv_destroy(&worker->cv);
} }
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. remove worker"); gpr_log(GPR_INFO, " .. remove worker");
} }
if (EMPTIED == worker_remove(pollset, worker)) { if (EMPTIED == worker_remove(pollset, worker)) {
pollset_maybe_finish_shutdown(pollset); pollset_maybe_finish_shutdown(pollset);
@ -1022,7 +1022,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(); GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true; pollset->kicked_without_poller = true;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked_without_poller"); gpr_log(GPR_INFO, " .. kicked_without_poller");
} }
goto done; goto done;
} }
@ -1030,14 +1030,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state == KICKED) { if (root_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. already kicked %p", root_worker); gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
} }
SET_KICK_STATE(root_worker, KICKED); SET_KICK_STATE(root_worker, KICKED);
goto done; goto done;
} else if (next_worker->state == KICKED) { } else if (next_worker->state == KICKED) {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. already kicked %p", next_worker); gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
} }
SET_KICK_STATE(next_worker, KICKED); SET_KICK_STATE(next_worker, KICKED);
goto done; goto done;
@ -1048,7 +1048,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
&g_active_poller)) { &g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked %p", root_worker); gpr_log(GPR_INFO, " .. kicked %p", root_worker);
} }
SET_KICK_STATE(root_worker, KICKED); SET_KICK_STATE(root_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@ -1056,7 +1056,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (next_worker->state == UNKICKED) { } else if (next_worker->state == UNKICKED) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked %p", next_worker); gpr_log(GPR_INFO, " .. kicked %p", next_worker);
} }
GPR_ASSERT(next_worker->initialized_cv); GPR_ASSERT(next_worker->initialized_cv);
SET_KICK_STATE(next_worker, KICKED); SET_KICK_STATE(next_worker, KICKED);
@ -1066,7 +1066,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (root_worker->state != DESIGNATED_POLLER) { if (root_worker->state != DESIGNATED_POLLER) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
" .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)", " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
root_worker, root_worker->initialized_cv, next_worker); root_worker, root_worker->initialized_cv, next_worker);
} }
@ -1079,7 +1079,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else { } else {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker, gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
root_worker); root_worker);
} }
SET_KICK_STATE(next_worker, KICKED); SET_KICK_STATE(next_worker, KICKED);
@ -1095,7 +1095,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else { } else {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kicked while waking up"); gpr_log(GPR_INFO, " .. kicked while waking up");
} }
goto done; goto done;
} }
@ -1105,14 +1105,14 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (specific_worker->state == KICKED) { if (specific_worker->state == KICKED) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. specific worker already kicked"); gpr_log(GPR_INFO, " .. specific worker already kicked");
} }
goto done; goto done;
} else if (gpr_tls_get(&g_current_thread_worker) == } else if (gpr_tls_get(&g_current_thread_worker) ==
(intptr_t)specific_worker) { (intptr_t)specific_worker) {
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker); gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
} }
SET_KICK_STATE(specific_worker, KICKED); SET_KICK_STATE(specific_worker, KICKED);
goto done; goto done;
@ -1120,7 +1120,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
(grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) { (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick active poller"); gpr_log(GPR_INFO, " .. kick active poller");
} }
SET_KICK_STATE(specific_worker, KICKED); SET_KICK_STATE(specific_worker, KICKED);
ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd); ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
@ -1128,7 +1128,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else if (specific_worker->initialized_cv) { } else if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick waiting worker"); gpr_log(GPR_INFO, " .. kick waiting worker");
} }
SET_KICK_STATE(specific_worker, KICKED); SET_KICK_STATE(specific_worker, KICKED);
gpr_cv_signal(&specific_worker->cv); gpr_cv_signal(&specific_worker->cv);
@ -1136,7 +1136,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} else { } else {
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. kick non-waiting worker"); gpr_log(GPR_INFO, " .. kick non-waiting worker");
} }
SET_KICK_STATE(specific_worker, KICKED); SET_KICK_STATE(specific_worker, KICKED);
goto done; goto done;

@ -518,7 +518,7 @@ static grpc_error* pollable_add_fd(pollable* p, grpc_fd* fd) {
const int epfd = p->epfd; const int epfd = p->epfd;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p); gpr_log(GPR_INFO, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
} }
struct epoll_event ev_fd; struct epoll_event ev_fd;
@ -560,7 +560,7 @@ static void pollset_global_shutdown(void) {
/* pollset->mu must be held while calling this function */ /* pollset->mu must be held while calling this function */
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) { static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) " "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
"rw=%p (target:NULL) cpsc=%d (target:0)", "rw=%p (target:NULL) cpsc=%d (target:0)",
pollset, pollset->active_pollable, pollset->shutdown_closure, pollset, pollset->active_pollable, pollset->shutdown_closure,
@ -585,14 +585,14 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
GPR_ASSERT(specific_worker != nullptr); GPR_ASSERT(specific_worker != nullptr);
if (specific_worker->kicked) { if (specific_worker->kicked) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p); gpr_log(GPR_INFO, "PS:%p kicked_specific_but_already_kicked", p);
} }
GRPC_STATS_INC_POLLSET_KICKED_AGAIN(); GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
} }
if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) { if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p); gpr_log(GPR_INFO, "PS:%p kicked_specific_but_awake", p);
} }
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
specific_worker->kicked = true; specific_worker->kicked = true;
@ -601,7 +601,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker == p->root_worker) { if (specific_worker == p->root_worker) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p); gpr_log(GPR_INFO, "PS:%p kicked_specific_via_wakeup_fd", p);
} }
specific_worker->kicked = true; specific_worker->kicked = true;
grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup); grpc_error* error = grpc_wakeup_fd_wakeup(&p->wakeup);
@ -610,7 +610,7 @@ static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
if (specific_worker->initialized_cv) { if (specific_worker->initialized_cv) {
GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(); GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p); gpr_log(GPR_INFO, "PS:%p kicked_specific_via_cv", p);
} }
specific_worker->kicked = true; specific_worker->kicked = true;
gpr_cv_signal(&specific_worker->cv); gpr_cv_signal(&specific_worker->cv);
@ -626,7 +626,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
GPR_TIMER_SCOPE("pollset_kick", 0); GPR_TIMER_SCOPE("pollset_kick", 0);
GRPC_STATS_INC_POLLSET_KICK(); GRPC_STATS_INC_POLLSET_KICK();
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p", "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
pollset, specific_worker, pollset, specific_worker,
(void*)gpr_tls_get(&g_current_thread_pollset), (void*)gpr_tls_get(&g_current_thread_pollset),
@ -636,7 +636,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) { if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
if (pollset->root_worker == nullptr) { if (pollset->root_worker == nullptr) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset); gpr_log(GPR_INFO, "PS:%p kicked_any_without_poller", pollset);
} }
GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(); GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
pollset->kicked_without_poller = true; pollset->kicked_without_poller = true;
@ -662,7 +662,7 @@ static grpc_error* pollset_kick(grpc_pollset* pollset,
} }
} else { } else {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset); gpr_log(GPR_INFO, "PS:%p kicked_any_but_awake", pollset);
} }
GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(); GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
return GRPC_ERROR_NONE; return GRPC_ERROR_NONE;
@ -784,7 +784,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
void* data_ptr = ev->data.ptr; void* data_ptr = ev->data.ptr;
if (1 & (intptr_t)data_ptr) { if (1 & (intptr_t)data_ptr) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr); gpr_log(GPR_INFO, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
} }
append_error(&error, append_error(&error,
grpc_wakeup_fd_consume_wakeup( grpc_wakeup_fd_consume_wakeup(
@ -797,7 +797,7 @@ static grpc_error* pollable_process_events(grpc_pollset* pollset,
bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0; bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
bool write_ev = (ev->events & EPOLLOUT) != 0; bool write_ev = (ev->events & EPOLLOUT) != 0;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p got fd %p: cancel=%d read=%d " "PS:%p got fd %p: cancel=%d read=%d "
"write=%d", "write=%d",
pollset, fd, cancel, read_ev, write_ev); pollset, fd, cancel, read_ev, write_ev);
@ -827,7 +827,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
char* desc = pollable_desc(p); char* desc = pollable_desc(p);
gpr_log(GPR_DEBUG, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout); gpr_log(GPR_INFO, "POLLABLE:%p[%s] poll for %dms", p, desc, timeout);
gpr_free(desc); gpr_free(desc);
} }
@ -846,7 +846,7 @@ static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait"); if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "POLLABLE:%p got %d events", p, r); gpr_log(GPR_INFO, "POLLABLE:%p got %d events", p, r);
} }
p->event_cursor = 0; p->event_cursor = 0;
@ -917,7 +917,7 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
gpr_mu_unlock(&pollset->mu); gpr_mu_unlock(&pollset->mu);
if (grpc_polling_trace.enabled() && if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) { worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset, gpr_log(GPR_INFO, "PS:%p wait %p w=%p for %dms", pollset,
worker->pollable_obj, worker, worker->pollable_obj, worker,
poll_deadline_to_millis_timeout(deadline)); poll_deadline_to_millis_timeout(deadline));
} }
@ -925,19 +925,19 @@ static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu, if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) { grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset, gpr_log(GPR_INFO, "PS:%p timeout_wait %p w=%p", pollset,
worker->pollable_obj, worker); worker->pollable_obj, worker);
} }
do_poll = false; do_poll = false;
} else if (worker->kicked) { } else if (worker->kicked) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, gpr_log(GPR_INFO, "PS:%p wakeup %p w=%p", pollset,
worker->pollable_obj, worker); worker->pollable_obj, worker);
} }
do_poll = false; do_poll = false;
} else if (grpc_polling_trace.enabled() && } else if (grpc_polling_trace.enabled() &&
worker->pollable_obj->root_worker != worker) { worker->pollable_obj->root_worker != worker) {
gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset, gpr_log(GPR_INFO, "PS:%p spurious_wakeup %p w=%p", pollset,
worker->pollable_obj, worker); worker->pollable_obj, worker);
} }
} }
@ -1009,7 +1009,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
WORKER_PTR->originator = gettid(); WORKER_PTR->originator = gettid();
#endif #endif
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
" kwp=%d pollable=%p", " kwp=%d pollable=%p",
pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(), pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
@ -1050,7 +1050,7 @@ static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd"; static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from empty to fd", "PS:%p add fd %p (%d); transition pollable from empty to fd",
pollset, fd, fd->fd); pollset, fd, fd->fd);
} }
@ -1067,7 +1067,7 @@ static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log( gpr_log(
GPR_DEBUG, GPR_INFO,
"PS:%p add fd %p (%d); transition pollable from fd %p to multipoller", "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1, pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
pollset->active_pollable->owner_fd); pollset->active_pollable->owner_fd);
@ -1137,7 +1137,7 @@ static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
/* Any workers currently polling on this pollset must now be woked up so /* Any workers currently polling on this pollset must now be woked up so
* that they can pick up the new active_pollable */ * that they can pick up the new active_pollable */
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"PS:%p active pollable transition from empty to multi", "PS:%p active pollable transition from empty to multi",
pollset); pollset);
} }
@ -1224,7 +1224,7 @@ static void pollset_set_unref(grpc_pollset_set* pss) {
static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) { static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_add_fd", 0); GPR_TIMER_SCOPE("pollset_set_add_fd", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd); gpr_log(GPR_INFO, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
} }
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd"; static const char* err_desc = "pollset_set_add_fd";
@ -1248,7 +1248,7 @@ static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) { static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
GPR_TIMER_SCOPE("pollset_set_del_fd", 0); GPR_TIMER_SCOPE("pollset_set_del_fd", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd); gpr_log(GPR_INFO, "PSS:%p: del fd %p", pss, fd);
} }
pss = pss_lock_adam(pss); pss = pss_lock_adam(pss);
size_t i; size_t i;
@ -1269,7 +1269,7 @@ static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_del_pollset", 0); GPR_TIMER_SCOPE("pollset_set_del_pollset", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps); gpr_log(GPR_INFO, "PSS:%p: del pollset %p", pss, ps);
} }
pss = pss_lock_adam(pss); pss = pss_lock_adam(pss);
size_t i; size_t i;
@ -1321,7 +1321,7 @@ static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) { static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
GPR_TIMER_SCOPE("pollset_set_add_pollset", 0); GPR_TIMER_SCOPE("pollset_set_add_pollset", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps); gpr_log(GPR_INFO, "PSS:%p: add pollset %p", pss, ps);
} }
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_pollset"; static const char* err_desc = "pollset_set_add_pollset";
@ -1358,7 +1358,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
grpc_pollset_set* b) { grpc_pollset_set* b) {
GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0); GPR_TIMER_SCOPE("pollset_set_add_pollset_set", 0);
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b); gpr_log(GPR_INFO, "PSS: merge (%p, %p)", a, b);
} }
grpc_error* error = GRPC_ERROR_NONE; grpc_error* error = GRPC_ERROR_NONE;
static const char* err_desc = "pollset_set_add_fd"; static const char* err_desc = "pollset_set_add_fd";
@ -1392,7 +1392,7 @@ static void pollset_set_add_pollset_set(grpc_pollset_set* a,
GPR_SWAP(grpc_pollset_set*, a, b); GPR_SWAP(grpc_pollset_set*, a, b);
} }
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a); gpr_log(GPR_INFO, "PSS: parent %p to %p", b, a);
} }
gpr_ref(&a->refs); gpr_ref(&a->refs);
b->parent = a; b->parent = a;

@ -292,7 +292,7 @@ static void pi_add_ref_dbg(polling_island* pi, const char* reason,
const char* file, int line) { const char* file, int line) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR "Add ref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)", " (%s) - (%s, %d)",
pi, old_cnt, old_cnt + 1, reason, file, line); pi, old_cnt, old_cnt + 1, reason, file, line);
@ -304,7 +304,7 @@ static void pi_unref_dbg(polling_island* pi, const char* reason,
const char* file, int line) { const char* file, int line) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count); gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR "Unref pi: %p, old:%" PRIdPTR " -> new:%" PRIdPTR
" (%s) - (%s, %d)", " (%s) - (%s, %d)",
pi, old_cnt, (old_cnt - 1), reason, file, line); pi, old_cnt, (old_cnt - 1), reason, file, line);

@ -983,7 +983,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
GRPC_SCHEDULING_END_BLOCKING_REGION; GRPC_SCHEDULING_END_BLOCKING_REGION;
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r); gpr_log(GPR_INFO, "%p poll=%d", pollset, r);
} }
if (r < 0) { if (r < 0) {
@ -1007,7 +1007,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
} else { } else {
if (pfds[0].revents & POLLIN_CHECK) { if (pfds[0].revents & POLLIN_CHECK) {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p: got_wakeup", pollset); gpr_log(GPR_INFO, "%p: got_wakeup", pollset);
} }
work_combine_error( work_combine_error(
&error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd)); &error, grpc_wakeup_fd_consume_wakeup(&worker.wakeup_fd->fd));
@ -1017,7 +1017,7 @@ static grpc_error* pollset_work(grpc_pollset* pollset,
fd_end_poll(&watchers[i], 0, 0, nullptr); fd_end_poll(&watchers[i], 0, 0, nullptr);
} else { } else {
if (grpc_polling_trace.enabled()) { if (grpc_polling_trace.enabled()) {
gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset, gpr_log(GPR_INFO, "%p got_event: %d r:%d w:%d [%d]", pollset,
pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0, pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
(pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents); (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
} }

@ -48,7 +48,7 @@ grpc_core::DebugOnlyTraceFlag grpc_polling_api_trace(false, "polling_api");
// Polling API trace only enabled in debug builds // Polling API trace only enabled in debug builds
#define GRPC_POLLING_API_TRACE(format, ...) \ #define GRPC_POLLING_API_TRACE(format, ...) \
if (grpc_polling_api_trace.enabled()) { \ if (grpc_polling_api_trace.enabled()) { \
gpr_log(GPR_DEBUG, "(polling-api) " format, __VA_ARGS__); \ gpr_log(GPR_INFO, "(polling-api) " format, __VA_ARGS__); \
} }
#else #else
#define GRPC_POLLING_API_TRACE(...) #define GRPC_POLLING_API_TRACE(...)

@ -69,7 +69,7 @@ static size_t run_closures(grpc_closure_list list) {
gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c, gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
c->file_created, c->line_created); c->file_created, c->line_created);
#else #else
gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c); gpr_log(GPR_INFO, "EXECUTOR: run %p", c);
#endif #endif
} }
#ifndef NDEBUG #ifndef NDEBUG
@ -150,7 +150,7 @@ static void executor_thread(void* arg) {
size_t subtract_depth = 0; size_t subtract_depth = 0;
for (;;) { for (;;) {
if (executor_trace.enabled()) { if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")", gpr_log(GPR_INFO, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
static_cast<int>(ts - g_thread_state), subtract_depth); static_cast<int>(ts - g_thread_state), subtract_depth);
} }
gpr_mu_lock(&ts->mu); gpr_mu_lock(&ts->mu);
@ -161,7 +161,7 @@ static void executor_thread(void* arg) {
} }
if (ts->shutdown) { if (ts->shutdown) {
if (executor_trace.enabled()) { if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown", gpr_log(GPR_INFO, "EXECUTOR[%d]: shutdown",
static_cast<int>(ts - g_thread_state)); static_cast<int>(ts - g_thread_state));
} }
gpr_mu_unlock(&ts->mu); gpr_mu_unlock(&ts->mu);
@ -172,7 +172,7 @@ static void executor_thread(void* arg) {
ts->elems = GRPC_CLOSURE_LIST_INIT; ts->elems = GRPC_CLOSURE_LIST_INIT;
gpr_mu_unlock(&ts->mu); gpr_mu_unlock(&ts->mu);
if (executor_trace.enabled()) { if (executor_trace.enabled()) {
gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", gpr_log(GPR_INFO, "EXECUTOR[%d]: execute",
static_cast<int>(ts - g_thread_state)); static_cast<int>(ts - g_thread_state));
} }
@ -199,7 +199,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline", gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
closure, closure->file_created, closure->line_created); closure, closure->file_created, closure->line_created);
#else #else
gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure); gpr_log(GPR_INFO, "EXECUTOR: schedule %p inline", closure);
#endif #endif
} }
grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
@ -225,7 +225,7 @@ static void executor_push(grpc_closure* closure, grpc_error* error,
closure, is_short ? "short" : "long", closure->file_created, closure, is_short ? "short" : "long", closure->file_created,
closure->line_created, static_cast<int>(ts - g_thread_state)); closure->line_created, static_cast<int>(ts - g_thread_state));
#else #else
gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d", gpr_log(GPR_INFO, "EXECUTOR: try to schedule %p (%s) to thread %d",
closure, is_short ? "short" : "long", closure, is_short ? "short" : "long",
(int)(ts - g_thread_state)); (int)(ts - g_thread_state));
#endif #endif

@ -289,7 +289,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
GRPC_RULIST_AWAITING_ALLOCATION))) { GRPC_RULIST_AWAITING_ALLOCATION))) {
gpr_mu_lock(&resource_user->mu); gpr_mu_lock(&resource_user->mu);
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"RQ: check allocation for user %p shutdown=%" PRIdPTR "RQ: check allocation for user %p shutdown=%" PRIdPTR
" free_pool=%" PRId64, " free_pool=%" PRId64,
resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown), resource_user, gpr_atm_no_barrier_load(&resource_user->shutdown),
@ -315,7 +315,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
resource_quota->free_pool -= amt; resource_quota->free_pool -= amt;
rq_update_estimate(resource_quota); rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"RQ %s %s: grant alloc %" PRId64 "RQ %s %s: grant alloc %" PRId64
" bytes; rq_free_pool -> %" PRId64, " bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt, resource_quota->name, resource_user->name, amt,
@ -323,7 +323,7 @@ static bool rq_alloc(grpc_resource_quota* resource_quota) {
} }
} else if (grpc_resource_quota_trace.enabled() && } else if (grpc_resource_quota_trace.enabled() &&
resource_user->free_pool >= 0) { resource_user->free_pool >= 0) {
gpr_log(GPR_DEBUG, "RQ %s %s: discard already satisfied alloc request", gpr_log(GPR_INFO, "RQ %s %s: discard already satisfied alloc request",
resource_quota->name, resource_user->name); resource_quota->name, resource_user->name);
} }
if (resource_user->free_pool >= 0) { if (resource_user->free_pool >= 0) {
@ -353,7 +353,7 @@ static bool rq_reclaim_from_per_user_free_pool(
resource_quota->free_pool += amt; resource_quota->free_pool += amt;
rq_update_estimate(resource_quota); rq_update_estimate(resource_quota);
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"RQ %s %s: reclaim_from_per_user_free_pool %" PRId64 "RQ %s %s: reclaim_from_per_user_free_pool %" PRId64
" bytes; rq_free_pool -> %" PRId64, " bytes; rq_free_pool -> %" PRId64,
resource_quota->name, resource_user->name, amt, resource_quota->name, resource_user->name, amt,
@ -376,9 +376,8 @@ static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list); grpc_resource_user* resource_user = rulist_pop_head(resource_quota, list);
if (resource_user == nullptr) return false; if (resource_user == nullptr) return false;
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: initiate %s reclamation", gpr_log(GPR_INFO, "RQ %s %s: initiate %s reclamation", resource_quota->name,
resource_quota->name, resource_user->name, resource_user->name, destructive ? "destructive" : "benign");
destructive ? "destructive" : "benign");
} }
resource_quota->reclaiming = true; resource_quota->reclaiming = true;
grpc_resource_quota_ref_internal(resource_quota); grpc_resource_quota_ref_internal(resource_quota);
@ -506,7 +505,7 @@ static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
static void ru_shutdown(void* ru, grpc_error* error) { static void ru_shutdown(void* ru, grpc_error* error) {
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RU shutdown %p", ru); gpr_log(GPR_INFO, "RU shutdown %p", ru);
} }
grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru); grpc_resource_user* resource_user = static_cast<grpc_resource_user*>(ru);
gpr_mu_lock(&resource_user->mu); gpr_mu_lock(&resource_user->mu);
@ -793,7 +792,7 @@ void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
resource_user->free_pool -= static_cast<int64_t>(size); resource_user->free_pool -= static_cast<int64_t>(size);
resource_user->outstanding_allocations += static_cast<int64_t>(size); resource_user->outstanding_allocations += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64, gpr_log(GPR_INFO, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size, resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool); resource_user->free_pool);
} }
@ -816,7 +815,7 @@ void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
bool was_zero_or_negative = resource_user->free_pool <= 0; bool was_zero_or_negative = resource_user->free_pool <= 0;
resource_user->free_pool += static_cast<int64_t>(size); resource_user->free_pool += static_cast<int64_t>(size);
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64, gpr_log(GPR_INFO, "RQ %s %s: free %" PRIdPTR "; free_pool -> %" PRId64,
resource_user->resource_quota->name, resource_user->name, size, resource_user->resource_quota->name, resource_user->name, size,
resource_user->free_pool); resource_user->free_pool);
} }
@ -842,7 +841,7 @@ void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) { void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
if (grpc_resource_quota_trace.enabled()) { if (grpc_resource_quota_trace.enabled()) {
gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete", gpr_log(GPR_INFO, "RQ %s %s: reclamation complete",
resource_user->resource_quota->name, resource_user->name); resource_user->resource_quota->name, resource_user->name);
} }
GRPC_CLOSURE_SCHED( GRPC_CLOSURE_SCHED(

@ -66,7 +66,7 @@ static void on_alarm(void* acp, grpc_error* error) {
grpc_custom_tcp_connect* connect = socket->connector; grpc_custom_tcp_connect* connect = socket->connector;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s",
connect->addr_name, str); connect->addr_name, str);
} }
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
@ -136,7 +136,7 @@ static void tcp_connect(grpc_closure* closure, grpc_endpoint** ep,
connect->refs = 2; connect->refs = 2;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %p %s: asynchronously connecting", gpr_log(GPR_INFO, "CLIENT_CONNECT: %p %s: asynchronously connecting",
socket, connect->addr_name); socket, connect->addr_name);
} }

@ -104,7 +104,7 @@ static void tc_on_alarm(void* acp, grpc_error* error) {
async_connect* ac = static_cast<async_connect*>(acp); async_connect* ac = static_cast<async_connect*>(acp);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str, gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_alarm: error=%s", ac->addr_str,
str); str);
} }
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
@ -141,8 +141,8 @@ static void on_writable(void* acp, grpc_error* error) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: on_writable: error=%s", gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s", ac->addr_str,
ac->addr_str, str); str);
} }
gpr_mu_lock(&ac->mu); gpr_mu_lock(&ac->mu);
@ -325,7 +325,7 @@ void grpc_tcp_client_create_from_prepared_fd(
ac->channel_args = grpc_channel_args_copy(channel_args); ac->channel_args = grpc_channel_args_copy(channel_args);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "CLIENT_CONNECT: %s: asynchronously connecting fd %p", gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: asynchronously connecting fd %p",
ac->addr_str, fdobj); ac->addr_str, fdobj);
} }

@ -125,16 +125,16 @@ static void tcp_ref(custom_tcp_endpoint* tcp) { gpr_ref(&tcp->refcount); }
static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) { static void call_read_cb(custom_tcp_endpoint* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb; grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb, gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp->socket, cb, cb->cb,
cb->cb_arg); cb->cb_arg);
size_t i; size_t i;
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str); gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->read_slices->count; i++) { for (i = 0; i < tcp->read_slices->count; i++) {
char* dump = grpc_dump_slice(tcp->read_slices->slices[i], char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII); GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump); gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump); gpr_free(dump);
} }
} }
@ -171,7 +171,7 @@ static void custom_read_callback(grpc_custom_socket* socket, size_t nread,
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) { static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp; custom_tcp_endpoint* tcp = (custom_tcp_endpoint*)tcpp;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp->socket, gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp->socket,
grpc_error_string(error)); grpc_error_string(error));
} }
if (error == GRPC_ERROR_NONE) { if (error == GRPC_ERROR_NONE) {
@ -188,7 +188,7 @@ static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
} }
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "Initiating read on %p: error=%s", tcp->socket, str); gpr_log(GPR_INFO, "Initiating read on %p: error=%s", tcp->socket, str);
} }
} }
@ -214,7 +214,7 @@ static void custom_write_callback(grpc_custom_socket* socket,
tcp->write_cb = nullptr; tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp->socket, str); gpr_log(GPR_INFO, "write complete on %p: error=%s", tcp->socket, str);
} }
TCP_UNREF(tcp, "write"); TCP_UNREF(tcp, "write");
GRPC_CLOSURE_SCHED(cb, error); GRPC_CLOSURE_SCHED(cb, error);
@ -231,8 +231,8 @@ static void endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* write_slices,
for (j = 0; j < write_slices->count; j++) { for (j = 0; j < write_slices->count; j++) {
char* data = grpc_dump_slice(write_slices->slices[j], char* data = grpc_dump_slice(write_slices->slices[j],
GPR_DUMP_HEX | GPR_DUMP_ASCII); GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp->socket, gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp->socket, tcp->peer_string,
tcp->peer_string, data); data);
gpr_free(data); gpr_free(data);
} }
} }
@ -283,7 +283,7 @@ static void endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
if (!tcp->shutting_down) { if (!tcp->shutting_down) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(why); const char* str = grpc_error_string(why);
gpr_log(GPR_DEBUG, "TCP %p shutdown why=%s", tcp->socket, str); gpr_log(GPR_INFO, "TCP %p shutdown why=%s", tcp->socket, str);
} }
tcp->shutting_down = true; tcp->shutting_down = true;
// GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why)); // GRPC_CLOSURE_SCHED(tcp->read_cb, GRPC_ERROR_REF(why));
@ -345,7 +345,7 @@ grpc_endpoint* custom_tcp_endpoint_create(grpc_custom_socket* socket,
grpc_core::ExecCtx exec_ctx; grpc_core::ExecCtx exec_ctx;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", socket); gpr_log(GPR_INFO, "Creating TCP endpoint %p", socket);
} }
memset(tcp, 0, sizeof(custom_tcp_endpoint)); memset(tcp, 0, sizeof(custom_tcp_endpoint));
socket->refs++; socket->refs++;

@ -120,7 +120,7 @@ static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
static void done_poller(void* bp, grpc_error* error_ignored) { static void done_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp); backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p); gpr_log(GPR_INFO, "BACKUP_POLLER:%p destroy", p);
} }
grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p)); grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
gpr_free(p); gpr_free(p);
@ -129,7 +129,7 @@ static void done_poller(void* bp, grpc_error* error_ignored) {
static void run_poller(void* bp, grpc_error* error_ignored) { static void run_poller(void* bp, grpc_error* error_ignored) {
backup_poller* p = static_cast<backup_poller*>(bp); backup_poller* p = static_cast<backup_poller*>(bp);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p); gpr_log(GPR_INFO, "BACKUP_POLLER:%p run", p);
} }
gpr_mu_lock(p->pollset_mu); gpr_mu_lock(p->pollset_mu);
grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC; grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
@ -145,18 +145,18 @@ static void run_poller(void* bp, grpc_error* error_ignored) {
gpr_mu_lock(p->pollset_mu); gpr_mu_lock(p->pollset_mu);
bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0); bool cas_ok = gpr_atm_full_cas(&g_backup_poller, (gpr_atm)p, 0);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok); gpr_log(GPR_INFO, "BACKUP_POLLER:%p done cas_ok=%d", p, cas_ok);
} }
gpr_mu_unlock(p->pollset_mu); gpr_mu_unlock(p->pollset_mu);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p); gpr_log(GPR_INFO, "BACKUP_POLLER:%p shutdown", p);
} }
grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p), grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p, GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
grpc_schedule_on_exec_ctx)); grpc_schedule_on_exec_ctx));
} else { } else {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p); gpr_log(GPR_INFO, "BACKUP_POLLER:%p reschedule", p);
} }
GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE); GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
} }
@ -167,7 +167,7 @@ static void drop_uncovered(grpc_tcp* tcp) {
gpr_atm old_count = gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1); gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p uncover cnt %d->%d", p, gpr_log(GPR_INFO, "BACKUP_POLLER:%p uncover cnt %d->%d", p,
static_cast<int>(old_count), static_cast<int>(old_count) - 1); static_cast<int>(old_count), static_cast<int>(old_count) - 1);
} }
GPR_ASSERT(old_count != 1); GPR_ASSERT(old_count != 1);
@ -178,7 +178,7 @@ static void cover_self(grpc_tcp* tcp) {
gpr_atm old_count = gpr_atm old_count =
gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2); gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER: cover cnt %d->%d", gpr_log(GPR_INFO, "BACKUP_POLLER: cover cnt %d->%d",
static_cast<int>(old_count), 2 + static_cast<int>(old_count)); static_cast<int>(old_count), 2 + static_cast<int>(old_count));
} }
if (old_count == 0) { if (old_count == 0) {
@ -186,7 +186,7 @@ static void cover_self(grpc_tcp* tcp) {
p = static_cast<backup_poller*>( p = static_cast<backup_poller*>(
gpr_zalloc(sizeof(*p) + grpc_pollset_size())); gpr_zalloc(sizeof(*p) + grpc_pollset_size()));
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p); gpr_log(GPR_INFO, "BACKUP_POLLER:%p create", p);
} }
grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu); grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p); gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
@ -201,7 +201,7 @@ static void cover_self(grpc_tcp* tcp) {
} }
} }
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp); gpr_log(GPR_INFO, "BACKUP_POLLER:%p add %p", p, tcp);
} }
grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd); grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
if (old_count != 0) { if (old_count != 0) {
@ -211,7 +211,7 @@ static void cover_self(grpc_tcp* tcp) {
static void notify_on_read(grpc_tcp* tcp) { static void notify_on_read(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp); gpr_log(GPR_INFO, "TCP:%p notify_on_read", tcp);
} }
GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp, GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
grpc_schedule_on_exec_ctx); grpc_schedule_on_exec_ctx);
@ -220,7 +220,7 @@ static void notify_on_read(grpc_tcp* tcp) {
static void notify_on_write(grpc_tcp* tcp) { static void notify_on_write(grpc_tcp* tcp) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp); gpr_log(GPR_INFO, "TCP:%p notify_on_write", tcp);
} }
cover_self(tcp); cover_self(tcp);
GRPC_CLOSURE_INIT(&tcp->write_done_closure, GRPC_CLOSURE_INIT(&tcp->write_done_closure,
@ -231,7 +231,7 @@ static void notify_on_write(grpc_tcp* tcp) {
static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) { static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error)); gpr_log(GPR_INFO, "TCP:%p got_write: %s", arg, grpc_error_string(error));
} }
drop_uncovered(static_cast<grpc_tcp*>(arg)); drop_uncovered(static_cast<grpc_tcp*>(arg));
tcp_handle_write(arg, error); tcp_handle_write(arg, error);
@ -351,15 +351,15 @@ static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
grpc_closure* cb = tcp->read_cb; grpc_closure* cb = tcp->read_cb;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg); gpr_log(GPR_INFO, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
size_t i; size_t i;
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "read: error=%s", str); gpr_log(GPR_INFO, "read: error=%s", str);
for (i = 0; i < tcp->incoming_buffer->count; i++) { for (i = 0; i < tcp->incoming_buffer->count; i++) {
char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i], char* dump = grpc_dump_slice(tcp->incoming_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII); GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump); gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string, dump);
gpr_free(dump); gpr_free(dump);
} }
} }
@ -441,7 +441,7 @@ static void tcp_do_read(grpc_tcp* tcp) {
static void tcp_read_allocation_done(void* tcpp, grpc_error* error) { static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp); grpc_tcp* tcp = static_cast<grpc_tcp*>(tcpp);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp, gpr_log(GPR_INFO, "TCP:%p read_allocation_done: %s", tcp,
grpc_error_string(error)); grpc_error_string(error));
} }
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
@ -459,13 +459,13 @@ static void tcp_continue_read(grpc_tcp* tcp) {
if (tcp->incoming_buffer->length < target_read_size && if (tcp->incoming_buffer->length < target_read_size &&
tcp->incoming_buffer->count < MAX_READ_IOVEC) { tcp->incoming_buffer->count < MAX_READ_IOVEC) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp); gpr_log(GPR_INFO, "TCP:%p alloc_slices", tcp);
} }
grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1, grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
tcp->incoming_buffer); tcp->incoming_buffer);
} else { } else {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp); gpr_log(GPR_INFO, "TCP:%p do_read", tcp);
} }
tcp_do_read(tcp); tcp_do_read(tcp);
} }
@ -475,7 +475,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg); grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
GPR_ASSERT(!tcp->finished_edge); GPR_ASSERT(!tcp->finished_edge);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "TCP:%p got_read: %s", tcp, grpc_error_string(error)); gpr_log(GPR_INFO, "TCP:%p got_read: %s", tcp, grpc_error_string(error));
} }
if (error != GRPC_ERROR_NONE) { if (error != GRPC_ERROR_NONE) {
@ -618,7 +618,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
if (!tcp_flush(tcp, &error)) { if (!tcp_flush(tcp, &error)) {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed"); gpr_log(GPR_INFO, "write: delayed");
} }
notify_on_write(tcp); notify_on_write(tcp);
} else { } else {
@ -626,7 +626,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
tcp->write_cb = nullptr; tcp->write_cb = nullptr;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str); gpr_log(GPR_INFO, "write: %s", str);
} }
GRPC_CLOSURE_RUN(cb, error); GRPC_CLOSURE_RUN(cb, error);
@ -646,7 +646,7 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
for (i = 0; i < buf->count; i++) { for (i = 0; i < buf->count; i++) {
char* data = char* data =
grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data); gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
gpr_free(data); gpr_free(data);
} }
} }
@ -668,13 +668,13 @@ static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
TCP_REF(tcp, "write"); TCP_REF(tcp, "write");
tcp->write_cb = cb; tcp->write_cb = cb;
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "write: delayed"); gpr_log(GPR_INFO, "write: delayed");
} }
notify_on_write(tcp); notify_on_write(tcp);
} else { } else {
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
gpr_log(GPR_DEBUG, "write: %s", str); gpr_log(GPR_INFO, "write: %s", str);
} }
GRPC_CLOSURE_SCHED(cb, error); GRPC_CLOSURE_SCHED(cb, error);
} }

@ -222,10 +222,10 @@ static void finish_accept(grpc_tcp_listener* sp, grpc_custom_socket* socket) {
} }
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
if (peer_name_string) { if (peer_name_string) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection: %s", gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection: %s",
sp->server, peer_name_string); sp->server, peer_name_string);
} else { } else {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: %p accepted connection", sp->server); gpr_log(GPR_INFO, "SERVER_CONNECT: %p accepted connection", sp->server);
} }
} }
ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota, ep = custom_tcp_endpoint_create(socket, sp->server->resource_quota,
@ -377,10 +377,10 @@ static grpc_error* tcp_server_add_port(grpc_tcp_server* s,
grpc_sockaddr_to_string(&port_string, addr, 0); grpc_sockaddr_to_string(&port_string, addr, 0);
const char* str = grpc_error_string(error); const char* str = grpc_error_string(error);
if (port_string) { if (port_string) {
gpr_log(GPR_DEBUG, "SERVER %p add_port %s error=%s", s, port_string, str); gpr_log(GPR_INFO, "SERVER %p add_port %s error=%s", s, port_string, str);
gpr_free(port_string); gpr_free(port_string);
} else { } else {
gpr_log(GPR_DEBUG, "SERVER %p add_port error=%s", s, str); gpr_log(GPR_INFO, "SERVER %p add_port error=%s", s, str);
} }
} }
@ -419,7 +419,7 @@ static void tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
(void)pollset_count; (void)pollset_count;
GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD(); GRPC_CUSTOM_IOMGR_ASSERT_SAME_THREAD();
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_START %p", server); gpr_log(GPR_INFO, "SERVER_START %p", server);
} }
GPR_ASSERT(on_accept_cb); GPR_ASSERT(on_accept_cb);
GPR_ASSERT(!server->on_accept_cb); GPR_ASSERT(!server->on_accept_cb);

@ -228,7 +228,7 @@ static void on_read(void* arg, grpc_error* err) {
gpr_asprintf(&name, "tcp-server-connection:%s", addr_str); gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);
if (grpc_tcp_trace.enabled()) { if (grpc_tcp_trace.enabled()) {
gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str); gpr_log(GPR_INFO, "SERVER_CONNECT: incoming connection: %s", addr_str);
} }
grpc_fd* fdobj = grpc_fd_create(fd, name); grpc_fd* fdobj = grpc_fd_create(fd, name);

@ -204,6 +204,9 @@ static grpc_error* uv_socket_init_helper(uv_socket_t* uv_socket, int domain) {
uv_socket->write_buffers = nullptr; uv_socket->write_buffers = nullptr;
uv_socket->read_len = 0; uv_socket->read_len = 0;
uv_tcp_nodelay(uv_socket->handle, 1); uv_tcp_nodelay(uv_socket->handle, 1);
// Node uses a garbage collector to call destructors, so we don't
// want to hold the uv loop open with active gRPC objects.
uv_unref((uv_handle_t*)uv_socket->handle);
uv_socket->pending_connection = false; uv_socket->pending_connection = false;
uv_socket->accept_socket = nullptr; uv_socket->accept_socket = nullptr;
uv_socket->accept_error = GRPC_ERROR_NONE; uv_socket->accept_error = GRPC_ERROR_NONE;

@ -346,9 +346,9 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
#endif #endif
if (grpc_timer_trace.enabled()) { if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO, "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]",
"TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer, timer, deadline, grpc_core::ExecCtx::Get()->Now(), closure,
deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb); closure->cb);
} }
if (!g_shared_mutables.initialized) { if (!g_shared_mutables.initialized) {
@ -382,7 +382,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
list_join(&shard->list, timer); list_join(&shard->list, timer);
} }
if (grpc_timer_trace.enabled()) { if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
" .. add to shard %d with queue_deadline_cap=%" PRIdPTR " .. add to shard %d with queue_deadline_cap=%" PRIdPTR
" => is_first_timer=%s", " => is_first_timer=%s",
static_cast<int>(shard - g_shards), shard->queue_deadline_cap, static_cast<int>(shard - g_shards), shard->queue_deadline_cap,
@ -404,7 +404,7 @@ static void timer_init(grpc_timer* timer, grpc_millis deadline,
if (is_first_timer) { if (is_first_timer) {
gpr_mu_lock(&g_shared_mutables.mu); gpr_mu_lock(&g_shared_mutables.mu);
if (grpc_timer_trace.enabled()) { if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. old shard min_deadline=%" PRIdPTR, gpr_log(GPR_INFO, " .. old shard min_deadline=%" PRIdPTR,
shard->min_deadline); shard->min_deadline);
} }
if (deadline < shard->min_deadline) { if (deadline < shard->min_deadline) {
@ -434,7 +434,7 @@ static void timer_cancel(grpc_timer* timer) {
timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)]; timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
gpr_mu_lock(&shard->mu); gpr_mu_lock(&shard->mu);
if (grpc_timer_trace.enabled()) { if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: CANCEL pending=%s", timer, gpr_log(GPR_INFO, "TIMER %p: CANCEL pending=%s", timer,
timer->pending ? "true" : "false"); timer->pending ? "true" : "false");
} }
@ -475,7 +475,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
static_cast<gpr_atm>(deadline_delta * 1000.0)); static_cast<gpr_atm>(deadline_delta * 1000.0));
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR, gpr_log(GPR_INFO, " .. shard[%d]->queue_deadline_cap --> %" PRIdPTR,
static_cast<int>(shard - g_shards), shard->queue_deadline_cap); static_cast<int>(shard - g_shards), shard->queue_deadline_cap);
} }
for (timer = shard->list.next; timer != &shard->list; timer = next) { for (timer = shard->list.next; timer != &shard->list; timer = next) {
@ -483,7 +483,7 @@ static int refill_heap(timer_shard* shard, gpr_atm now) {
if (timer->deadline < shard->queue_deadline_cap) { if (timer->deadline < shard->queue_deadline_cap) {
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. add timer with deadline %" PRIdPTR " to heap", gpr_log(GPR_INFO, " .. add timer with deadline %" PRIdPTR " to heap",
timer->deadline); timer->deadline);
} }
list_remove(timer); list_remove(timer);
@ -500,7 +500,7 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
grpc_timer* timer; grpc_timer* timer;
for (;;) { for (;;) {
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]: heap_empty=%s", gpr_log(GPR_INFO, " .. shard[%d]: heap_empty=%s",
static_cast<int>(shard - g_shards), static_cast<int>(shard - g_shards),
grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false"); grpc_timer_heap_is_empty(&shard->heap) ? "true" : "false");
} }
@ -510,13 +510,13 @@ static grpc_timer* pop_one(timer_shard* shard, gpr_atm now) {
} }
timer = grpc_timer_heap_top(&shard->heap); timer = grpc_timer_heap_top(&shard->heap);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
" .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR, " .. check top timer deadline=%" PRIdPTR " now=%" PRIdPTR,
timer->deadline, now); timer->deadline, now);
} }
if (timer->deadline > now) return nullptr; if (timer->deadline > now) return nullptr;
if (grpc_timer_trace.enabled()) { if (grpc_timer_trace.enabled()) {
gpr_log(GPR_DEBUG, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler", gpr_log(GPR_INFO, "TIMER %p: FIRE %" PRIdPTR "ms late via %s scheduler",
timer, now - timer->deadline, timer, now - timer->deadline,
timer->closure->scheduler->vtable->name); timer->closure->scheduler->vtable->name);
} }
@ -540,7 +540,7 @@ static size_t pop_timers(timer_shard* shard, gpr_atm now,
*new_min_deadline = compute_min_deadline(shard); *new_min_deadline = compute_min_deadline(shard);
gpr_mu_unlock(&shard->mu); gpr_mu_unlock(&shard->mu);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d] popped %" PRIdPTR, gpr_log(GPR_INFO, " .. shard[%d] popped %" PRIdPTR,
static_cast<int>(shard - g_shards), n); static_cast<int>(shard - g_shards), n);
} }
return n; return n;
@ -563,7 +563,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
result = GRPC_TIMERS_CHECKED_AND_EMPTY; result = GRPC_TIMERS_CHECKED_AND_EMPTY;
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, " .. shard[%d]->min_deadline = %" PRIdPTR, gpr_log(GPR_INFO, " .. shard[%d]->min_deadline = %" PRIdPTR,
static_cast<int>(g_shard_queue[0] - g_shards), static_cast<int>(g_shard_queue[0] - g_shards),
g_shard_queue[0]->min_deadline); g_shard_queue[0]->min_deadline);
} }
@ -580,7 +580,7 @@ static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
} }
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
" .. result --> %d" " .. result --> %d"
", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR ", shard[%d]->min_deadline %" PRIdPTR " --> %" PRIdPTR
", now=%" PRIdPTR, ", now=%" PRIdPTR,
@ -624,7 +624,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
*next = GPR_MIN(*next, min_timer); *next = GPR_MIN(*next, min_timer);
} }
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now, "TIMER CHECK SKIP: now=%" PRIdPTR " min_timer=%" PRIdPTR, now,
min_timer); min_timer);
} }
@ -644,7 +644,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
} else { } else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next); gpr_asprintf(&next_str, "%" PRIdPTR, *next);
} }
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR "TIMER CHECK BEGIN: now=%" PRIdPTR " next=%s tls_min=%" PRIdPTR
" glob_min=%" PRIdPTR, " glob_min=%" PRIdPTR,
now, next_str, gpr_tls_get(&g_last_seen_min_timer), now, next_str, gpr_tls_get(&g_last_seen_min_timer),
@ -662,7 +662,7 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
} else { } else {
gpr_asprintf(&next_str, "%" PRIdPTR, *next); gpr_asprintf(&next_str, "%" PRIdPTR, *next);
} }
gpr_log(GPR_DEBUG, "TIMER CHECK END: r=%d; next=%s", r, next_str); gpr_log(GPR_INFO, "TIMER CHECK END: r=%d; next=%s", r, next_str);
gpr_free(next_str); gpr_free(next_str);
} }
return r; return r;

@ -82,7 +82,7 @@ static void start_timer_thread_and_unlock(void) {
++g_thread_count; ++g_thread_count;
gpr_mu_unlock(&g_mu); gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "Spawn timer thread"); gpr_log(GPR_INFO, "Spawn timer thread");
} }
completed_thread* ct = completed_thread* ct =
static_cast<completed_thread*>(gpr_malloc(sizeof(*ct))); static_cast<completed_thread*>(gpr_malloc(sizeof(*ct)));
@ -108,7 +108,7 @@ static void run_some_timers() {
// waiter so that the next deadline is not missed // waiter so that the next deadline is not missed
if (!g_has_timed_waiter) { if (!g_has_timed_waiter) {
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "kick untimed waiter"); gpr_log(GPR_INFO, "kick untimed waiter");
} }
gpr_cv_signal(&g_cv_wait); gpr_cv_signal(&g_cv_wait);
} }
@ -116,7 +116,7 @@ static void run_some_timers() {
} }
// without our lock, flush the exec_ctx // without our lock, flush the exec_ctx
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "flush exec_ctx"); gpr_log(GPR_INFO, "flush exec_ctx");
} }
grpc_core::ExecCtx::Get()->Flush(); grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(&g_mu); gpr_mu_lock(&g_mu);
@ -172,8 +172,7 @@ static bool wait_until(grpc_millis next) {
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now(); grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds", gpr_log(GPR_INFO, "sleep for a %" PRIdPTR " milliseconds", wait_time);
wait_time);
} }
} else { // g_timed_waiter == true && next >= g_timed_waiter_deadline } else { // g_timed_waiter == true && next >= g_timed_waiter_deadline
next = GRPC_MILLIS_INF_FUTURE; next = GRPC_MILLIS_INF_FUTURE;
@ -181,14 +180,14 @@ static bool wait_until(grpc_millis next) {
} }
if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) { if (grpc_timer_check_trace.enabled() && next == GRPC_MILLIS_INF_FUTURE) {
gpr_log(GPR_DEBUG, "sleep until kicked"); gpr_log(GPR_INFO, "sleep until kicked");
} }
gpr_cv_wait(&g_cv_wait, &g_mu, gpr_cv_wait(&g_cv_wait, &g_mu,
grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC)); grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d", gpr_log(GPR_INFO, "wait ended: was_timed:%d kicked:%d",
my_timed_waiter_generation == g_timed_waiter_generation, my_timed_waiter_generation == g_timed_waiter_generation,
g_kicked); g_kicked);
} }
@ -233,7 +232,7 @@ static void timer_main_loop() {
Consequently, we can just sleep forever here and be happy at some Consequently, we can just sleep forever here and be happy at some
saved wakeup cycles. */ saved wakeup cycles. */
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "timers not checked: expect another thread to"); gpr_log(GPR_INFO, "timers not checked: expect another thread to");
} }
next = GRPC_MILLIS_INF_FUTURE; next = GRPC_MILLIS_INF_FUTURE;
/* fall through */ /* fall through */
@ -259,7 +258,7 @@ static void timer_thread_cleanup(completed_thread* ct) {
g_completed_threads = ct; g_completed_threads = ct;
gpr_mu_unlock(&g_mu); gpr_mu_unlock(&g_mu);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "End timer thread"); gpr_log(GPR_INFO, "End timer thread");
} }
} }
@ -301,18 +300,18 @@ void grpc_timer_manager_init(void) {
static void stop_threads(void) { static void stop_threads(void) {
gpr_mu_lock(&g_mu); gpr_mu_lock(&g_mu);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "stop timer threads: threaded=%d", g_threaded); gpr_log(GPR_INFO, "stop timer threads: threaded=%d", g_threaded);
} }
if (g_threaded) { if (g_threaded) {
g_threaded = false; g_threaded = false;
gpr_cv_broadcast(&g_cv_wait); gpr_cv_broadcast(&g_cv_wait);
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count); gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
} }
while (g_thread_count > 0) { while (g_thread_count > 0) {
gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC)); gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
if (grpc_timer_check_trace.enabled()) { if (grpc_timer_check_trace.enabled()) {
gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count); gpr_log(GPR_INFO, "num timer threads: %d", g_thread_count);
} }
gc_completed_threads(); gc_completed_threads();
} }

@ -52,6 +52,9 @@ static void timer_start(grpc_custom_timer* t) {
uv_timer->data = t; uv_timer->data = t;
t->timer = (void*)uv_timer; t->timer = (void*)uv_timer;
uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0); uv_timer_start(uv_timer, run_expired_timer, t->timeout_ms, 0);
// Node uses a garbage collector to call destructors, so we don't
// want to hold the uv loop open with active gRPC objects.
uv_unref((uv_handle_t*)uv_timer);
} }
static void timer_stop(grpc_custom_timer* t) { static void timer_stop(grpc_custom_timer* t) {

@ -133,7 +133,7 @@ static void call_read_cb(secure_endpoint* ep, grpc_error* error) {
for (i = 0; i < ep->read_buffer->count; i++) { for (i = 0; i < ep->read_buffer->count; i++) {
char* data = grpc_dump_slice(ep->read_buffer->slices[i], char* data = grpc_dump_slice(ep->read_buffer->slices[i],
GPR_DUMP_HEX | GPR_DUMP_ASCII); GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "READ %p: %s", ep, data); gpr_log(GPR_INFO, "READ %p: %s", ep, data);
gpr_free(data); gpr_free(data);
} }
} }
@ -269,7 +269,7 @@ static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
for (i = 0; i < slices->count; i++) { for (i = 0; i < slices->count; i++) {
char* data = char* data =
grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII); grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data); gpr_log(GPR_INFO, "WRITE %p: %s", ep, data);
gpr_free(data); gpr_free(data);
} }
} }

@ -747,10 +747,10 @@ static void get_final_status(
status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i])); status[i] = unpack_received_status(gpr_atm_acq_load(&call->status[i]));
} }
if (grpc_call_error_trace.enabled()) { if (grpc_call_error_trace.enabled()) {
gpr_log(GPR_DEBUG, "get_final_status %s", call->is_client ? "CLI" : "SVR"); gpr_log(GPR_INFO, "get_final_status %s", call->is_client ? "CLI" : "SVR");
for (i = 0; i < STATUS_SOURCE_COUNT; i++) { for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
if (status[i].is_set) { if (status[i].is_set) {
gpr_log(GPR_DEBUG, " %d: %s", i, grpc_error_string(status[i].error)); gpr_log(GPR_INFO, " %d: %s", i, grpc_error_string(status[i].error));
} }
} }
} }

@ -1161,6 +1161,22 @@ static void listener_destroy_done(void* s, grpc_error* error) {
gpr_mu_unlock(&server->mu_global); gpr_mu_unlock(&server->mu_global);
} }
/*
- Kills all pending requests-for-incoming-RPC-calls (i.e the requests made via
grpc_server_request_call and grpc_server_request_registered call will now be
cancelled). See 'kill_pending_work_locked()'
- Shuts down the listeners (i.e the server will no longer listen on the port
for new incoming channels).
- Iterates through all channels on the server and sends shutdown msg (see
'channel_broadcaster_shutdown()' for details) to the clients via the
transport layer. The transport layer then guarantees the following:
-- Sends shutdown to the client (for eg: HTTP2 transport sends GOAWAY)
-- If the server has outstanding calls that are in the process, the
connection is NOT closed until the server is done with all those calls
-- Once, there are no more calls in progress, the channel is closed
*/
void grpc_server_shutdown_and_notify(grpc_server* server, void grpc_server_shutdown_and_notify(grpc_server* server,
grpc_completion_queue* cq, void* tag) { grpc_completion_queue* cq, void* tag) {
listener* l; listener* l;

@ -47,7 +47,7 @@ grpc_millis BdpEstimator::CompletePing() {
double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0; double bw = dt > 0 ? (static_cast<double>(accumulator_) / dt) : 0;
int start_inter_ping_delay = inter_ping_delay_; int start_inter_ping_delay = inter_ping_delay_;
if (grpc_bdp_estimator_trace.enabled()) { if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, gpr_log(GPR_INFO,
"bdp[%s]:complete acc=%" PRId64 " est=%" PRId64 "bdp[%s]:complete acc=%" PRId64 " est=%" PRId64
" dt=%lf bw=%lfMbs bw_est=%lfMbs", " dt=%lf bw=%lfMbs bw_est=%lfMbs",
name_, accumulator_, estimate_, dt, bw / 125000.0, name_, accumulator_, estimate_, dt, bw / 125000.0,
@ -58,7 +58,7 @@ grpc_millis BdpEstimator::CompletePing() {
estimate_ = GPR_MAX(accumulator_, estimate_ * 2); estimate_ = GPR_MAX(accumulator_, estimate_ * 2);
bw_est_ = bw; bw_est_ = bw;
if (grpc_bdp_estimator_trace.enabled()) { if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]: estimate increased to %" PRId64, name_, gpr_log(GPR_INFO, "bdp[%s]: estimate increased to %" PRId64, name_,
estimate_); estimate_);
} }
inter_ping_delay_ /= 2; // if the ping estimate changes, inter_ping_delay_ /= 2; // if the ping estimate changes,
@ -75,7 +75,7 @@ grpc_millis BdpEstimator::CompletePing() {
if (start_inter_ping_delay != inter_ping_delay_) { if (start_inter_ping_delay != inter_ping_delay_) {
stable_estimate_count_ = 0; stable_estimate_count_ = 0;
if (grpc_bdp_estimator_trace.enabled()) { if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:update_inter_time to %dms", name_, gpr_log(GPR_INFO, "bdp[%s]:update_inter_time to %dms", name_,
inter_ping_delay_); inter_ping_delay_);
} }
} }

@ -50,7 +50,7 @@ class BdpEstimator {
// transport (but not necessarily started) // transport (but not necessarily started)
void SchedulePing() { void SchedulePing() {
if (grpc_bdp_estimator_trace.enabled()) { if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_, gpr_log(GPR_INFO, "bdp[%s]:sched acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_); accumulator_, estimate_);
} }
GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED); GPR_ASSERT(ping_state_ == PingState::UNSCHEDULED);
@ -63,7 +63,7 @@ class BdpEstimator {
// the ping is on the wire // the ping is on the wire
void StartPing() { void StartPing() {
if (grpc_bdp_estimator_trace.enabled()) { if (grpc_bdp_estimator_trace.enabled()) {
gpr_log(GPR_DEBUG, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_, gpr_log(GPR_INFO, "bdp[%s]:start acc=%" PRId64 " est=%" PRId64, name_,
accumulator_, estimate_); accumulator_, estimate_);
} }
GPR_ASSERT(ping_state_ == PingState::SCHEDULED); GPR_ASSERT(ping_state_ == PingState::SCHEDULED);

@ -78,7 +78,7 @@ grpc_connectivity_state grpc_connectivity_state_check(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>( grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm)); gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) { if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur)); grpc_connectivity_state_name(cur));
} }
return cur; return cur;
@ -89,7 +89,7 @@ grpc_connectivity_state grpc_connectivity_state_get(
grpc_connectivity_state cur = static_cast<grpc_connectivity_state>( grpc_connectivity_state cur = static_cast<grpc_connectivity_state>(
gpr_atm_no_barrier_load(&tracker->current_state_atm)); gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) { if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name, gpr_log(GPR_INFO, "CONWATCH: %p %s: get %s", tracker, tracker->name,
grpc_connectivity_state_name(cur)); grpc_connectivity_state_name(cur));
} }
if (error != nullptr) { if (error != nullptr) {
@ -110,10 +110,10 @@ bool grpc_connectivity_state_notify_on_state_change(
gpr_atm_no_barrier_load(&tracker->current_state_atm)); gpr_atm_no_barrier_load(&tracker->current_state_atm));
if (grpc_connectivity_state_trace.enabled()) { if (grpc_connectivity_state_trace.enabled()) {
if (current == nullptr) { if (current == nullptr) {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker, gpr_log(GPR_INFO, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
tracker->name, notify); tracker->name, notify);
} else { } else {
gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker, gpr_log(GPR_INFO, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
tracker->name, grpc_connectivity_state_name(*current), tracker->name, grpc_connectivity_state_name(*current),
grpc_connectivity_state_name(cur), notify); grpc_connectivity_state_name(cur), notify);
} }
@ -161,7 +161,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
grpc_connectivity_state_watcher* w; grpc_connectivity_state_watcher* w;
if (grpc_connectivity_state_trace.enabled()) { if (grpc_connectivity_state_trace.enabled()) {
const char* error_string = grpc_error_string(error); const char* error_string = grpc_error_string(error);
gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker, gpr_log(GPR_INFO, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
tracker->name, grpc_connectivity_state_name(cur), tracker->name, grpc_connectivity_state_name(cur),
grpc_connectivity_state_name(state), reason, error, error_string); grpc_connectivity_state_name(state), reason, error, error_string);
} }
@ -187,8 +187,7 @@ void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
*w->current = state; *w->current = state;
tracker->watchers = w->next; tracker->watchers = w->next;
if (grpc_connectivity_state_trace.enabled()) { if (grpc_connectivity_state_trace.enabled()) {
gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name, gpr_log(GPR_INFO, "NOTIFY: %p %s: %p", tracker, tracker->name, w->notify);
w->notify);
} }
GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error)); GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error));
gpr_free(w); gpr_free(w);

@ -1,13 +1,5 @@
<Project> <Project>
<ItemGroup> <ItemGroup>
<!-- We are relying on run_tests.py to build grpc_csharp_ext with the right bitness
and we copy it as both x86 (needed by net45) and x64 (needed by netcoreapp1.0) as we don't
know which one will be needed to run the tests. -->
<Content Include="..\..\..\libs\$(NativeDependenciesConfigurationUnix)\libgrpc_csharp_ext.dylib">
<Link>libgrpc_csharp_ext.x86.dylib</Link>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Pack>false</Pack>
</Content>
<Content Include="..\..\..\libs\$(NativeDependenciesConfigurationUnix)\libgrpc_csharp_ext.dylib"> <Content Include="..\..\..\libs\$(NativeDependenciesConfigurationUnix)\libgrpc_csharp_ext.dylib">
<Link>libgrpc_csharp_ext.x64.dylib</Link> <Link>libgrpc_csharp_ext.x64.dylib</Link>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory> <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>

@ -72,6 +72,17 @@ namespace Grpc.Core
} }
} }
/// <summary>
/// Returns the status code of the call, as a convenient alternative to <see cref="StatusCode">Status.StatusCode</see>.
/// </summary>
public StatusCode StatusCode
{
get
{
return status.StatusCode;
}
}
/// <summary> /// <summary>
/// Gets the call trailing metadata. /// Gets the call trailing metadata.
/// Trailers only have meaningful content for client-side calls (in which case they represent the trailing metadata sent by the server when closing the call). /// Trailers only have meaningful content for client-side calls (in which case they represent the trailing metadata sent by the server when closing the call).

@ -1,5 +0,0 @@
{
"sdk": {
"version": "2.1.4"
}
}

@ -31,7 +31,7 @@
Pod::Spec.new do |s| Pod::Spec.new do |s|
s.name = 'BoringSSL' s.name = 'BoringSSL'
version = '10.0' version = '10.0.2'
s.version = version s.version = version
s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.' s.summary = 'BoringSSL is a fork of OpenSSL that is designed to meet Google’s needs.'
# Adapted from the homepage: # Adapted from the homepage:
@ -67,11 +67,9 @@ Pod::Spec.new do |s|
# "The name and email addresses of the library maintainers, not the Podspec maintainer." # "The name and email addresses of the library maintainers, not the Podspec maintainer."
s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite' s.authors = 'Adam Langley', 'David Benjamin', 'Matt Braithwaite'
versions = version.split('.')
major_version = versions[0] + '.0'
s.source = { s.source = {
:git => 'https://boringssl.googlesource.com/boringssl', :git => 'https://boringssl.googlesource.com/boringssl',
:tag => "version_for_cocoapods_#{major_version}", :commit => "a20bb7ff8bb5057065a2e7941249773f9676cf45",
} }
s.ios.deployment_target = '5.0' s.ios.deployment_target = '5.0'
@ -123,7 +121,8 @@ Pod::Spec.new do |s|
'ssl/**/*.{h,cc}', 'ssl/**/*.{h,cc}',
'*.{h,c}', '*.{h,c}',
'crypto/*.{h,c}', 'crypto/*.{h,c}',
'crypto/**/*.{h,c}' 'crypto/**/*.{h,c}',
'third_party/fiat/*.{h,c}'
ss.private_header_files = 'ssl/*.h', ss.private_header_files = 'ssl/*.h',
'ssl/**/*.h', 'ssl/**/*.h',
'*.h', '*.h',

@ -193,7 +193,6 @@ void create_and_add_channel_to_persistent_list(
create_channel(channel, target, args, creds); create_channel(channel, target, args, creds);
le->channel = channel->wrapper; le->channel = channel->wrapper;
le->ref_count = 1;
new_rsrc.ptr = le; new_rsrc.ptr = le;
gpr_mu_lock(&global_persistent_list_mu); gpr_mu_lock(&global_persistent_list_mu);
PHP_GRPC_PERSISTENT_LIST_UPDATE(&grpc_persistent_list, key, key_len, PHP_GRPC_PERSISTENT_LIST_UPDATE(&grpc_persistent_list, key, key_len,
@ -342,7 +341,6 @@ PHP_METHOD(Channel, __construct) {
free(channel->wrapper->target); free(channel->wrapper->target);
free(channel->wrapper->args_hashstr); free(channel->wrapper->args_hashstr);
free(channel->wrapper); free(channel->wrapper);
le->ref_count += 1;
channel->wrapper = le->channel; channel->wrapper = le->channel;
channel->wrapper->ref_count += 1; channel->wrapper->ref_count += 1;
} }
@ -534,53 +532,6 @@ static void php_grpc_channel_plink_dtor(php_grpc_zend_resource *rsrc
} }
} }
/**
* Clean all channels in the persistent.
* @return void
*/
PHP_METHOD(Channel, cleanPersistentList) {
zend_hash_clean(&grpc_persistent_list);
}
/**
* Return an array of persistent list.
* @return array
*/
PHP_METHOD(Channel, getPersistentList) {
array_init(return_value);
zval *data;
PHP_GRPC_HASH_FOREACH_VAL_START(&grpc_persistent_list, data)
php_grpc_zend_resource *rsrc =
(php_grpc_zend_resource*) PHP_GRPC_HASH_VALPTR_TO_VAL(data)
if (rsrc == NULL) {
break;
}
channel_persistent_le_t* le = rsrc->ptr;
zval* ret_arr;
PHP_GRPC_MAKE_STD_ZVAL(ret_arr);
array_init(ret_arr);
// Info about the target
PHP_GRPC_ADD_STRING_TO_ARRAY(ret_arr, "target",
sizeof("target"), le->channel->target, true);
// Info about key
PHP_GRPC_ADD_STRING_TO_ARRAY(ret_arr, "key",
sizeof("key"), le->channel->key, true);
// Info about persistent channel ref_count
PHP_GRPC_ADD_LONG_TO_ARRAY(ret_arr, "ref_count",
sizeof("ref_count"), le->ref_count);
// Info about connectivity status
int state =
grpc_channel_check_connectivity_state(le->channel->wrapped, (int)0);
// It should be set to 'true' in PHP 5.6.33
PHP_GRPC_ADD_LONG_TO_ARRAY(ret_arr, "connectivity_status",
sizeof("connectivity_status"), state);
// Info about the channel is closed or not
PHP_GRPC_ADD_BOOL_TO_ARRAY(ret_arr, "is_valid",
sizeof("is_valid"), le->channel->is_valid);
add_assoc_zval(return_value, le->channel->target, ret_arr);
PHP_GRPC_HASH_FOREACH_END()
}
ZEND_BEGIN_ARG_INFO_EX(arginfo_construct, 0, 0, 2) ZEND_BEGIN_ARG_INFO_EX(arginfo_construct, 0, 0, 2)
ZEND_ARG_INFO(0, target) ZEND_ARG_INFO(0, target)
ZEND_ARG_INFO(0, args) ZEND_ARG_INFO(0, args)
@ -601,12 +552,6 @@ ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_close, 0, 0, 0) ZEND_BEGIN_ARG_INFO_EX(arginfo_close, 0, 0, 0)
ZEND_END_ARG_INFO() ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_cleanPersistentList, 0, 0, 0)
ZEND_END_ARG_INFO()
ZEND_BEGIN_ARG_INFO_EX(arginfo_getPersistentList, 0, 0, 0)
ZEND_END_ARG_INFO()
static zend_function_entry channel_methods[] = { static zend_function_entry channel_methods[] = {
PHP_ME(Channel, __construct, arginfo_construct, PHP_ME(Channel, __construct, arginfo_construct,
ZEND_ACC_PUBLIC | ZEND_ACC_CTOR) ZEND_ACC_PUBLIC | ZEND_ACC_CTOR)
@ -618,10 +563,6 @@ static zend_function_entry channel_methods[] = {
ZEND_ACC_PUBLIC) ZEND_ACC_PUBLIC)
PHP_ME(Channel, close, arginfo_close, PHP_ME(Channel, close, arginfo_close,
ZEND_ACC_PUBLIC) ZEND_ACC_PUBLIC)
PHP_ME(Channel, cleanPersistentList, arginfo_cleanPersistentList,
ZEND_ACC_PUBLIC)
PHP_ME(Channel, getPersistentList, arginfo_getPersistentList,
ZEND_ACC_PUBLIC)
PHP_FE_END PHP_FE_END
}; };

@ -84,7 +84,6 @@ void php_grpc_delete_persistent_list_entry(char *key, php_grpc_int key_len
typedef struct _channel_persistent_le { typedef struct _channel_persistent_le {
grpc_channel_wrapper *channel; grpc_channel_wrapper *channel;
size_t ref_count;
} channel_persistent_le_t; } channel_persistent_le_t;

@ -46,9 +46,6 @@ class CallCredentials2Test extends PHPUnit_Framework_TestCase
{ {
unset($this->channel); unset($this->channel);
unset($this->server); unset($this->server);
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function callbackFunc($context) public function callbackFunc($context)

@ -52,9 +52,6 @@ class CallCredentialsTest extends PHPUnit_Framework_TestCase
{ {
unset($this->channel); unset($this->channel);
unset($this->server); unset($this->server);
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function callbackFunc($context) public function callbackFunc($context)

@ -38,9 +38,6 @@ class CallTest extends PHPUnit_Framework_TestCase
public function tearDown() public function tearDown()
{ {
$this->channel->close(); $this->channel->close();
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testConstructor() public function testConstructor()

@ -28,9 +28,6 @@ class ChannelTest extends PHPUnit_Framework_TestCase
if (!empty($this->channel)) { if (!empty($this->channel)) {
$this->channel->close(); $this->channel->close();
} }
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testInsecureCredentials() public function testInsecureCredentials()
@ -383,11 +380,6 @@ class ChannelTest extends PHPUnit_Framework_TestCase
// close channel1 // close channel1
$this->channel1->close(); $this->channel1->close();
// channel2 is now in SHUTDOWN state
$state = $this->channel2->getConnectivityState();
$this->assertEquals(GRPC\CHANNEL_FATAL_FAILURE, $state);
// calling it again will result in an exception because the
// channel is already closed // channel is already closed
$state = $this->channel2->getConnectivityState(); $state = $this->channel2->getConnectivityState();
} }

@ -29,9 +29,6 @@ class EndToEndTest extends PHPUnit_Framework_TestCase
public function tearDown() public function tearDown()
{ {
$this->channel->close(); $this->channel->close();
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testSimpleRequestBody() public function testSimpleRequestBody()

@ -1,115 +0,0 @@
<?php
/*
*
* Copyright 2015 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
 * Tests for the process-wide persistent channel list maintained by the
 * Grpc PHP extension: creation, ref-counting, connectivity status
 * tracking, and removal on close.
 *
 * NOTE(review): several test method names misspell "Channel" as
 * "Chennel"; they are kept as-is because PHPUnit discovers tests by
 * method name and renaming would change which tests run.
 */
class PersistentListTest extends PHPUnit_Framework_TestCase
{
    public function setUp()
    {
    }

    public function tearDown()
    {
        // Any channel object gives us a handle through which the
        // process-wide persistent list can be cleared between tests.
        $channel_clean_persistent =
            new Grpc\Channel('localhost:50010', []);
        $channel_clean_persistent->cleanPersistentList();
    }

    /**
     * Polls until the channel leaves the IDLE state, failing the test
     * after 10 one-second waits.
     */
    public function waitUntilNotIdle($channel) {
        for ($i = 0; $i < 10; $i++) {
            $now = Grpc\Timeval::now();
            $deadline = $now->add(new Grpc\Timeval(1000));
            if ($channel->watchConnectivityState(GRPC\CHANNEL_IDLE,
                                                 $deadline)) {
                return true;
            }
        }
        $this->fail('Channel did not leave the IDLE state in time.');
    }

    /**
     * Asserts that a channel is attempting to connect: either CONNECTING
     * or TRANSIENT_FAILURE counts, since 'localhost:1' cannot be reached.
     */
    public function assertConnecting($state) {
        $this->assertTrue($state == GRPC\CHANNEL_CONNECTING ||
                          $state == GRPC\CHANNEL_TRANSIENT_FAILURE);
    }

    public function testPersistentChennelCreateOneChannel()
    {
        $this->channel1 = new Grpc\Channel('localhost:1', []);
        $plist = $this->channel1->getPersistentList();
        // Check the key exists before dereferencing it.
        $this->assertArrayHasKey('localhost:1', $plist);
        $this->assertEquals('localhost:1', $plist['localhost:1']['target']);
        $this->assertEquals(1, $plist['localhost:1']['ref_count']);
        $this->assertEquals(GRPC\CHANNEL_IDLE,
                            $plist['localhost:1']['connectivity_status']);
        $this->assertEquals(1, $plist['localhost:1']['is_valid']);
        $this->channel1->close();
    }

    public function testPersistentChennelStatusChange()
    {
        $this->channel1 = new Grpc\Channel('localhost:1', []);
        $plist = $this->channel1->getPersistentList();
        $this->assertEquals(GRPC\CHANNEL_IDLE,
                            $plist['localhost:1']['connectivity_status']);
        $this->assertEquals(1, $plist['localhost:1']['is_valid']);
        // Kick off a connection attempt, then wait for the state change to
        // be reflected in the persistent list.
        $state = $this->channel1->getConnectivityState(true);
        $this->waitUntilNotIdle($this->channel1);
        $plist = $this->channel1->getPersistentList();
        $this->assertConnecting($plist['localhost:1']['connectivity_status']);
        $this->assertEquals(1, $plist['localhost:1']['is_valid']);
        $this->channel1->close();
    }

    public function testPersistentChennelCloseChannel()
    {
        $this->channel1 = new Grpc\Channel('localhost:1', []);
        $plist = $this->channel1->getPersistentList();
        $this->assertEquals(1, $plist['localhost:1']['ref_count']);
        // Closing the last reference removes the entry from the list.
        $this->channel1->close();
        $plist = $this->channel1->getPersistentList();
        $this->assertArrayNotHasKey('localhost:1', $plist);
    }

    public function testPersistentChannelSameHost()
    {
        $this->channel1 = new Grpc\Channel('localhost:1', []);
        $this->channel2 = new Grpc\Channel('localhost:1', []);
        // Two channels to the same target share one persistent entry, so
        // its ref_count should be 2.
        $plist = $this->channel1->getPersistentList();
        $this->assertArrayHasKey('localhost:1', $plist);
        $this->assertEquals(2, $plist['localhost:1']['ref_count']);
        $this->channel1->close();
        $this->channel2->close();
    }

    public function testPersistentChannelDifferentHost()
    {
        $this->channel1 = new Grpc\Channel('localhost:1', []);
        $this->channel2 = new Grpc\Channel('localhost:2', []);
        // Different targets get independent entries with ref_count 1 each.
        $plist = $this->channel1->getPersistentList();
        $this->assertArrayHasKey('localhost:1', $plist);
        $this->assertArrayHasKey('localhost:2', $plist);
        $this->assertEquals(1, $plist['localhost:1']['ref_count']);
        $this->assertEquals(1, $plist['localhost:2']['ref_count']);
        $this->channel1->close();
        $this->channel2->close();
    }
}

@ -44,9 +44,6 @@ class SecureEndToEndTest extends PHPUnit_Framework_TestCase
public function tearDown() public function tearDown()
{ {
$this->channel->close(); $this->channel->close();
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testSimpleRequestBody() public function testSimpleRequestBody()

@ -27,9 +27,6 @@ class ServerTest extends PHPUnit_Framework_TestCase
public function tearDown() public function tearDown()
{ {
unset($this->server); unset($this->server);
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testConstructorWithNull() public function testConstructorWithNull()

@ -25,9 +25,6 @@ class TimevalTest extends PHPUnit_Framework_TestCase
public function tearDown() public function tearDown()
{ {
unset($this->time); unset($this->time);
$channel_clean_persistent =
new Grpc\Channel('localhost:50010', []);
$channel_clean_persistent->cleanPersistentList();
} }
public function testConstructorWithInt() public function testConstructorWithInt()

@ -9,6 +9,6 @@ RUN apt-get update && apt-get install -y ${'\\'}
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0

@ -40,7 +40,7 @@
zip zip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0

@ -0,0 +1,57 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build targets for the fake ALTS handshaker server used by TSI
# integration tests.
licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_proto_library", "grpc_cc_library", "grpc_cc_binary", "grpc_package")
grpc_package(name = "test/core/tsi/alts/fake_handshaker", visibility = "public")
# Messages shared between the handshaker proto and its clients; no RPC
# services are generated from this file.
grpc_proto_library(
    name = "transport_security_common_proto",
    srcs = ["transport_security_common.proto"],
    has_services = False,
)
# The handshaker RPC definitions (has_services = True generates the
# HandshakerService stubs as well as the messages).
grpc_proto_library(
    name = "handshaker_proto",
    srcs = ["handshaker.proto"],
    has_services = True,
    deps = [
        "transport_security_common_proto",
    ],
)
# Test-only library implementing the fake handshaker service.
grpc_cc_library(
    name = "fake_handshaker_lib",
    testonly = True,
    srcs = ["fake_handshaker_server.cc"],
    language = "C++",
    deps = [
        "handshaker_proto",
        "transport_security_common_proto",
        "//:grpc++",
        "//test/cpp/util:test_config",
    ],
)
# Standalone server binary wrapping fake_handshaker_lib.
grpc_cc_binary(
    name = "fake_handshaker_server",
    testonly = True,
    srcs = ["fake_handshaker_server.cc"],
    language = "C++",
    deps = [
        "fake_handshaker_lib",
    ],
)

@ -0,0 +1,268 @@
/*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <memory>
#include <sstream>
#include <string>
#include <gflags/gflags.h>
#include <grpc/grpc.h>
#include <grpc/support/log.h>
#include <grpcpp/impl/codegen/async_stream.h>
#include <grpcpp/security/server_credentials.h>
#include <grpcpp/server.h>
#include <grpcpp/server_builder.h>
#include <grpcpp/server_context.h>
#include "test/core/tsi/alts/fake_handshaker/handshaker.grpc.pb.h"
#include "test/core/tsi/alts/fake_handshaker/handshaker.pb.h"
#include "test/core/tsi/alts/fake_handshaker/transport_security_common.pb.h"
#include "test/cpp/util/test_config.h"
DEFINE_int32(handshaker_port, 55056,
"TCP port on which the fake handshaker server listens to.");
// Fake handshake messages.
constexpr char kClientInitFrame[] = "ClientInit";
constexpr char kServerFrame[] = "ServerInitAndFinished";
constexpr char kClientFinishFrame[] = "ClientFinished";
// Error messages.
constexpr char kInvalidFrameError[] = "Invalid input frame.";
constexpr char kWrongStateError[] = "Wrong handshake state.";
namespace grpc {
namespace gcp {
// FakeHandshakerService implements a fake handshaker service using a fake key
// exchange protocol. The fake key exchange protocol is a 3-message protocol:
// - Client first sends ClientInit message to Server.
// - Server then sends ServerInitAndFinished message back to Client.
// - Client finally sends ClientFinished message to Server.
// This fake handshaker service is intended for ALTS integration testing without
// relying on real ALTS handshaker service inside GCE.
// It is thread-safe: all per-handshake state lives in a local
// HandshakerContext, so concurrent DoHandshake RPCs do not interact.
class FakeHandshakerService : public HandshakerService::Service {
 public:
  // Bidirectional-streaming entry point. Reads HandshakerReq messages until
  // the fake handshake completes or the client half-closes the stream, writing
  // one HandshakerResp per request.
  Status DoHandshake(
      ServerContext* server_context,
      ServerReaderWriter<HandshakerResp, HandshakerReq>* stream) override {
    Status status;
    HandshakerContext context;
    HandshakerReq request;
    HandshakerResp response;
    gpr_log(GPR_DEBUG, "Start a new handshake.");
    while (stream->Read(&request)) {
      status = ProcessRequest(&context, request, &response);
      // On a protocol error, send the error to the peer and end the RPC
      // with that same non-OK status.
      if (!status.ok()) return WriteErrorResponse(stream, status);
      stream->Write(response);
      if (context.state == COMPLETED) return Status::OK;
      request.Clear();
    }
    return Status::OK;
  }

 private:
  // HandshakeState is used by fake handshaker server to keep track of client's
  // handshake status. In the beginning of a handshake, the state is INITIAL.
  // If start_client or start_server request is called, the state becomes at
  // least STARTED. When the handshaker server produces the first frame, the
  // state becomes SENT. After the handshaker server processes the final frame
  // from the peer, the state becomes COMPLETED.
  enum HandshakeState { INITIAL, STARTED, SENT, COMPLETED };

  // Per-handshake state: which side this RPC is emulating and how far the
  // fake protocol has progressed.
  struct HandshakerContext {
    bool is_client = true;
    HandshakeState state = INITIAL;
  };

  // Dispatches a single request to the handler matching whichever req_oneof
  // field is set; an empty request is rejected with INVALID_ARGUMENT.
  Status ProcessRequest(HandshakerContext* context,
                        const HandshakerReq& request,
                        HandshakerResp* response) {
    GPR_ASSERT(context != nullptr && response != nullptr);
    response->Clear();
    if (request.has_client_start()) {
      gpr_log(GPR_DEBUG, "Process client start request.");
      return ProcessClientStart(context, request.client_start(), response);
    } else if (request.has_server_start()) {
      gpr_log(GPR_DEBUG, "Process server start request.");
      return ProcessServerStart(context, request.server_start(), response);
    } else if (request.has_next()) {
      gpr_log(GPR_DEBUG, "Process next request.");
      return ProcessNext(context, request.next(), response);
    }
    return Status(StatusCode::INVALID_ARGUMENT, "Request is empty.");
  }

  // Handles start_client: validates the request, emits ClientInit, and moves
  // the state straight to SENT (INITIAL -> SENT).
  Status ProcessClientStart(HandshakerContext* context,
                            const StartClientHandshakeReq& request,
                            HandshakerResp* response) {
    GPR_ASSERT(context != nullptr && response != nullptr);
    // Checks request.
    if (context->state != INITIAL) {
      return Status(StatusCode::FAILED_PRECONDITION, kWrongStateError);
    }
    if (request.application_protocols_size() == 0) {
      return Status(StatusCode::INVALID_ARGUMENT,
                    "At least one application protocol needed.");
    }
    if (request.record_protocols_size() == 0) {
      return Status(StatusCode::INVALID_ARGUMENT,
                    "At least one record protocol needed.");
    }
    // Sets response.
    response->set_out_frames(kClientInitFrame);
    response->set_bytes_consumed(0);
    response->mutable_status()->set_code(StatusCode::OK);
    // Updates handshaker context.
    context->is_client = true;
    context->state = SENT;
    return Status::OK;
  }

  // Handles start_server: validates the request; moves to STARTED if no
  // client bytes arrived yet, or consumes ClientInit and answers with
  // ServerInitAndFinished (state SENT) if they did.
  Status ProcessServerStart(HandshakerContext* context,
                            const StartServerHandshakeReq& request,
                            HandshakerResp* response) {
    GPR_ASSERT(context != nullptr && response != nullptr);
    // Checks request.
    if (context->state != INITIAL) {
      return Status(StatusCode::FAILED_PRECONDITION, kWrongStateError);
    }
    if (request.application_protocols_size() == 0) {
      return Status(StatusCode::INVALID_ARGUMENT,
                    "At least one application protocol needed.");
    }
    if (request.handshake_parameters().empty()) {
      return Status(StatusCode::INVALID_ARGUMENT,
                    "At least one set of handshake parameters needed.");
    }
    // Sets response.
    if (request.in_bytes().empty()) {
      // start_server request does not have in_bytes.
      response->set_bytes_consumed(0);
      context->state = STARTED;
    } else {
      // start_server request has in_bytes.
      if (request.in_bytes() == kClientInitFrame) {
        response->set_out_frames(kServerFrame);
        response->set_bytes_consumed(strlen(kClientInitFrame));
        context->state = SENT;
      } else {
        return Status(StatusCode::UNKNOWN, kInvalidFrameError);
      }
    }
    response->mutable_status()->set_code(StatusCode::OK);
    context->is_client = false;
    return Status::OK;
  }

  // Handles next: advances the fake protocol one step for whichever side
  // this handshake is emulating, producing the handshaker result once the
  // state reaches COMPLETED.
  Status ProcessNext(HandshakerContext* context,
                     const NextHandshakeMessageReq& request,
                     HandshakerResp* response) {
    GPR_ASSERT(context != nullptr && response != nullptr);
    if (context->is_client) {
      // Processes next request on client side.
      if (context->state != SENT) {
        return Status(StatusCode::FAILED_PRECONDITION, kWrongStateError);
      }
      if (request.in_bytes() != kServerFrame) {
        return Status(StatusCode::UNKNOWN, kInvalidFrameError);
      }
      response->set_out_frames(kClientFinishFrame);
      response->set_bytes_consumed(strlen(kServerFrame));
      context->state = COMPLETED;
    } else {
      // Processes next request on server side.
      HandshakeState current_state = context->state;
      if (current_state == STARTED) {
        if (request.in_bytes() != kClientInitFrame) {
          return Status(StatusCode::UNKNOWN, kInvalidFrameError);
        }
        response->set_out_frames(kServerFrame);
        response->set_bytes_consumed(strlen(kClientInitFrame));
        context->state = SENT;
      } else if (current_state == SENT) {
        // Client finish frame may be sent along with the first payload from the
        // client, handshaker only consumes the client finish frame.
        if (request.in_bytes().substr(0, strlen(kClientFinishFrame)) !=
            kClientFinishFrame) {
          return Status(StatusCode::UNKNOWN, kInvalidFrameError);
        }
        response->set_bytes_consumed(strlen(kClientFinishFrame));
        context->state = COMPLETED;
      } else {
        return Status(StatusCode::FAILED_PRECONDITION, kWrongStateError);
      }
    }
    // At this point, processing next request succeeded.
    response->mutable_status()->set_code(StatusCode::OK);
    if (context->state == COMPLETED) {
      *response->mutable_result() = GetHandshakerResult();
    }
    return Status::OK;
  }

  // Sends a response carrying the (non-OK) status to the peer and returns
  // that same status so the RPC terminates with it.
  Status WriteErrorResponse(
      ServerReaderWriter<HandshakerResp, HandshakerReq>* stream,
      const Status& status) {
    GPR_ASSERT(!status.ok());
    HandshakerResp response;
    response.mutable_status()->set_code(status.error_code());
    response.mutable_status()->set_details(status.error_message());
    stream->Write(response);
    return status;
  }

  // Builds a fixed fake HandshakerResult (constant identities, an all-zero
  // 1024-byte key, and RPC protocol version pinned to 2.1).
  HandshakerResult GetHandshakerResult() {
    HandshakerResult result;
    result.set_application_protocol("grpc");
    result.set_record_protocol("ALTSRP_GCM_AES128_REKEY");
    result.mutable_peer_identity()->set_service_account("peer_identity");
    result.mutable_local_identity()->set_service_account("local_identity");
    string key(1024, '\0');
    result.set_key_data(key);
    result.mutable_peer_rpc_versions()->mutable_max_rpc_version()->set_major(2);
    result.mutable_peer_rpc_versions()->mutable_max_rpc_version()->set_minor(1);
    result.mutable_peer_rpc_versions()->mutable_min_rpc_version()->set_major(2);
    result.mutable_peer_rpc_versions()->mutable_min_rpc_version()->set_minor(1);
    return result;
  }
};
} // namespace gcp
} // namespace grpc
void RunServer() {
GPR_ASSERT(FLAGS_handshaker_port != 0);
std::ostringstream server_address;
server_address << "[::1]:" << FLAGS_handshaker_port;
grpc::gcp::FakeHandshakerService service;
grpc::ServerBuilder builder;
builder.AddListeningPort(server_address.str(),
grpc::InsecureServerCredentials());
builder.RegisterService(&service);
std::unique_ptr<grpc::Server> server(builder.BuildAndStart());
gpr_log(GPR_INFO, "Fake handshaker server listening on %s",
server_address.str().c_str());
server->Wait();
}
// Entry point: parse test flags (including --handshaker_port) and run the
// fake handshaker server until killed.
int main(int argc, char** argv) {
  grpc::testing::InitTest(&argc, &argv, true);
  RunServer();
  return 0;
}

@ -0,0 +1,224 @@
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
import "test/core/tsi/alts/fake_handshaker/transport_security_common.proto";
package grpc.gcp;
option java_package = "io.grpc.alts.internal";
enum HandshakeProtocol {
// Default value.
HANDSHAKE_PROTOCOL_UNSPECIFIED = 0;
// TLS handshake protocol.
TLS = 1;
// Application Layer Transport Security handshake protocol.
ALTS = 2;
}
enum NetworkProtocol {
NETWORK_PROTOCOL_UNSPECIFIED = 0;
TCP = 1;
UDP = 2;
}
message Endpoint {
// IP address. It should contain an IPv4 or IPv6 string literal, e.g.
// "192.168.0.1" or "2001:db8::1".
string ip_address = 1;
// Port number.
int32 port = 2;
// Network protocol (e.g., TCP, UDP) associated with this endpoint.
NetworkProtocol protocol = 3;
}
message Identity {
oneof identity_oneof {
// Service account of a connection endpoint.
string service_account = 1;
// Hostname of a connection endpoint.
string hostname = 2;
}
}
message StartClientHandshakeReq {
// Handshake security protocol requested by the client.
HandshakeProtocol handshake_security_protocol = 1;
// The application protocols supported by the client, e.g., "h2" (for http2),
// "grpc".
repeated string application_protocols = 2;
// The record protocols supported by the client, e.g.,
// "ALTSRP_GCM_AES128".
repeated string record_protocols = 3;
// (Optional) Describes which server identities are acceptable by the client.
// If target identities are provided and none of them matches the peer
// identity of the server, handshake will fail.
repeated Identity target_identities = 4;
// (Optional) Application may specify a local identity. Otherwise, the
// handshaker chooses a default local identity.
Identity local_identity = 5;
// (Optional) Local endpoint information of the connection to the server,
// such as local IP address, port number, and network protocol.
Endpoint local_endpoint = 6;
// (Optional) Endpoint information of the remote server, such as IP address,
// port number, and network protocol.
Endpoint remote_endpoint = 7;
// (Optional) If target name is provided, a secure naming check is performed
// to verify that the peer authenticated identity is indeed authorized to run
// the target name.
string target_name = 8;
// (Optional) RPC protocol versions supported by the client.
RpcProtocolVersions rpc_versions = 9;
}
message ServerHandshakeParameters {
// The record protocols supported by the server, e.g.,
// "ALTSRP_GCM_AES128".
repeated string record_protocols = 1;
// (Optional) A list of local identities supported by the server, if
// specified. Otherwise, the handshaker chooses a default local identity.
repeated Identity local_identities = 2;
}
message StartServerHandshakeReq {
  // The application protocols supported by the server, e.g., "h2" (for http2),
  // "grpc".
  repeated string application_protocols = 1;
  // Handshake parameters (record protocols and local identities supported by
  // the server) mapped by the handshake protocol. Each handshake security
  // protocol (e.g., TLS or ALTS) has its own set of record protocols and local
  // identities. Since protobuf does not support enum as key to the map, the key
  // to handshake_parameters is the integer value of HandshakeProtocol enum.
  map<int32, ServerHandshakeParameters> handshake_parameters = 2;
  // Bytes in out_frames returned from the peer's HandshakerResp. It is possible
  // that the peer's out_frames are split into multiple HandshakerReq messages.
  bytes in_bytes = 3;
  // (Optional) Local endpoint information of the connection to the client,
  // such as local IP address, port number, and network protocol.
  Endpoint local_endpoint = 4;
  // (Optional) Endpoint information of the remote client, such as IP address,
  // port number, and network protocol.
  Endpoint remote_endpoint = 5;
  // (Optional) RPC protocol versions supported by the server.
  RpcProtocolVersions rpc_versions = 6;
}
message NextHandshakeMessageReq {
  // Bytes in out_frames returned from the peer's HandshakerResp. It is possible
  // that the peer's out_frames are split into multiple NextHandshakeMessageReq
  // messages.
  bytes in_bytes = 1;
}
message HandshakerReq {
oneof req_oneof {
// The start client handshake request message.
StartClientHandshakeReq client_start = 1;
// The start server handshake request message.
StartServerHandshakeReq server_start = 2;
// The next handshake request message.
NextHandshakeMessageReq next = 3;
}
}
message HandshakerResult {
// The application protocol negotiated for this connection.
string application_protocol = 1;
// The record protocol negotiated for this connection.
string record_protocol = 2;
// Cryptographic key data. The key data may be more than the key length
// required for the record protocol, thus the client of the handshaker
// service needs to truncate the key data into the right key length.
bytes key_data = 3;
// The authenticated identity of the peer.
Identity peer_identity = 4;
// The local identity used in the handshake.
Identity local_identity = 5;
// Indicate whether the handshaker service client should keep the channel
// between the handshaker service open, e.g., in order to handle
// post-handshake messages in the future.
bool keep_channel_open = 6;
// The RPC protocol versions supported by the peer.
RpcProtocolVersions peer_rpc_versions = 7;
}
message HandshakerStatus {
// The status code. This could be the gRPC status code.
uint32 code = 1;
// The status details.
string details = 2;
}
message HandshakerResp {
// Frames to be given to the peer for the NextHandshakeMessageReq. May be
// empty if no out_frames have to be sent to the peer or if in_bytes in the
// HandshakerReq are incomplete. All the non-empty out frames must be sent to
// the peer even if the handshaker status is not OK as these frames may
// contain the alert frames.
bytes out_frames = 1;
// Number of bytes in the in_bytes consumed by the handshaker. It is possible
// that part of in_bytes in HandshakerReq was unrelated to the handshake
// process.
uint32 bytes_consumed = 2;
// This is set iff the handshake was successful. out_frames may still be set
// to frames that needs to be forwarded to the peer.
HandshakerResult result = 3;
// Status of the handshaker.
HandshakerStatus status = 4;
}
service HandshakerService {
  // Handshaker service accepts a stream of handshaker request, returning a
  // stream of handshaker response. Client is expected to send exactly one
  // message with either client_start or server_start followed by one or more
  // messages with next. Each time client sends a request, the handshaker
  // service expects to respond. Client does not have to wait for service's
  // response before sending next request.
  rpc DoHandshake(stream HandshakerReq)
      returns (stream HandshakerResp) {
  }
}

@ -0,0 +1,40 @@
// Copyright 2018 gRPC authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package grpc.gcp;
option java_package = "io.grpc.alts.internal";
// The security level of the created channel. The list is sorted in increasing
// level of security. This order must always be maintained.
enum SecurityLevel {
  SECURITY_NONE = 0;
  INTEGRITY_ONLY = 1;
  INTEGRITY_AND_PRIVACY = 2;
}
// Max and min supported RPC protocol versions.
message RpcProtocolVersions {
  // RPC version contains a major version and a minor version.
  message Version {
    uint32 major = 1;
    uint32 minor = 2;
  }
  // Maximum supported RPC version.
  Version max_rpc_version = 1;
  // Minimum supported RPC version.
  Version min_rpc_version = 2;
}

@ -88,7 +88,6 @@ TEST(SslSessionCacheTest, InitialState) {
// Verify session initial state. // Verify session initial state.
{ {
tsi::SslSessionPtr tmp_sess = tracker.NewSession(1); tsi::SslSessionPtr tmp_sess = tracker.NewSession(1);
EXPECT_EQ(tmp_sess->references, 1);
EXPECT_TRUE(tracker.IsAlive(1)); EXPECT_TRUE(tracker.IsAlive(1));
EXPECT_EQ(tracker.AliveCount(), 1); EXPECT_EQ(tracker.AliveCount(), 1);
} }

@ -34,6 +34,10 @@
#include <grpc/support/log.h> #include <grpc/support/log.h>
#include <grpc/support/string_util.h> #include <grpc/support/string_util.h>
extern "C" {
#include <openssl/crypto.h>
}
#define SSL_TSI_TEST_ALPN1 "foo" #define SSL_TSI_TEST_ALPN1 "foo"
#define SSL_TSI_TEST_ALPN2 "toto" #define SSL_TSI_TEST_ALPN2 "toto"
#define SSL_TSI_TEST_ALPN3 "baz" #define SSL_TSI_TEST_ALPN3 "baz"
@ -42,6 +46,14 @@
#define SSL_TSI_TEST_BAD_SERVER_KEY_CERT_PAIRS_NUM 1 #define SSL_TSI_TEST_BAD_SERVER_KEY_CERT_PAIRS_NUM 1
#define SSL_TSI_TEST_CREDENTIALS_DIR "src/core/tsi/test_creds/" #define SSL_TSI_TEST_CREDENTIALS_DIR "src/core/tsi/test_creds/"
// OpenSSL 1.1 uses AES256 for encryption session ticket by default so specify
// different STEK size.
#if OPENSSL_VERSION_NUMBER >= 0x10100000 && !defined(OPENSSL_IS_BORINGSSL)
const size_t kSessionTicketEncryptionKeySize = 80;
#else
const size_t kSessionTicketEncryptionKeySize = 48;
#endif
typedef enum AlpnMode { typedef enum AlpnMode {
NO_ALPN, NO_ALPN,
ALPN_CLIENT_NO_SERVER, ALPN_CLIENT_NO_SERVER,
@ -624,7 +636,7 @@ void ssl_tsi_test_do_round_trip_odd_buffer_size() {
void ssl_tsi_test_do_handshake_session_cache() { void ssl_tsi_test_do_handshake_session_cache() {
tsi_ssl_session_cache* session_cache = tsi_ssl_session_cache_create_lru(16); tsi_ssl_session_cache* session_cache = tsi_ssl_session_cache_create_lru(16);
char session_ticket_key[48]; char session_ticket_key[kSessionTicketEncryptionKeySize];
auto do_handshake = [&session_ticket_key, auto do_handshake = [&session_ticket_key,
&session_cache](bool session_reused) { &session_cache](bool session_reused) {
tsi_test_fixture* fixture = ssl_tsi_test_fixture_create(); tsi_test_fixture* fixture = ssl_tsi_test_fixture_create();
@ -633,22 +645,22 @@ void ssl_tsi_test_do_handshake_session_cache() {
ssl_fixture->server_name_indication = ssl_fixture->server_name_indication =
const_cast<char*>("waterzooi.test.google.be"); const_cast<char*>("waterzooi.test.google.be");
ssl_fixture->session_ticket_key = session_ticket_key; ssl_fixture->session_ticket_key = session_ticket_key;
ssl_fixture->session_ticket_key_size = 48; ssl_fixture->session_ticket_key_size = sizeof(session_ticket_key);
tsi_ssl_session_cache_ref(session_cache); tsi_ssl_session_cache_ref(session_cache);
ssl_fixture->session_cache = session_cache; ssl_fixture->session_cache = session_cache;
ssl_fixture->session_reused = session_reused; ssl_fixture->session_reused = session_reused;
tsi_test_do_round_trip(&ssl_fixture->base); tsi_test_do_round_trip(&ssl_fixture->base);
tsi_test_fixture_destroy(fixture); tsi_test_fixture_destroy(fixture);
}; };
memset(session_ticket_key, 'a', 48); memset(session_ticket_key, 'a', sizeof(session_ticket_key));
do_handshake(false); do_handshake(false);
do_handshake(true); do_handshake(true);
do_handshake(true); do_handshake(true);
// Changing session_ticket_key on server invalidates ticket. // Changing session_ticket_key on server invalidates ticket.
memset(session_ticket_key, 'b', 48); memset(session_ticket_key, 'b', sizeof(session_ticket_key));
do_handshake(false); do_handshake(false);
do_handshake(true); do_handshake(true);
memset(session_ticket_key, 'c', 48); memset(session_ticket_key, 'c', sizeof(session_ticket_key));
do_handshake(false); do_handshake(false);
do_handshake(true); do_handshake(true);
tsi_ssl_session_cache_unref(session_cache); tsi_ssl_session_cache_unref(session_cache);

@ -31,6 +31,8 @@
#include "test/cpp/interop/interop_client.h" #include "test/cpp/interop/interop_client.h"
#include "test/cpp/util/test_config.h" #include "test/cpp/util/test_config.h"
DEFINE_bool(use_alts, false,
"Whether to use alts. Enable alts will disable tls.");
DEFINE_bool(use_tls, false, "Whether to use tls."); DEFINE_bool(use_tls, false, "Whether to use tls.");
DEFINE_string(custom_credentials_type, "", "User provided credentials type."); DEFINE_string(custom_credentials_type, "", "User provided credentials type.");
DEFINE_bool(use_test_ca, false, "False to use SSL roots for google"); DEFINE_bool(use_test_ca, false, "False to use SSL roots for google");

@ -35,6 +35,7 @@
#include "test/cpp/util/create_test_channel.h" #include "test/cpp/util/create_test_channel.h"
#include "test/cpp/util/test_credentials_provider.h" #include "test/cpp/util/test_credentials_provider.h"
DECLARE_bool(use_alts);
DECLARE_bool(use_tls); DECLARE_bool(use_tls);
DECLARE_string(custom_credentials_type); DECLARE_string(custom_credentials_type);
DECLARE_bool(use_test_ca); DECLARE_bool(use_test_ca);
@ -103,8 +104,10 @@ std::shared_ptr<Channel> CreateChannelForTestCase(
GPR_ASSERT(creds); GPR_ASSERT(creds);
} }
if (FLAGS_custom_credentials_type.empty()) { if (FLAGS_custom_credentials_type.empty()) {
transport_security security_type =
FLAGS_use_alts ? ALTS : (FLAGS_use_tls ? TLS : INSECURE);
return CreateTestChannel(host_port, FLAGS_server_host_override, return CreateTestChannel(host_port, FLAGS_server_host_override,
FLAGS_use_tls, !FLAGS_use_test_ca, creds); security_type, !FLAGS_use_test_ca, creds);
} else { } else {
return CreateTestChannel(host_port, FLAGS_custom_credentials_type, creds); return CreateTestChannel(host_port, FLAGS_custom_credentials_type, creds);
} }

@ -194,7 +194,7 @@ int main(int argc, char** argv) {
snprintf(host_port, host_port_buf_size, "%s:%d", FLAGS_server_host.c_str(), snprintf(host_port, host_port_buf_size, "%s:%d", FLAGS_server_host.c_str(),
FLAGS_server_port); FLAGS_server_port);
std::shared_ptr<grpc::Channel> channel = std::shared_ptr<grpc::Channel> channel =
grpc::CreateTestChannel(host_port, false); grpc::CreateTestChannel(host_port, grpc::testing::INSECURE);
GPR_ASSERT(channel->WaitForConnected(gpr_time_add( GPR_ASSERT(channel->WaitForConnected(gpr_time_add(
gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(300, GPR_TIMESPAN)))); gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(300, GPR_TIMESPAN))));
grpc::testing::Http2Client client(channel); grpc::testing::Http2Client client(channel);

@ -38,6 +38,8 @@
#include "test/cpp/interop/server_helper.h" #include "test/cpp/interop/server_helper.h"
#include "test/cpp/util/test_config.h" #include "test/cpp/util/test_config.h"
DEFINE_bool(use_alts, false,
"Whether to use alts. Enable alts will disable tls.");
DEFINE_bool(use_tls, false, "Whether to use tls."); DEFINE_bool(use_tls, false, "Whether to use tls.");
DEFINE_string(custom_credentials_type, "", "User provided credentials type."); DEFINE_string(custom_credentials_type, "", "User provided credentials type.");
DEFINE_int32(port, 0, "Server port."); DEFINE_int32(port, 0, "Server port.");

@ -44,9 +44,11 @@ using grpc::ClientContext;
using grpc::CreateTestChannel; using grpc::CreateTestChannel;
using grpc::Status; using grpc::Status;
using grpc::testing::Empty; using grpc::testing::Empty;
using grpc::testing::INSECURE;
using grpc::testing::ReconnectInfo; using grpc::testing::ReconnectInfo;
using grpc::testing::ReconnectParams; using grpc::testing::ReconnectParams;
using grpc::testing::ReconnectService; using grpc::testing::ReconnectService;
using grpc::testing::TLS;
int main(int argc, char** argv) { int main(int argc, char** argv) {
grpc::testing::InitTest(&argc, &argv, true); grpc::testing::InitTest(&argc, &argv, true);
@ -57,7 +59,7 @@ int main(int argc, char** argv) {
server_address << FLAGS_server_host << ':' << FLAGS_server_control_port; server_address << FLAGS_server_host << ':' << FLAGS_server_control_port;
std::unique_ptr<ReconnectService::Stub> control_stub( std::unique_ptr<ReconnectService::Stub> control_stub(
ReconnectService::NewStub( ReconnectService::NewStub(
CreateTestChannel(server_address.str(), false))); CreateTestChannel(server_address.str(), INSECURE)));
ClientContext start_context; ClientContext start_context;
ReconnectParams reconnect_params; ReconnectParams reconnect_params;
reconnect_params.set_max_reconnect_backoff_ms(FLAGS_max_reconnect_backoff_ms); reconnect_params.set_max_reconnect_backoff_ms(FLAGS_max_reconnect_backoff_ms);
@ -75,7 +77,7 @@ int main(int argc, char** argv) {
FLAGS_max_reconnect_backoff_ms); FLAGS_max_reconnect_backoff_ms);
} }
std::shared_ptr<Channel> retry_channel = std::shared_ptr<Channel> retry_channel =
CreateTestChannel(server_address.str(), "foo.test.google.fr", true, false, CreateTestChannel(server_address.str(), "foo.test.google.fr", TLS, false,
std::shared_ptr<CallCredentials>(), channel_args); std::shared_ptr<CallCredentials>(), channel_args);
// About 13 retries. // About 13 retries.

@ -26,6 +26,7 @@
#include "src/core/lib/surface/call_test_only.h" #include "src/core/lib/surface/call_test_only.h"
#include "test/cpp/util/test_credentials_provider.h" #include "test/cpp/util/test_credentials_provider.h"
DECLARE_bool(use_alts);
DECLARE_bool(use_tls); DECLARE_bool(use_tls);
DECLARE_string(custom_credentials_type); DECLARE_string(custom_credentials_type);
@ -36,6 +37,8 @@ std::shared_ptr<ServerCredentials> CreateInteropServerCredentials() {
if (!FLAGS_custom_credentials_type.empty()) { if (!FLAGS_custom_credentials_type.empty()) {
return GetCredentialsProvider()->GetServerCredentials( return GetCredentialsProvider()->GetServerCredentials(
FLAGS_custom_credentials_type); FLAGS_custom_credentials_type);
} else if (FLAGS_use_alts) {
return GetCredentialsProvider()->GetServerCredentials(kAltsCredentialsType);
} else if (FLAGS_use_tls) { } else if (FLAGS_use_tls) {
return GetCredentialsProvider()->GetServerCredentials(kTlsCredentialsType); return GetCredentialsProvider()->GetServerCredentials(kTlsCredentialsType);
} else { } else {

@ -99,18 +99,24 @@ DEFINE_bool(do_not_abort_on_transient_failures, true,
// Options from client.cc (for compatibility with interop test). // Options from client.cc (for compatibility with interop test).
// TODO(sreek): Consolidate overlapping options // TODO(sreek): Consolidate overlapping options
DEFINE_bool(use_alts, false,
"Whether to use alts. Enable alts will disable tls.");
DEFINE_bool(use_tls, false, "Whether to use tls."); DEFINE_bool(use_tls, false, "Whether to use tls.");
DEFINE_bool(use_test_ca, false, "False to use SSL roots for google"); DEFINE_bool(use_test_ca, false, "False to use SSL roots for google");
DEFINE_string(server_host_override, "foo.test.google.fr", DEFINE_string(server_host_override, "foo.test.google.fr",
"Override the server host which is sent in HTTP header"); "Override the server host which is sent in HTTP header");
using grpc::testing::ALTS;
using grpc::testing::INSECURE;
using grpc::testing::MetricsService; using grpc::testing::MetricsService;
using grpc::testing::MetricsServiceImpl; using grpc::testing::MetricsServiceImpl;
using grpc::testing::StressTestInteropClient; using grpc::testing::StressTestInteropClient;
using grpc::testing::TLS;
using grpc::testing::TestCaseType; using grpc::testing::TestCaseType;
using grpc::testing::UNKNOWN_TEST; using grpc::testing::UNKNOWN_TEST;
using grpc::testing::WeightedRandomTestSelector; using grpc::testing::WeightedRandomTestSelector;
using grpc::testing::kTestCaseList; using grpc::testing::kTestCaseList;
using grpc::testing::transport_security;
static int log_level = GPR_LOG_SEVERITY_DEBUG; static int log_level = GPR_LOG_SEVERITY_DEBUG;
@ -268,6 +274,8 @@ int main(int argc, char** argv) {
int thread_idx = 0; int thread_idx = 0;
int server_idx = -1; int server_idx = -1;
char buffer[256]; char buffer[256];
transport_security security_type =
FLAGS_use_alts ? ALTS : (FLAGS_use_tls ? TLS : INSECURE);
for (auto it = server_addresses.begin(); it != server_addresses.end(); it++) { for (auto it = server_addresses.begin(); it != server_addresses.end(); it++) {
++server_idx; ++server_idx;
// Create channel(s) for each server // Create channel(s) for each server
@ -276,7 +284,7 @@ int main(int argc, char** argv) {
gpr_log(GPR_INFO, "Starting test with %s channel_idx=%d..", it->c_str(), gpr_log(GPR_INFO, "Starting test with %s channel_idx=%d..", it->c_str(),
channel_idx); channel_idx);
std::shared_ptr<grpc::Channel> channel = grpc::CreateTestChannel( std::shared_ptr<grpc::Channel> channel = grpc::CreateTestChannel(
*it, FLAGS_server_host_override, FLAGS_use_tls, !FLAGS_use_test_ca); *it, FLAGS_server_host_override, security_type, !FLAGS_use_test_ca);
// Create stub(s) for each channel // Create stub(s) for each channel
for (int stub_idx = 0; stub_idx < FLAGS_num_stubs_per_channel; for (int stub_idx = 0; stub_idx < FLAGS_num_stubs_per_channel;

@ -107,37 +107,37 @@ std::shared_ptr<Channel> CreateTestChannel(
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots, testing::transport_security security_type, bool use_prod_roots,
const std::shared_ptr<CallCredentials>& creds, const std::shared_ptr<CallCredentials>& creds,
const ChannelArguments& args) { const ChannelArguments& args) {
grpc::string type; grpc::string type =
if (enable_ssl) { security_type == testing::ALTS
type = testing::kTlsCredentialsType; ? testing::kAltsCredentialsType
} : (security_type == testing::TLS ? testing::kTlsCredentialsType
: testing::kInsecureCredentialsType);
return CreateTestChannel(server, type, override_hostname, use_prod_roots, return CreateTestChannel(server, type, override_hostname, use_prod_roots,
creds, args); creds, args);
} }
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots, testing::transport_security security_type, bool use_prod_roots,
const std::shared_ptr<CallCredentials>& creds) { const std::shared_ptr<CallCredentials>& creds) {
return CreateTestChannel(server, override_hostname, enable_ssl, return CreateTestChannel(server, override_hostname, security_type,
use_prod_roots, creds, ChannelArguments()); use_prod_roots, creds, ChannelArguments());
} }
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots) { testing::transport_security security_type, bool use_prod_roots) {
return CreateTestChannel(server, override_hostname, enable_ssl, return CreateTestChannel(server, override_hostname, security_type,
use_prod_roots, std::shared_ptr<CallCredentials>()); use_prod_roots, std::shared_ptr<CallCredentials>());
} }
// Shortcut for end2end and interop tests. // Shortcut for end2end and interop tests.
std::shared_ptr<Channel> CreateTestChannel(const grpc::string& server, std::shared_ptr<Channel> CreateTestChannel(
bool enable_ssl) { const grpc::string& server, testing::transport_security security_type) {
return CreateTestChannel(server, "foo.test.google.fr", enable_ssl, false); return CreateTestChannel(server, "foo.test.google.fr", security_type, false);
} }
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(

@ -26,21 +26,27 @@
namespace grpc { namespace grpc {
class Channel; class Channel;
std::shared_ptr<Channel> CreateTestChannel(const grpc::string& server, namespace testing {
bool enable_ssl);
typedef enum { INSECURE = 0, TLS, ALTS } transport_security;
} // namespace testing
std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, testing::transport_security security_type);
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots); testing::transport_security security_type, bool use_prod_roots);
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots, testing::transport_security security_type, bool use_prod_roots,
const std::shared_ptr<CallCredentials>& creds); const std::shared_ptr<CallCredentials>& creds);
std::shared_ptr<Channel> CreateTestChannel( std::shared_ptr<Channel> CreateTestChannel(
const grpc::string& server, const grpc::string& override_hostname, const grpc::string& server, const grpc::string& override_hostname,
bool enable_ssl, bool use_prod_roots, testing::transport_security security_type, bool use_prod_roots,
const std::shared_ptr<CallCredentials>& creds, const std::shared_ptr<CallCredentials>& creds,
const ChannelArguments& args); const ChannelArguments& args);

@ -56,6 +56,9 @@ class DefaultCredentialsProvider : public CredentialsProvider {
const grpc::string& type, ChannelArguments* args) override { const grpc::string& type, ChannelArguments* args) override {
if (type == grpc::testing::kInsecureCredentialsType) { if (type == grpc::testing::kInsecureCredentialsType) {
return InsecureChannelCredentials(); return InsecureChannelCredentials();
} else if (type == grpc::testing::kAltsCredentialsType) {
grpc::experimental::AltsCredentialsOptions alts_opts;
return grpc::experimental::AltsCredentials(alts_opts);
} else if (type == grpc::testing::kTlsCredentialsType) { } else if (type == grpc::testing::kTlsCredentialsType) {
SslCredentialsOptions ssl_opts = {test_root_cert, "", ""}; SslCredentialsOptions ssl_opts = {test_root_cert, "", ""};
args->SetSslTargetNameOverride("foo.test.google.fr"); args->SetSslTargetNameOverride("foo.test.google.fr");
@ -77,6 +80,9 @@ class DefaultCredentialsProvider : public CredentialsProvider {
const grpc::string& type) override { const grpc::string& type) override {
if (type == grpc::testing::kInsecureCredentialsType) { if (type == grpc::testing::kInsecureCredentialsType) {
return InsecureServerCredentials(); return InsecureServerCredentials();
} else if (type == grpc::testing::kAltsCredentialsType) {
grpc::experimental::AltsServerCredentialsOptions alts_opts;
return grpc::experimental::AltsServerCredentials(alts_opts);
} else if (type == grpc::testing::kTlsCredentialsType) { } else if (type == grpc::testing::kTlsCredentialsType) {
SslServerCredentialsOptions::PemKeyCertPair pkcp = {test_server1_key, SslServerCredentialsOptions::PemKeyCertPair pkcp = {test_server1_key,
test_server1_cert}; test_server1_cert};

@ -29,10 +29,10 @@ namespace grpc {
namespace testing { namespace testing {
const char kInsecureCredentialsType[] = "INSECURE_CREDENTIALS"; const char kInsecureCredentialsType[] = "INSECURE_CREDENTIALS";
// For real credentials, like tls/ssl, this name should match the AuthContext // For real credentials, like tls/ssl, this name should match the AuthContext
// property "transport_security_type". // property "transport_security_type".
const char kTlsCredentialsType[] = "ssl"; const char kTlsCredentialsType[] = "ssl";
const char kAltsCredentialsType[] = "alts";
// Provide test credentials of a particular type. // Provide test credentials of a particular type.
class CredentialTypeProvider { class CredentialTypeProvider {

File diff suppressed because it is too large Load Diff

@ -30,10 +30,11 @@ TEST_DIRS=(
) )
VIRTUALENV=python_pylint_venv VIRTUALENV=python_pylint_venv
python -m virtualenv $VIRTUALENV
virtualenv $VIRTUALENV PYTHON=$VIRTUALENV/bin/python
PYTHON=$(realpath $VIRTUALENV/bin/python)
$PYTHON -m pip install --upgrade pip==9.0.2 $PYTHON -m pip install --upgrade pip==10.0.1
$PYTHON -m pip install pylint==1.6.5 $PYTHON -m pip install pylint==1.6.5
for dir in "${DIRS[@]}"; do for dir in "${DIRS[@]}"; do

@ -70,7 +70,7 @@ subprocess_arguments_list = [
'env': environment 'env': environment
}, },
{ {
'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'], 'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==10.0.1'],
'env': environment 'env': environment
}, },
{ {

@ -32,7 +32,7 @@ VIRTUALENV=yapf_virtual_environment
virtualenv $VIRTUALENV virtualenv $VIRTUALENV
PYTHON=$(realpath "${VIRTUALENV}/bin/python") PYTHON=$(realpath "${VIRTUALENV}/bin/python")
$PYTHON -m pip install --upgrade pip==9.0.2 $PYTHON -m pip install --upgrade pip==10.0.1
$PYTHON -m pip install --upgrade futures $PYTHON -m pip install --upgrade futures
$PYTHON -m pip install yapf==0.20.0 $PYTHON -m pip install yapf==0.20.0

@ -33,9 +33,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
ADD clang_tidy_all_the_things.sh / ADD clang_tidy_all_the_things.sh /

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================ #================
# C# dependencies # C# dependencies

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================ #================
# C# dependencies # C# dependencies

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================= #=================
# C++ dependencies # C++ dependencies

@ -28,9 +28,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Define the default command. # Define the default command.
CMD ["bash"] CMD ["bash"]

@ -28,9 +28,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Define the default command. # Define the default command.
CMD ["bash"] CMD ["bash"]

@ -28,9 +28,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Define the default command. # Define the default command.
CMD ["bash"] CMD ["bash"]

@ -28,9 +28,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
RUN pip install twisted h2==2.6.1 hyper RUN pip install twisted h2==2.6.1 hyper

@ -43,9 +43,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Trigger download of as many Gradle artifacts as possible. # Trigger download of as many Gradle artifacts as possible.

@ -43,9 +43,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Trigger download of as many Gradle artifacts as possible. # Trigger download of as many Gradle artifacts as possible.

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================== #==================
# Node dependencies # Node dependencies

@ -17,12 +17,23 @@
set -e set -e
mkdir -p /var/local/git mkdir -p /var/local/git
git clone /var/local/jenkins/grpc-node /var/local/git/grpc-node git clone /var/local/jenkins/grpc-node /var/local/git/grpc-node
# clone gRPC submodules, use data from locally cloned submodules where possible # clone gRPC submodules, use data from locally cloned submodules where possible
(cd /var/local/jenkins/grpc-node/ && git submodule foreach 'cd /var/local/git/grpc-node \ (cd /var/local/jenkins/grpc-node/ && git submodule foreach 'cd /var/local/git/grpc-node \
&& git submodule update --init --recursive --reference /var/local/jenkins/grpc-node/${name} \ && git submodule update --init --recursive --reference /var/local/jenkins/grpc-node/${name} \
${name}') ${name}')
# Use the pending c-core changes if possible
if [ -d "/var/local/jenkins/grpc" ]; then
cd /var/local/jenkins/grpc
CURRENT_COMMIT="$(git rev-parse --verify HEAD)"
cd /var/local/git/grpc-node/packages/grpc-native-core/deps/grpc/
git fetch --tags --progress https://github.com/grpc/grpc.git +refs/pull/*:refs/remotes/origin/pr/*
git checkout $CURRENT_COMMIT
git submodule update --init --recursive --reference /var/local/jenkins/grpc
fi
# copy service account keys if available # copy service account keys if available
cp -r /var/local/jenkins/service_account $HOME || true cp -r /var/local/jenkins/service_account $HOME || true

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
# Prepare ccache # Prepare ccache
RUN ln -s /usr/bin/ccache /usr/local/bin/gcc RUN ln -s /usr/bin/ccache /usr/local/bin/gcc

@ -60,9 +60,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================== #==================
# Ruby dependencies # Ruby dependencies

@ -64,9 +64,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================ #================
# C# dependencies # C# dependencies

@ -38,7 +38,7 @@ RUN apk update && apk add \
zip zip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0

@ -64,9 +64,9 @@ RUN apt-get update && apt-get install -y \
python-pip python-pip
# Install Python packages from PyPI # Install Python packages from PyPI
RUN pip install --upgrade pip==9.0.2 RUN pip install --upgrade pip==10.0.1
RUN pip install virtualenv RUN pip install virtualenv
RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.0.post1 six==1.10.0 twisted==17.5.0 RUN pip install futures==2.2.0 enum34==1.0.4 protobuf==3.5.2.post1 six==1.10.0 twisted==17.5.0
#================= #=================
# C++ dependencies # C++ dependencies

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save