Removing GRPC_ERROR_[REF|UNREF|IS_NONE] (#31089)

* Removing GRPC_ERROR_[REF|UNREF|IS_NONE]

* Clean up unnecessary error

* Fix

* Fix2

* Remove unnecessary else
Esun Kim authored 2 years ago, committed by GitHub
parent 255083b724
commit ba8af0157b
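The change is mechanical and repeats across every file listed below: grpc_error_handle no longer needs manual reference counting, so GRPC_ERROR_IS_NONE(e) becomes e.ok(), GRPC_ERROR_REF(e) becomes a plain copy, and GRPC_ERROR_UNREF(e) is deleted. The sketch below is illustrative rather than code from this commit; it assumes grpc_error_handle is backed by absl::Status at this point in the migration, and the OnDone/chain names are made up.

#include "absl/status/status.h"

// Stand-in: grpc_error_handle is assumed to be an absl::Status value here,
// i.e. cheap to copy and self-cleaning, so no ref-count bookkeeping remains.
using grpc_error_handle = absl::Status;

// Hypothetical callback, only to show the three substitutions in one place.
void OnDone(grpc_error_handle error, void (*chain)(grpc_error_handle)) {
  // 1) GRPC_ERROR_IS_NONE(error)      ->  error.ok()
  if (!error.ok()) {
    // 2) chain(GRPC_ERROR_REF(error)) ->  chain(error)
    //    Handing the error to another closure is now an ordinary copy.
    chain(error);
    return;
  }
  // 3) GRPC_ERROR_UNREF(error)        ->  deleted outright; the Status
  //    destructor releases whatever the error owns.
}

The recurring deletions of "(void)GRPC_ERROR_REF(error); // ref owned by lambda" ahead of work_serializer()->Run(...) calls, and of GRPC_ERROR_UNREF in destructors and *Locked callbacks, are all instances of the third substitution.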
1. src/core/ext/filters/channel_idle/channel_idle_filter.cc (2)
2. src/core/ext/filters/client_channel/backup_poller.cc (4)
3. src/core/ext/filters/client_channel/channel_connectivity.cc (4)
4. src/core/ext/filters/client_channel/client_channel.cc (110)
5. src/core/ext/filters/client_channel/dynamic_filters.cc (2)
6. src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc (9)
7. src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc (19)
8. src/core/ext/filters/client_channel/lb_policy/outlier_detection/outlier_detection.cc (4)
9. src/core/ext/filters/client_channel/lb_policy/priority/priority.cc (8)
10. src/core/ext/filters/client_channel/lb_policy/rls/rls.cc (7)
11. src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc (4)
12. src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc (3)
13. src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc (11)
14. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc (12)
15. src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc (17)
16. src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc (9)
17. src/core/ext/filters/client_channel/resolver/polling_resolver.cc (4)
18. src/core/ext/filters/client_channel/resolver_result_parsing.cc (6)
19. src/core/ext/filters/client_channel/retry_filter.cc (108)
20. src/core/ext/filters/client_channel/retry_service_config.cc (6)
21. src/core/ext/filters/client_channel/subchannel.cc (16)
22. src/core/ext/filters/client_channel/subchannel_stream_client.cc (10)
23. src/core/ext/filters/deadline/deadline_filter.cc (12)
24. src/core/ext/filters/fault_injection/service_config_parser.cc (1)
25. src/core/ext/filters/http/message_compress/message_compress_filter.cc (14)
26. src/core/ext/filters/http/message_compress/message_decompress_filter.cc (22)
27. src/core/ext/filters/message_size/message_size_filter.cc (13)
28. src/core/ext/filters/rbac/rbac_filter.cc (6)
29. src/core/ext/filters/rbac/rbac_service_config_parser.cc (3)
30. src/core/ext/filters/server_config_selector/server_config_selector_filter.cc (3)
31. src/core/ext/transport/binder/client/binder_connector.cc (2)
32. src/core/ext/transport/binder/transport/binder_stream.h (1)
33. src/core/ext/transport/binder/transport/binder_transport.cc (30)
34. src/core/ext/transport/chttp2/client/chttp2_connector.cc (17)
35. src/core/ext/transport/chttp2/server/chttp2_server.cc (42)
36. src/core/ext/transport/chttp2/transport/chttp2_transport.cc (225)
37. src/core/ext/transport/chttp2/transport/hpack_parser.cc (13)
38. src/core/ext/transport/chttp2/transport/parsing.cc (24)
39. src/core/ext/transport/chttp2/transport/writing.cc (8)
40. src/core/ext/transport/cronet/transport/cronet_transport.cc (13)
41. src/core/ext/transport/inproc/inproc_transport.cc (105)
42. src/core/ext/xds/certificate_provider_store.cc (3)
43. src/core/ext/xds/xds_certificate_provider.cc (10)
44. src/core/ext/xds/xds_client_grpc.cc (2)
45. src/core/ext/xds/xds_transport_grpc.cc (2)
46. src/core/lib/address_utils/parse_address.cc (6)
47. src/core/lib/channel/channel_stack.cc (12)
48. src/core/lib/channel/channel_stack_builder_impl.cc (3)
49. src/core/lib/channel/connected_channel.cc (4)
50. src/core/lib/channel/promise_based_filter.cc (67)
51. src/core/lib/event_engine/windows/win_socket.cc (6)
52. src/core/lib/http/httpcli.cc (19)
53. src/core/lib/http/httpcli.h (6)
54. src/core/lib/http/httpcli_security_connector.cc (4)
55. src/core/lib/http/parser.cc (8)
56. src/core/lib/iomgr/buffer_list.cc (1)
57. src/core/lib/iomgr/buffer_list.h (4)
58. src/core/lib/iomgr/call_combiner.cc (10)
59. src/core/lib/iomgr/cfstream_handle.cc (21)
60. src/core/lib/iomgr/closure.h (4)
61. src/core/lib/iomgr/combiner.cc (2)
62. src/core/lib/iomgr/endpoint.h (2)
63. src/core/lib/iomgr/endpoint_cfstream.cc (9)
64. src/core/lib/iomgr/error.cc (6)
65. src/core/lib/iomgr/error.h (16)
66. src/core/lib/iomgr/ev_epoll1_linux.cc (15)
67. src/core/lib/iomgr/ev_poll_posix.cc (21)
68. src/core/lib/iomgr/exec_ctx.cc (1)
69. src/core/lib/iomgr/load_file.cc (3)
70. src/core/lib/iomgr/resolve_address_posix.cc (3)
71. src/core/lib/iomgr/resolve_address_windows.cc (3)
72. src/core/lib/iomgr/tcp_client_cfstream.cc (6)
73. src/core/lib/iomgr/tcp_client_posix.cc (25)
74. src/core/lib/iomgr/tcp_client_windows.cc (9)
75. src/core/lib/iomgr/tcp_posix.cc (12)
76. src/core/lib/iomgr/tcp_server_posix.cc (16)
77. src/core/lib/iomgr/tcp_server_utils_posix_common.cc (26)
78. src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc (4)
79. src/core/lib/iomgr/tcp_server_windows.cc (16)
80. src/core/lib/iomgr/tcp_windows.cc (17)
81. src/core/lib/iomgr/timer_generic.cc (5)
82. src/core/lib/iomgr/unix_sockets_posix.cc (6)
83. src/core/lib/iomgr/wakeup_fd_pipe.cc (4)
84. src/core/lib/security/authorization/grpc_authorization_policy_provider.cc (3)
85. src/core/lib/security/credentials/external/aws_external_account_credentials.cc (26)
86. src/core/lib/security/credentials/external/external_account_credentials.cc (21)
87. src/core/lib/security/credentials/external/file_external_account_credentials.cc (4)
88. src/core/lib/security/credentials/external/url_external_account_credentials.cc (8)
89. src/core/lib/security/credentials/google_default/google_default_credentials.cc (11)
90. src/core/lib/security/credentials/oauth2/oauth2_credentials.cc (22)
91. src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc (31)
92. src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.h (11)
93. src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc (24)
94. src/core/lib/security/security_connector/alts/alts_security_connector.cc (8)
95. src/core/lib/security/security_connector/fake/fake_security_connector.cc (8)
96. src/core/lib/security/security_connector/insecure/insecure_security_connector.h (8)
97. src/core/lib/security/security_connector/load_system_roots_supported.cc (4)
98. src/core/lib/security/security_connector/local/local_security_connector.cc (8)
99. src/core/lib/security/security_connector/ssl/ssl_security_connector.cc (13)
100. src/core/lib/security/security_connector/tls/tls_security_connector.cc (22)
Some files were not shown because too many files have changed in this diff.

@ -216,7 +216,7 @@ ArenaPromise<ServerMetadataHandle> ChannelIdleFilter::MakeCallPromise(
bool ChannelIdleFilter::StartTransportOp(grpc_transport_op* op) {
// Catch the disconnect_with_error transport op.
if (!GRPC_ERROR_IS_NONE(op->disconnect_with_error)) Shutdown();
if (!op->disconnect_with_error.ok()) Shutdown();
// Pass the op to the next filter.
return false;
}

@ -117,9 +117,9 @@ static void g_poller_unref() {
static void run_poller(void* arg, grpc_error_handle error) {
backup_poller* p = static_cast<backup_poller*>(arg);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
if (error != GRPC_ERROR_CANCELLED) {
GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("run_poller", error);
}
backup_poller_shutdown_unref(p);
return;

@ -171,7 +171,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
static void WatchComplete(void* arg, grpc_error_handle error) {
auto* self = static_cast<StateWatcher*>(arg);
if (GRPC_TRACE_FLAG_ENABLED(grpc_trace_operation_failures)) {
GRPC_LOG_IF_ERROR("watch_completion_error", GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("watch_completion_error", error);
}
grpc_timer_cancel(&self->timer_);
self->Unref();
@ -179,7 +179,7 @@ class StateWatcher : public DualRefCounted<StateWatcher> {
static void TimeoutComplete(void* arg, grpc_error_handle error) {
auto* self = static_cast<StateWatcher*>(arg);
self->timer_fired_ = GRPC_ERROR_IS_NONE(error);
self->timer_fired_ = error.ok();
// If this is a client channel (not a lame channel), cancel the watch.
ClientChannel* client_channel =
ClientChannel::GetFromChannel(self->channel_.get());

@ -890,7 +890,7 @@ class ClientChannel::ClientChannelControlHelper
ABSL_EXCLUSIVE_LOCKS_REQUIRED(*chand_->work_serializer_) {
if (chand_->resolver_ == nullptr) return; // Shutting down.
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
const char* extra = GRPC_ERROR_IS_NONE(chand_->disconnect_error_)
const char* extra = chand_->disconnect_error_.ok()
? ""
: " (ignoring -- channel shutting down)";
gpr_log(GPR_INFO, "chand=%p: update: state=%s status=(%s) picker=%p%s",
@ -898,7 +898,7 @@ class ClientChannel::ClientChannelControlHelper
picker.get(), extra);
}
// Do update only if not shutting down.
if (GRPC_ERROR_IS_NONE(chand_->disconnect_error_)) {
if (chand_->disconnect_error_.ok()) {
chand_->UpdateStateAndPickerLocked(state, status, "helper",
std::move(picker));
}
@ -1066,7 +1066,6 @@ ClientChannel::~ClientChannel() {
// Stop backup polling.
grpc_client_channel_stop_backup_polling(interested_parties_);
grpc_pollset_set_destroy(interested_parties_);
GRPC_ERROR_UNREF(disconnect_error_);
}
OrphanablePtr<ClientChannel::LoadBalancedCall>
@ -1326,7 +1325,6 @@ void ClientChannel::OnResolverErrorLocked(absl::Status status) {
}
}
}
GRPC_ERROR_UNREF(error);
// Update connectivity state.
UpdateStateAndPickerLocked(
GRPC_CHANNEL_TRANSIENT_FAILURE, status, "resolver failure",
@ -1680,9 +1678,8 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
// Ping.
if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
grpc_error_handle error = DoPingLocked(op);
if (!GRPC_ERROR_IS_NONE(error)) {
ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_initiate,
GRPC_ERROR_REF(error));
if (!error.ok()) {
ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_initiate, error);
ExecCtx::Run(DEBUG_LOCATION, op->send_ping.on_ack, error);
}
op->bind_pollset = nullptr;
@ -1696,7 +1693,7 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
}
}
// Disconnect or enter IDLE.
if (!GRPC_ERROR_IS_NONE(op->disconnect_with_error)) {
if (!op->disconnect_with_error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_trace)) {
gpr_log(GPR_INFO, "chand=%p: disconnect_with_error: %s", this,
grpc_error_std_string(op->disconnect_with_error).c_str());
@ -1706,15 +1703,14 @@ void ClientChannel::StartTransportOpLocked(grpc_transport_op* op) {
if (grpc_error_get_int(op->disconnect_with_error,
GRPC_ERROR_INT_CHANNEL_CONNECTIVITY_STATE, &value) &&
static_cast<grpc_connectivity_state>(value) == GRPC_CHANNEL_IDLE) {
if (GRPC_ERROR_IS_NONE(disconnect_error_)) {
if (disconnect_error_.ok()) {
// Enter IDLE state.
UpdateStateAndPickerLocked(GRPC_CHANNEL_IDLE, absl::Status(),
"channel entering IDLE", nullptr);
}
GRPC_ERROR_UNREF(op->disconnect_with_error);
} else {
// Disconnect.
GPR_ASSERT(GRPC_ERROR_IS_NONE(disconnect_error_));
GPR_ASSERT(disconnect_error_.ok());
disconnect_error_ = op->disconnect_with_error;
UpdateStateAndPickerLocked(
GRPC_CHANNEL_SHUTDOWN, absl::Status(), "shutdown from API",
@ -1841,7 +1837,6 @@ ClientChannel::CallData::CallData(grpc_call_element* elem,
ClientChannel::CallData::~CallData() {
grpc_slice_unref(path_);
GRPC_ERROR_UNREF(cancel_error_);
// Make sure there are no remaining pending batches.
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
GPR_ASSERT(pending_batches_[i] == nullptr);
@ -1908,7 +1903,7 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
// We do not yet have a dynamic call.
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(calld->cancel_error_))) {
if (GPR_UNLIKELY(!calld->cancel_error_.ok())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: failing batch with error: %s",
chand, calld,
@ -1916,7 +1911,7 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
}
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
batch, calld->cancel_error_, calld->call_combiner_);
return;
}
// Handle cancellation.
@ -1926,19 +1921,16 @@ void ClientChannel::CallData::StartTransportStreamOpBatch(
// cancelled before any batches are passed down (e.g., if the deadline
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
GRPC_ERROR_UNREF(calld->cancel_error_);
calld->cancel_error_ =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
calld->cancel_error_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: recording cancel_error=%s", chand,
calld, grpc_error_std_string(calld->cancel_error_).c_str());
}
// Fail all pending batches.
calld->PendingBatchesFail(elem, GRPC_ERROR_REF(calld->cancel_error_),
NoYieldCallCombiner);
calld->PendingBatchesFail(elem, calld->cancel_error_, NoYieldCallCombiner);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(calld->cancel_error_), calld->call_combiner_);
batch, calld->cancel_error_, calld->call_combiner_);
return;
}
// Add the batch to the pending list.
@ -2012,15 +2004,15 @@ void ClientChannel::CallData::FailPendingBatchInCallCombiner(
static_cast<grpc_transport_stream_op_batch*>(arg);
CallData* calld = static_cast<CallData*>(batch->handler_private.extra_arg);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(error), calld->call_combiner_);
grpc_transport_stream_op_batch_finish_with_failure(batch, error,
calld->call_combiner_);
}
// This is called via the call combiner, so access to calld is synchronized.
void ClientChannel::CallData::PendingBatchesFail(
grpc_call_element* elem, grpc_error_handle error,
YieldCallCombinerPredicate yield_call_combiner_predicate) {
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2039,7 +2031,7 @@ void ClientChannel::CallData::PendingBatchesFail(
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
FailPendingBatchInCallCombiner, batch,
grpc_schedule_on_exec_ctx);
closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
closures.Add(&batch->handler_private.closure, error,
"PendingBatchesFail");
batch = nullptr;
}
@ -2049,7 +2041,6 @@ void ClientChannel::CallData::PendingBatchesFail(
} else {
closures.RunClosuresWithoutYielding(call_combiner_);
}
GRPC_ERROR_UNREF(error);
}
// This is called via the call combiner, so access to calld is synchronized.
@ -2124,12 +2115,11 @@ class ClientChannel::CallData::ResolverQueuedCallCanceller {
chand, calld, grpc_error_std_string(error).c_str(), self,
calld->resolver_call_canceller_);
}
if (calld->resolver_call_canceller_ == self &&
!GRPC_ERROR_IS_NONE(error)) {
if (calld->resolver_call_canceller_ == self && !error.ok()) {
// Remove pick from list of queued picks.
calld->MaybeRemoveCallFromResolverQueuedCallsLocked(self->elem_);
// Fail pending batches on the call.
calld->PendingBatchesFail(self->elem_, GRPC_ERROR_REF(error),
calld->PendingBatchesFail(self->elem_, error,
YieldCallCombinerIfPendingBatchesFound);
}
}
@ -2252,7 +2242,7 @@ void ClientChannel::CallData::
}
// Chain to original callback.
Closure::Run(DEBUG_LOCATION, calld->original_recv_trailing_metadata_ready_,
GRPC_ERROR_REF(error));
error);
}
void ClientChannel::CallData::AsyncResolutionDone(grpc_call_element* elem,
@ -2267,13 +2257,13 @@ void ClientChannel::CallData::ResolutionDone(void* arg,
grpc_call_element* elem = static_cast<grpc_call_element*>(arg);
ClientChannel* chand = static_cast<ClientChannel*>(elem->channel_data);
CallData* calld = static_cast<CallData*>(elem->call_data);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: error applying config to call: error=%s",
chand, calld, grpc_error_std_string(error).c_str());
}
calld->PendingBatchesFail(elem, GRPC_ERROR_REF(error), YieldCallCombiner);
calld->PendingBatchesFail(elem, error, YieldCallCombiner);
return;
}
calld->CreateDynamicCall(elem);
@ -2291,7 +2281,6 @@ void ClientChannel::CallData::CheckResolution(void* arg,
}
if (resolution_complete) {
ResolutionDone(elem, error);
GRPC_ERROR_UNREF(error);
}
}
@ -2384,7 +2373,7 @@ void ClientChannel::CallData::CreateDynamicCall(grpc_call_element* elem) {
chand, this, channel_stack);
}
dynamic_call_ = channel_stack->CreateCall(std::move(args), &error);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p: failed to create dynamic call: error=%s",
@ -2572,8 +2561,6 @@ ClientChannel::LoadBalancedCall::LoadBalancedCall(
}
ClientChannel::LoadBalancedCall::~LoadBalancedCall() {
GRPC_ERROR_UNREF(cancel_error_);
GRPC_ERROR_UNREF(failure_error_);
if (backend_metric_data_ != nullptr) {
backend_metric_data_->BackendMetricData::~BackendMetricData();
}
@ -2636,16 +2623,15 @@ void ClientChannel::LoadBalancedCall::FailPendingBatchInCallCombiner(
static_cast<grpc_transport_stream_op_batch*>(arg);
auto* self = static_cast<LoadBalancedCall*>(batch->handler_private.extra_arg);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(error), self->call_combiner_);
grpc_transport_stream_op_batch_finish_with_failure(batch, error,
self->call_combiner_);
}
// This is called via the call combiner, so access to calld is synchronized.
void ClientChannel::LoadBalancedCall::PendingBatchesFail(
grpc_error_handle error,
YieldCallCombinerPredicate yield_call_combiner_predicate) {
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GRPC_ERROR_UNREF(failure_error_);
GPR_ASSERT(!error.ok());
failure_error_ = error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
size_t num_batches = 0;
@ -2664,7 +2650,7 @@ void ClientChannel::LoadBalancedCall::PendingBatchesFail(
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
FailPendingBatchInCallCombiner, batch,
grpc_schedule_on_exec_ctx);
closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
closures.Add(&batch->handler_private.closure, error,
"PendingBatchesFail");
batch = nullptr;
}
@ -2731,7 +2717,7 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// Record send ops in tracer.
if (batch->cancel_stream) {
call_attempt_tracer_->RecordCancel(
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error));
batch->payload->cancel_stream.cancel_error);
}
if (batch->send_initial_metadata) {
call_attempt_tracer_->RecordSendInitialMetadata(
@ -2799,14 +2785,14 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// We do not yet have a subchannel call.
//
// If we've previously been cancelled, immediately fail any new batches.
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(cancel_error_))) {
if (GPR_UNLIKELY(!cancel_error_.ok())) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: failing batch with error: %s",
chand_, this, grpc_error_std_string(cancel_error_).c_str());
}
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
call_combiner_);
return;
}
// Handle cancellation.
@ -2816,17 +2802,16 @@ void ClientChannel::LoadBalancedCall::StartTransportStreamOpBatch(
// cancelled before any batches are passed down (e.g., if the deadline
// is in the past when the call starts), we can return the right
// error to the caller when the first batch does get passed down.
GRPC_ERROR_UNREF(cancel_error_);
cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
cancel_error_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO, "chand=%p lb_call=%p: recording cancel_error=%s",
chand_, this, grpc_error_std_string(cancel_error_).c_str());
}
// Fail all pending batches.
PendingBatchesFail(GRPC_ERROR_REF(cancel_error_), NoYieldCallCombiner);
PendingBatchesFail(cancel_error_, NoYieldCallCombiner);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
call_combiner_);
return;
}
// Add the batch to the pending list.
@ -2864,8 +2849,7 @@ void ClientChannel::LoadBalancedCall::SendInitialMetadataOnComplete(
self->call_attempt_tracer_->RecordOnDoneSendInitialMetadata(
self->peer_string_);
Closure::Run(DEBUG_LOCATION,
self->original_send_initial_metadata_on_complete_,
GRPC_ERROR_REF(error));
self->original_send_initial_metadata_on_complete_, error);
}
void ClientChannel::LoadBalancedCall::RecvInitialMetadataReady(
@ -2876,13 +2860,13 @@ void ClientChannel::LoadBalancedCall::RecvInitialMetadataReady(
"chand=%p lb_call=%p: got recv_initial_metadata_ready: error=%s",
self->chand_, self, grpc_error_std_string(error).c_str());
}
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
// recv_initial_metadata_flags is not populated for clients
self->call_attempt_tracer_->RecordReceivedInitialMetadata(
self->recv_initial_metadata_, 0 /* recv_initial_metadata_flags */);
}
Closure::Run(DEBUG_LOCATION, self->original_recv_initial_metadata_ready_,
GRPC_ERROR_REF(error));
error);
}
void ClientChannel::LoadBalancedCall::RecvMessageReady(
@ -2895,8 +2879,7 @@ void ClientChannel::LoadBalancedCall::RecvMessageReady(
if (self->recv_message_->has_value()) {
self->call_attempt_tracer_->RecordReceivedMessage(**self->recv_message_);
}
Closure::Run(DEBUG_LOCATION, self->original_recv_message_ready_,
GRPC_ERROR_REF(error));
Closure::Run(DEBUG_LOCATION, self->original_recv_message_ready_, error);
}
void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
@ -2916,7 +2899,7 @@ void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
self->lb_subchannel_call_tracker_ != nullptr) {
// Get the call's status.
absl::Status status;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
// Get status from error.
grpc_status_code code;
std::string message;
@ -2939,11 +2922,9 @@ void ClientChannel::LoadBalancedCall::RecvTrailingMetadataReady(
self->RecordCallCompletion(status);
}
// Chain to original callback.
if (!GRPC_ERROR_IS_NONE(self->failure_error_)) {
if (!self->failure_error_.ok()) {
error = self->failure_error_;
self->failure_error_ = GRPC_ERROR_NONE;
} else {
error = GRPC_ERROR_REF(error);
}
Closure::Run(DEBUG_LOCATION, self->original_recv_trailing_metadata_ready_,
error);
@ -2986,7 +2967,7 @@ void ClientChannel::LoadBalancedCall::CreateSubchannelCall() {
subchannel_call_->SetAfterCallStackDestroy(on_call_destruction_complete_);
on_call_destruction_complete_ = nullptr;
}
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(error))) {
if (GPR_UNLIKELY(!error.ok())) {
PendingBatchesFail(error, YieldCallCombiner);
} else {
PendingBatchesResume();
@ -3023,12 +3004,12 @@ class ClientChannel::LoadBalancedCall::LbQueuedCallCanceller {
chand, lb_call, grpc_error_std_string(error).c_str(), self,
lb_call->lb_call_canceller_);
}
if (lb_call->lb_call_canceller_ == self && !GRPC_ERROR_IS_NONE(error)) {
if (lb_call->lb_call_canceller_ == self && !error.ok()) {
lb_call->call_dispatch_controller_->Commit();
// Remove pick from list of queued picks.
lb_call->MaybeRemoveCallFromLbQueuedCallsLocked();
// Fail pending batches on the call.
lb_call->PendingBatchesFail(GRPC_ERROR_REF(error),
lb_call->PendingBatchesFail(error,
YieldCallCombinerIfPendingBatchesFound);
}
}
@ -3074,13 +3055,13 @@ void ClientChannel::LoadBalancedCall::AsyncPickDone(grpc_error_handle error) {
void ClientChannel::LoadBalancedCall::PickDone(void* arg,
grpc_error_handle error) {
auto* self = static_cast<LoadBalancedCall*>(arg);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_client_channel_lb_call_trace)) {
gpr_log(GPR_INFO,
"chand=%p lb_call=%p: failed to pick subchannel: error=%s",
self->chand_, self, grpc_error_std_string(error).c_str());
}
self->PendingBatchesFail(GRPC_ERROR_REF(error), YieldCallCombiner);
self->PendingBatchesFail(error, YieldCallCombiner);
return;
}
self->call_dispatch_controller_->Commit();
@ -3097,7 +3078,6 @@ void ClientChannel::LoadBalancedCall::PickSubchannel(void* arg,
}
if (pick_complete) {
PickDone(self, error);
GRPC_ERROR_UNREF(error);
}
}

@ -66,7 +66,7 @@ DynamicFilters::Call::Call(Args args, grpc_error_handle* error)
};
*error = grpc_call_stack_init(channel_stack_->channel_stack_.get(), 1,
Destroy, this, &call_args);
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(*error))) {
if (GPR_UNLIKELY(!error->ok())) {
gpr_log(GPR_ERROR, "error: %s", grpc_error_std_string(*error).c_str());
return;
}

@ -60,21 +60,20 @@ struct call_data {
static void on_complete_for_send(void* arg, grpc_error_handle error) {
call_data* calld = static_cast<call_data*>(arg);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
calld->send_initial_metadata_succeeded = true;
}
grpc_core::Closure::Run(DEBUG_LOCATION, calld->original_on_complete_for_send,
GRPC_ERROR_REF(error));
error);
}
static void recv_initial_metadata_ready(void* arg, grpc_error_handle error) {
call_data* calld = static_cast<call_data*>(arg);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
calld->recv_initial_metadata_succeeded = true;
}
grpc_core::Closure::Run(DEBUG_LOCATION,
calld->original_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
calld->original_recv_initial_metadata_ready, error);
}
static grpc_error_handle clr_init_call_elem(

@ -1108,7 +1108,6 @@ void GrpcLb::BalancerCallState::SendClientLoadReportLocked() {
void GrpcLb::BalancerCallState::ClientLoadReportDone(void* arg,
grpc_error_handle error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld, error]() { lb_calld->ClientLoadReportDoneLocked(error); },
DEBUG_LOCATION);
@ -1118,9 +1117,8 @@ void GrpcLb::BalancerCallState::ClientLoadReportDoneLocked(
grpc_error_handle error) {
grpc_byte_buffer_destroy(send_message_payload_);
send_message_payload_ = nullptr;
if (!GRPC_ERROR_IS_NONE(error) || this != grpclb_policy()->lb_calld_.get()) {
if (!error.ok() || this != grpclb_policy()->lb_calld_.get()) {
Unref(DEBUG_LOCATION, "client_load_report");
GRPC_ERROR_UNREF(error);
return;
}
ScheduleNextClientLoadReportLocked();
@ -1313,7 +1311,6 @@ void GrpcLb::BalancerCallState::OnBalancerMessageReceivedLocked() {
void GrpcLb::BalancerCallState::OnBalancerStatusReceived(
void* arg, grpc_error_handle error) {
BalancerCallState* lb_calld = static_cast<BalancerCallState*>(arg);
(void)GRPC_ERROR_REF(error); // owned by lambda
lb_calld->grpclb_policy()->work_serializer()->Run(
[lb_calld, error]() { lb_calld->OnBalancerStatusReceivedLocked(error); },
DEBUG_LOCATION);
@ -1331,7 +1328,6 @@ void GrpcLb::BalancerCallState::OnBalancerStatusReceivedLocked(
grpc_error_std_string(error).c_str());
gpr_free(status_details);
}
GRPC_ERROR_UNREF(error);
// If this lb_calld is still in use, this call ended because of a failure so
// we want to retry connecting. Otherwise, we have deliberately ended this
// call and no further action is required.
@ -1697,7 +1693,6 @@ void GrpcLb::StartBalancerCallRetryTimerLocked() {
void GrpcLb::OnBalancerCallRetryTimer(void* arg, grpc_error_handle error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
grpclb_policy->work_serializer()->Run(
[grpclb_policy, error]() {
grpclb_policy->OnBalancerCallRetryTimerLocked(error);
@ -1707,14 +1702,13 @@ void GrpcLb::OnBalancerCallRetryTimer(void* arg, grpc_error_handle error) {
void GrpcLb::OnBalancerCallRetryTimerLocked(grpc_error_handle error) {
retry_timer_callback_pending_ = false;
if (!shutting_down_ && GRPC_ERROR_IS_NONE(error) && lb_calld_ == nullptr) {
if (!shutting_down_ && error.ok() && lb_calld_ == nullptr) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", this);
}
StartBalancerCallLocked();
}
Unref(DEBUG_LOCATION, "on_balancer_call_retry_timer");
GRPC_ERROR_UNREF(error);
}
//
@ -1741,7 +1735,6 @@ void GrpcLb::MaybeEnterFallbackModeAfterStartup() {
void GrpcLb::OnFallbackTimer(void* arg, grpc_error_handle error) {
GrpcLb* grpclb_policy = static_cast<GrpcLb*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
grpclb_policy->work_serializer()->Run(
[grpclb_policy, error]() { grpclb_policy->OnFallbackTimerLocked(error); },
DEBUG_LOCATION);
@ -1750,8 +1743,7 @@ void GrpcLb::OnFallbackTimer(void* arg, grpc_error_handle error) {
void GrpcLb::OnFallbackTimerLocked(grpc_error_handle error) {
// If we receive a serverlist after the timer fires but before this callback
// actually runs, don't fall back.
if (fallback_at_startup_checks_pending_ && !shutting_down_ &&
GRPC_ERROR_IS_NONE(error)) {
if (fallback_at_startup_checks_pending_ && !shutting_down_ && error.ok()) {
gpr_log(GPR_INFO,
"[grpclb %p] No response from balancer after fallback timeout; "
"entering fallback mode",
@ -1762,7 +1754,6 @@ void GrpcLb::OnFallbackTimerLocked(grpc_error_handle error) {
CreateOrUpdateChildPolicyLocked();
}
Unref(DEBUG_LOCATION, "on_fallback_timer");
GRPC_ERROR_UNREF(error);
}
//
@ -1864,14 +1855,13 @@ void GrpcLb::StartSubchannelCacheTimerLocked() {
void GrpcLb::OnSubchannelCacheTimer(void* arg, grpc_error_handle error) {
auto* self = static_cast<GrpcLb*>(arg);
(void)GRPC_ERROR_REF(error);
self->work_serializer()->Run(
[self, error]() { self->GrpcLb::OnSubchannelCacheTimerLocked(error); },
DEBUG_LOCATION);
}
void GrpcLb::OnSubchannelCacheTimerLocked(grpc_error_handle error) {
if (subchannel_cache_timer_pending_ && GRPC_ERROR_IS_NONE(error)) {
if (subchannel_cache_timer_pending_ && error.ok()) {
auto it = cached_subchannels_.begin();
if (it != cached_subchannels_.end()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_glb_trace)) {
@ -1888,7 +1878,6 @@ void GrpcLb::OnSubchannelCacheTimerLocked(grpc_error_handle error) {
subchannel_cache_timer_pending_ = false;
}
Unref(DEBUG_LOCATION, "OnSubchannelCacheTimer");
GRPC_ERROR_UNREF(error);
}
//

@ -816,13 +816,12 @@ void OutlierDetectionLb::EjectionTimer::Orphan() {
void OutlierDetectionLb::EjectionTimer::OnTimer(void* arg,
grpc_error_handle error) {
auto* self = static_cast<EjectionTimer*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
self->parent_->work_serializer()->Run(
[self, error]() { self->OnTimerLocked(error); }, DEBUG_LOCATION);
}
void OutlierDetectionLb::EjectionTimer::OnTimerLocked(grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error) && timer_pending_) {
if (error.ok() && timer_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_outlier_detection_lb_trace)) {
gpr_log(GPR_INFO, "[outlier_detection_lb %p] ejection timer running",
parent_.get());
@ -1005,7 +1004,6 @@ void OutlierDetectionLb::EjectionTimer::OnTimerLocked(grpc_error_handle error) {
MakeOrphanable<EjectionTimer>(parent_, Timestamp::Now());
}
Unref(DEBUG_LOCATION, "Timer");
GRPC_ERROR_UNREF(error);
}
//

@ -566,14 +566,13 @@ void PriorityLb::ChildPriority::DeactivationTimer::Orphan() {
void PriorityLb::ChildPriority::DeactivationTimer::OnTimer(
void* arg, grpc_error_handle error) {
auto* self = static_cast<DeactivationTimer*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
self->child_priority_->priority_policy_->work_serializer()->Run(
[self, error]() { self->OnTimerLocked(error); }, DEBUG_LOCATION);
}
void PriorityLb::ChildPriority::DeactivationTimer::OnTimerLocked(
grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error) && timer_pending_) {
if (error.ok() && timer_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): deactivation timer fired, "
@ -585,7 +584,6 @@ void PriorityLb::ChildPriority::DeactivationTimer::OnTimerLocked(
child_priority_->priority_policy_->DeleteChild(child_priority_.get());
}
Unref(DEBUG_LOCATION, "Timer");
GRPC_ERROR_UNREF(error);
}
//
@ -630,14 +628,13 @@ void PriorityLb::ChildPriority::FailoverTimer::Orphan() {
void PriorityLb::ChildPriority::FailoverTimer::OnTimer(
void* arg, grpc_error_handle error) {
auto* self = static_cast<FailoverTimer*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
self->child_priority_->priority_policy_->work_serializer()->Run(
[self, error]() { self->OnTimerLocked(error); }, DEBUG_LOCATION);
}
void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked(
grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error) && timer_pending_) {
if (error.ok() && timer_pending_) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_lb_priority_trace)) {
gpr_log(GPR_INFO,
"[priority_lb %p] child %s (%p): failover timer fired, "
@ -652,7 +649,6 @@ void PriorityLb::ChildPriority::FailoverTimer::OnTimerLocked(
nullptr);
}
Unref(DEBUG_LOCATION, "Timer");
GRPC_ERROR_UNREF(error);
}
//

@ -1416,7 +1416,6 @@ void RlsLb::Cache::Shutdown() {
void RlsLb::Cache::OnCleanupTimer(void* arg, grpc_error_handle error) {
Cache* cache = static_cast<Cache*>(arg);
(void)GRPC_ERROR_REF(error);
cache->lb_policy_->work_serializer()->Run(
[cache, error]() {
RefCountedPtr<RlsLb> lb_policy(cache->lb_policy_);
@ -1753,7 +1752,6 @@ void RlsLb::RlsRequest::StartCallLocked() {
void RlsLb::RlsRequest::OnRlsCallComplete(void* arg, grpc_error_handle error) {
auto* request = static_cast<RlsRequest*>(arg);
(void)GRPC_ERROR_REF(error);
request->lb_policy_->work_serializer()->Run(
[request, error]() {
request->OnRlsCallCompleteLocked(error);
@ -1774,7 +1772,7 @@ void RlsLb::RlsRequest::OnRlsCallCompleteLocked(grpc_error_handle error) {
}
// Parse response.
ResponseInfo response;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_status_code code;
std::string message;
grpc_error_get_status(error, deadline_, &code, &message,
@ -2424,9 +2422,8 @@ void RlsLbConfig::JsonPostLoad(const Json& json, const JsonArgs&,
rls_channel_service_config_ = it->second.Dump();
auto service_config = MakeRefCounted<ServiceConfigImpl>(
ChannelArgs(), rls_channel_service_config_, it->second, &child_error);
if (!GRPC_ERROR_IS_NONE(child_error)) {
if (!child_error.ok()) {
errors->AddError(grpc_error_std_string(child_error));
GRPC_ERROR_UNREF(child_error);
}
}
// Validate childPolicyConfigTargetFieldName.

@ -559,7 +559,6 @@ void XdsClusterManagerLb::ClusterChild::DeactivateLocked() {
void XdsClusterManagerLb::ClusterChild::OnDelayedRemovalTimer(
void* arg, grpc_error_handle error) {
ClusterChild* self = static_cast<ClusterChild*>(arg);
(void)GRPC_ERROR_REF(error); // Ref owned by the lambda
self->xds_cluster_manager_policy_->work_serializer()->Run(
[self, error]() { self->OnDelayedRemovalTimerLocked(error); },
DEBUG_LOCATION);
@ -568,11 +567,10 @@ void XdsClusterManagerLb::ClusterChild::OnDelayedRemovalTimer(
void XdsClusterManagerLb::ClusterChild::OnDelayedRemovalTimerLocked(
grpc_error_handle error) {
delayed_removal_timer_callback_pending_ = false;
if (GRPC_ERROR_IS_NONE(error) && !shutdown_) {
if (error.ok() && !shutdown_) {
xds_cluster_manager_policy_->children_.erase(name_);
}
Unref(DEBUG_LOCATION, "ClusterChild+timer");
GRPC_ERROR_UNREF(error);
}
//

@ -124,9 +124,8 @@ class BinderResolverFactory : public ResolverFactory {
return false;
}
grpc_error_handle error = BinderAddrPopulate(uri.path(), &addr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR, "%s", grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return false;
}
}

@ -410,11 +410,10 @@ AresClientChannelDNSResolver::AresRequestWrapper::OnResolvedLocked(
grpc_error_handle service_config_error = GRPC_ERROR_NONE;
std::string service_config_string =
ChooseServiceConfig(service_config_json_, &service_config_error);
if (!GRPC_ERROR_IS_NONE(service_config_error)) {
if (!service_config_error.ok()) {
result.service_config = absl::UnavailableError(
absl::StrCat("failed to parse service config: ",
grpc_error_std_string(service_config_error)));
GRPC_ERROR_UNREF(service_config_error);
} else if (!service_config_string.empty()) {
GRPC_CARES_TRACE_LOG("resolver:%p selected service config choice: %s",
this, service_config_string.c_str());
@ -618,7 +617,7 @@ class AresDNSResolver : public DNSResolver {
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresHostnameRequest:%p OnComplete", this);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
on_resolve_address_done_(grpc_error_to_absl_status(error));
return;
}
@ -668,7 +667,7 @@ class AresDNSResolver : public DNSResolver {
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p OnComplete", this);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
on_resolve_address_done_(grpc_error_to_absl_status(error));
return;
}
@ -716,7 +715,7 @@ class AresDNSResolver : public DNSResolver {
void OnComplete(grpc_error_handle error) override {
GRPC_CARES_TRACE_LOG("AresSRVRequest:%p OnComplete", this);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
on_resolved_(grpc_error_to_absl_status(error));
return;
}
@ -849,7 +848,7 @@ void grpc_resolver_dns_ares_init() {
if (grpc_core::UseAresDnsResolver()) {
address_sorting_init();
grpc_error_handle error = grpc_ares_init();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
GRPC_LOG_IF_ERROR("grpc_ares_init() failed", error);
return;
}

@ -435,7 +435,7 @@ class GrpcPolledFdWindows {
GPR_ASSERT(!connect_done_);
connect_done_ = true;
GPR_ASSERT(wsa_connect_error_ == 0);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
DWORD transferred_bytes = 0;
DWORD flags;
BOOL wsa_success =
@ -568,7 +568,6 @@ class GrpcPolledFdWindows {
static void OnIocpReadable(void* arg, grpc_error_handle error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
(void)GRPC_ERROR_REF(error);
MutexLock lock(polled_fd->mu_);
polled_fd->OnIocpReadableLocked(error);
}
@ -579,7 +578,7 @@ class GrpcPolledFdWindows {
// the entire resolution attempt. Doing so will allow the "inject broken
// nameserver list" test to pass on Windows.
void OnIocpReadableLocked(grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (winsocket_->read_info.wsa_error != 0) {
/* WSAEMSGSIZE would be due to receiving more data
* than our read buffer's fixed capacity. Assume that
@ -596,7 +595,7 @@ class GrpcPolledFdWindows {
}
}
}
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
read_buf_ = grpc_slice_sub_no_ref(
read_buf_, 0, winsocket_->read_info.bytes_transferred);
read_buf_has_data_ = true;
@ -612,7 +611,6 @@ class GrpcPolledFdWindows {
static void OnIocpWriteable(void* arg, grpc_error_handle error) {
GrpcPolledFdWindows* polled_fd = static_cast<GrpcPolledFdWindows*>(arg);
(void)GRPC_ERROR_REF(error);
MutexLock lock(polled_fd->mu_);
polled_fd->OnIocpWriteableLocked(error);
}
@ -620,7 +618,7 @@ class GrpcPolledFdWindows {
void OnIocpWriteableLocked(grpc_error_handle error) {
GRPC_CARES_TRACE_LOG("OnIocpWriteableInner. fd:|%s|", GetName());
GPR_ASSERT(socket_type_ == SOCK_STREAM);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (winsocket_->write_info.wsa_error != 0) {
error = GRPC_WSA_ERROR(winsocket_->write_info.wsa_error,
"OnIocpWriteableInner");
@ -632,7 +630,7 @@ class GrpcPolledFdWindows {
}
}
GPR_ASSERT(tcp_write_state_ == WRITE_PENDING);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
tcp_write_state_ = WRITE_WAITING_FOR_VERIFICATION_UPON_RETRY;
write_buf_ = grpc_slice_sub_no_ref(
write_buf_, 0, winsocket_->write_info.bytes_transferred);

@ -295,7 +295,7 @@ static void on_timeout(void* arg, grpc_error_handle error) {
"err=%s",
driver->request, driver, driver->shutting_down,
grpc_error_std_string(error).c_str());
if (!driver->shutting_down && GRPC_ERROR_IS_NONE(error)) {
if (!driver->shutting_down && error.ok()) {
grpc_ares_ev_driver_shutdown_locked(driver);
}
grpc_ares_ev_driver_unref(driver);
@ -321,7 +321,7 @@ static void on_ares_backup_poll_alarm(void* arg, grpc_error_handle error) {
"err=%s",
driver->request, driver, driver->shutting_down,
grpc_error_std_string(error).c_str());
if (!driver->shutting_down && GRPC_ERROR_IS_NONE(error)) {
if (!driver->shutting_down && error.ok()) {
fd_node* fdn = driver->fds;
while (fdn != nullptr) {
if (!fdn->already_shutdown) {
@ -363,7 +363,7 @@ static void on_readable(void* arg, grpc_error_handle error) {
fdn->readable_registered = false;
GRPC_CARES_TRACE_LOG("request:%p readable on %s", fdn->ev_driver->request,
fdn->grpc_polled_fd->GetName());
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
do {
ares_process_fd(ev_driver->channel, as, ARES_SOCKET_BAD);
} while (fdn->grpc_polled_fd->IsFdStillReadableLocked());
@ -389,7 +389,7 @@ static void on_writable(void* arg, grpc_error_handle error) {
fdn->writable_registered = false;
GRPC_CARES_TRACE_LOG("request:%p writable on %s", ev_driver->request,
fdn->grpc_polled_fd->GetName());
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
ares_process_fd(ev_driver->channel, ARES_SOCKET_BAD, as);
} else {
// If error is not GRPC_ERROR_NONE, it means the fd has been shutdown or
@ -603,7 +603,6 @@ void grpc_ares_complete_request_locked(grpc_ares_request* r)
r->ev_driver = nullptr;
if (r->addresses_out != nullptr && *r->addresses_out != nullptr) {
grpc_cares_wrapper_address_sorting_sort(r, r->addresses_out->get());
GRPC_ERROR_UNREF(r->error);
r->error = GRPC_ERROR_NONE;
// TODO(apolcyn): allow c-ares to return a service config
// with no addresses along side it
@ -887,7 +886,7 @@ grpc_error_handle grpc_dns_lookup_ares_continued(
}
error = grpc_ares_ev_driver_create_locked(&r->ev_driver, interested_parties,
query_timeout_ms, r);
if (!GRPC_ERROR_IS_NONE(error)) return error;
if (!error.ok()) return error;
// If dns_server is specified, use it.
error = set_request_dns_server(r, dns_server);
return error;
@ -1053,7 +1052,7 @@ static grpc_ares_request* grpc_dns_lookup_hostname_ares_impl(
grpc_error_handle error = grpc_dns_lookup_ares_continued(
r, dns_server, name, default_port, interested_parties, query_timeout_ms,
&host, &port, true);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_done, error);
return r;
}
@ -1100,7 +1099,7 @@ grpc_ares_request* grpc_dns_lookup_srv_ares_impl(
error = grpc_dns_lookup_ares_continued(r, dns_server, name, nullptr,
interested_parties, query_timeout_ms,
&host, &port, false);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_done, error);
return r;
}
@ -1138,7 +1137,7 @@ grpc_ares_request* grpc_dns_lookup_txt_ares_impl(
error = grpc_dns_lookup_ares_continued(r, dns_server, name, nullptr,
interested_parties, query_timeout_ms,
&host, &port, false);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, r->on_done, error);
return r;
}

@ -189,7 +189,6 @@ void GoogleCloud2ProdResolver::MetadataQuery::OnHttpRequestDone(
auto* self = static_cast<MetadataQuery*>(arg);
// Hop back into WorkSerializer to call OnDone().
// Note: We implicitly pass our ref to the callback here.
(void)GRPC_ERROR_REF(error);
self->resolver_->work_serializer_->Run(
[self, error]() {
self->OnDone(self->resolver_.get(), &self->response_, error);
@ -212,7 +211,7 @@ void GoogleCloud2ProdResolver::ZoneQuery::OnDone(
GoogleCloud2ProdResolver* resolver, const grpc_http_response* response,
grpc_error_handle error) {
absl::StatusOr<std::string> zone;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
zone = absl::UnknownError(
absl::StrCat("error fetching zone from metadata server: ",
grpc_error_std_string(error)));
@ -236,7 +235,6 @@ void GoogleCloud2ProdResolver::ZoneQuery::OnDone(
} else {
resolver->ZoneQueryDone(std::move(*zone));
}
GRPC_ERROR_UNREF(error);
}
//
@ -253,12 +251,11 @@ GoogleCloud2ProdResolver::IPv6Query::IPv6Query(
void GoogleCloud2ProdResolver::IPv6Query::OnDone(
GoogleCloud2ProdResolver* resolver, const grpc_http_response* response,
grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR, "error fetching IPv6 address from metadata server: %s",
grpc_error_std_string(error).c_str());
}
resolver->IPv6QueryDone(GRPC_ERROR_IS_NONE(error) && response->status == 200);
GRPC_ERROR_UNREF(error);
resolver->IPv6QueryDone(error.ok() && response->status == 200);
}
//

@ -106,7 +106,6 @@ void PollingResolver::ShutdownLocked() {
void PollingResolver::OnNextResolution(void* arg, grpc_error_handle error) {
auto* self = static_cast<PollingResolver*>(arg);
(void)GRPC_ERROR_REF(error); // ref owned by lambda
self->work_serializer_->Run(
[self, error]() { self->OnNextResolutionLocked(error); }, DEBUG_LOCATION);
}
@ -119,11 +118,10 @@ void PollingResolver::OnNextResolutionLocked(grpc_error_handle error) {
this, grpc_error_std_string(error).c_str(), shutdown_);
}
have_next_resolution_timer_ = false;
if (GRPC_ERROR_IS_NONE(error) && !shutdown_) {
if (error.ok() && !shutdown_) {
StartResolvingLocked();
}
Unref(DEBUG_LOCATION, "retry-timer");
GRPC_ERROR_UNREF(error);
}
void PollingResolver::OnRequestComplete(Result result) {

@ -57,7 +57,7 @@ namespace {
absl::optional<std::string> ParseHealthCheckConfig(const Json& field,
grpc_error_handle* error) {
GPR_DEBUG_ASSERT(error != nullptr && GRPC_ERROR_IS_NONE(*error));
GPR_DEBUG_ASSERT(error != nullptr && error->ok());
if (field.type() != Json::Type::OBJECT) {
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"field:healthCheckConfig error:should be of type object");
@ -131,7 +131,7 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const ChannelArgs& /*args*/,
grpc_error_handle parsing_error = GRPC_ERROR_NONE;
health_check_service_name =
ParseHealthCheckConfig(it->second, &parsing_error);
if (!GRPC_ERROR_IS_NONE(parsing_error)) {
if (!parsing_error.ok()) {
error_list.push_back(parsing_error);
}
}
@ -141,7 +141,6 @@ ClientChannelServiceConfigParser::ParseGlobalParams(const ChannelArgs& /*args*/,
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing client channel global parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
return absl::make_unique<ClientChannelGlobalParsedConfig>(
@ -177,7 +176,6 @@ ClientChannelServiceConfigParser::ParsePerMethodParams(
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing client channel method parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
return absl::make_unique<ClientChannelMethodParsedConfig>(timeout,

@ -925,7 +925,6 @@ void RetryFilter::CallData::CallAttempt::
void RetryFilter::CallData::CallAttempt::MaybeAddBatchForCancelOp(
grpc_error_handle error, CallCombinerClosureList* closures) {
if (sent_cancel_stream_) {
GRPC_ERROR_UNREF(error);
return;
}
sent_cancel_stream_ = true;
@ -1019,7 +1018,6 @@ void RetryFilter::CallData::CallAttempt::AddBatchesForPendingBatches(
DEBUG_LOCATION,
"internally started recv_trailing_metadata batch pending and "
"recv_trailing_metadata started from surface");
GRPC_ERROR_UNREF(recv_trailing_metadata_error_);
}
recv_trailing_metadata_error_ = GRPC_ERROR_NONE;
}
@ -1235,22 +1233,18 @@ void RetryFilter::CallData::CallAttempt::Abandon() {
DEBUG_LOCATION,
"unref internal recv_trailing_metadata_ready batch; attempt abandoned");
}
GRPC_ERROR_UNREF(recv_trailing_metadata_error_);
recv_trailing_metadata_error_ = GRPC_ERROR_NONE;
recv_initial_metadata_ready_deferred_batch_.reset(
DEBUG_LOCATION,
"unref deferred recv_initial_metadata_ready batch; attempt abandoned");
GRPC_ERROR_UNREF(recv_initial_metadata_error_);
recv_initial_metadata_error_ = GRPC_ERROR_NONE;
recv_message_ready_deferred_batch_.reset(
DEBUG_LOCATION,
"unref deferred recv_message_ready batch; attempt abandoned");
GRPC_ERROR_UNREF(recv_message_error_);
recv_message_error_ = GRPC_ERROR_NONE;
for (auto& on_complete_deferred_batch : on_complete_deferred_batches_) {
on_complete_deferred_batch.batch.reset(
DEBUG_LOCATION, "unref deferred on_complete batch; attempt abandoned");
GRPC_ERROR_UNREF(on_complete_deferred_batch.error);
}
on_complete_deferred_batches_.clear();
}
@ -1261,8 +1255,8 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimer(
GRPC_CLOSURE_INIT(&call_attempt->on_per_attempt_recv_timer_,
OnPerAttemptRecvTimerLocked, call_attempt, nullptr);
GRPC_CALL_COMBINER_START(call_attempt->calld_->call_combiner_,
&call_attempt->on_per_attempt_recv_timer_,
GRPC_ERROR_REF(error), "per-attempt timer fired");
&call_attempt->on_per_attempt_recv_timer_, error,
"per-attempt timer fired");
}
void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
@ -1278,8 +1272,7 @@ void RetryFilter::CallData::CallAttempt::OnPerAttemptRecvTimerLocked(
call_attempt->per_attempt_recv_timer_pending_);
}
CallCombinerClosureList closures;
if (GRPC_ERROR_IS_NONE(error) &&
call_attempt->per_attempt_recv_timer_pending_) {
if (error.ok() && call_attempt->per_attempt_recv_timer_pending_) {
call_attempt->per_attempt_recv_timer_pending_ = false;
// Cancel this attempt.
// TODO(roth): When implementing hedging, we should not cancel the
@ -1398,7 +1391,6 @@ void RetryFilter::CallData::CallAttempt::BatchData::
.recv_initial_metadata_ready != nullptr;
});
if (pending == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
// Return metadata.
@ -1450,9 +1442,9 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
// the recv_trailing_metadata_ready callback, then defer propagating this
// callback back to the surface. We can evaluate whether to retry when
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY((call_attempt->trailing_metadata_available_ ||
!GRPC_ERROR_IS_NONE(error)) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GPR_UNLIKELY(
(call_attempt->trailing_metadata_available_ || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring "
@ -1461,11 +1453,10 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
}
call_attempt->recv_initial_metadata_ready_deferred_batch_ =
std::move(batch_data);
call_attempt->recv_initial_metadata_error_ = GRPC_ERROR_REF(error);
call_attempt->recv_initial_metadata_error_ = error;
CallCombinerClosureList closures;
if (!GRPC_ERROR_IS_NONE(error)) {
call_attempt->MaybeAddBatchForCancelOp(GRPC_ERROR_REF(error),
&closures);
if (!error.ok()) {
call_attempt->MaybeAddBatchForCancelOp(error, &closures);
}
if (!call_attempt->started_recv_trailing_metadata_) {
// recv_trailing_metadata not yet started by application; start it
@ -1483,8 +1474,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvInitialMetadataReady(
}
// Invoke the callback to return the result to the surface.
CallCombinerClosureList closures;
batch_data->MaybeAddClosureForRecvInitialMetadataCallback(
GRPC_ERROR_REF(error), &closures);
batch_data->MaybeAddClosureForRecvInitialMetadataCallback(error, &closures);
closures.RunClosures(calld->call_combiner_);
}
@ -1503,7 +1493,6 @@ void RetryFilter::CallData::CallAttempt::BatchData::
batch->payload->recv_message.recv_message_ready != nullptr;
});
if (pending == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
// Return payload.
@ -1555,9 +1544,9 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
// the recv_trailing_metadata_ready callback, then defer propagating this
// callback back to the surface. We can evaluate whether to retry when
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY((!call_attempt->recv_message_.has_value() ||
!GRPC_ERROR_IS_NONE(error)) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GPR_UNLIKELY(
(!call_attempt->recv_message_.has_value() || !error.ok()) &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: deferring recv_message_ready "
@ -1565,11 +1554,10 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
calld->chand_, calld, call_attempt);
}
call_attempt->recv_message_ready_deferred_batch_ = std::move(batch_data);
call_attempt->recv_message_error_ = GRPC_ERROR_REF(error);
call_attempt->recv_message_error_ = error;
CallCombinerClosureList closures;
if (!GRPC_ERROR_IS_NONE(error)) {
call_attempt->MaybeAddBatchForCancelOp(GRPC_ERROR_REF(error),
&closures);
if (!error.ok()) {
call_attempt->MaybeAddBatchForCancelOp(error, &closures);
}
if (!call_attempt->started_recv_trailing_metadata_) {
// recv_trailing_metadata not yet started by application; start it
@ -1587,8 +1575,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvMessageReady(
}
// Invoke the callback to return the result to the surface.
CallCombinerClosureList closures;
batch_data->MaybeAddClosureForRecvMessageCallback(GRPC_ERROR_REF(error),
&closures);
batch_data->MaybeAddClosureForRecvMessageCallback(error, &closures);
closures.RunClosures(calld->call_combiner_);
}
@ -1605,7 +1592,7 @@ void GetCallStatus(
grpc_status_code* status, absl::optional<Duration>* server_pushback,
bool* is_lb_drop,
absl::optional<GrpcStreamNetworkState::ValueType>* stream_network_state) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
intptr_t value = 0;
if (grpc_error_get_int(error, GRPC_ERROR_INT_LB_POLICY_DROP, &value) &&
@ -1617,7 +1604,6 @@ void GetCallStatus(
}
*server_pushback = md_batch->get(GrpcRetryPushbackMsMetadata());
*stream_network_state = md_batch->get(GrpcStreamNetworkState());
GRPC_ERROR_UNREF(error);
}
} // namespace
@ -1697,13 +1683,12 @@ void RetryFilter::CallData::CallAttempt::BatchData::
PendingBatch* pending = &calld->pending_batches_[i];
if (pending->batch == nullptr) continue;
if (call_attempt_->PendingBatchContainsUnstartedSendOps(pending)) {
closures->Add(pending->batch->on_complete, GRPC_ERROR_REF(error),
closures->Add(pending->batch->on_complete, error,
"failing on_complete for pending batch");
pending->batch->on_complete = nullptr;
calld->MaybeClearPendingBatch(pending);
}
}
GRPC_ERROR_UNREF(error);
}
void RetryFilter::CallData::CallAttempt::BatchData::RunClosuresForCompletedCall(
@ -1711,15 +1696,14 @@ void RetryFilter::CallData::CallAttempt::BatchData::RunClosuresForCompletedCall(
// Construct list of closures to execute.
CallCombinerClosureList closures;
// First, add closure for recv_trailing_metadata_ready.
MaybeAddClosureForRecvTrailingMetadataReady(GRPC_ERROR_REF(error), &closures);
MaybeAddClosureForRecvTrailingMetadataReady(error, &closures);
// If there are deferred batch completion callbacks, add them to closures.
AddClosuresForDeferredCompletionCallbacks(&closures);
// Add closures to fail any pending batches that have not yet been started.
AddClosuresToFailUnstartedPendingBatches(GRPC_ERROR_REF(error), &closures);
AddClosuresToFailUnstartedPendingBatches(error, &closures);
// Schedule all of the closures identified above.
// Note: This will release the call combiner.
closures.RunClosures(call_attempt_->calld_->call_combiner_);
GRPC_ERROR_UNREF(error);
}
void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
@ -1752,8 +1736,8 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
absl::optional<GrpcStreamNetworkState::ValueType> stream_network_state;
grpc_metadata_batch* md_batch =
batch_data->batch_.payload->recv_trailing_metadata.recv_trailing_metadata;
GetCallStatus(calld->deadline_, md_batch, GRPC_ERROR_REF(error), &status,
&server_pushback, &is_lb_drop, &stream_network_state);
GetCallStatus(calld->deadline_, md_batch, error, &status, &server_pushback,
&is_lb_drop, &stream_network_state);
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO,
"chand=%p calld=%p attempt=%p: call finished, status=%s "
@ -1793,11 +1777,11 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
CallCombinerClosureList closures;
// Cancel call attempt.
call_attempt->MaybeAddBatchForCancelOp(
GRPC_ERROR_IS_NONE(error)
error.ok()
? grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("call attempt failed"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_CANCELLED)
: GRPC_ERROR_REF(error),
: error,
&closures);
// For transparent retries, add a closure to immediately start a new
// call attempt.
@ -1820,7 +1804,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::RecvTrailingMetadataReady(
// subsequent batches.
call_attempt->MaybeSwitchToFastPath();
// Run any necessary closures.
batch_data->RunClosuresForCompletedCall(GRPC_ERROR_REF(error));
batch_data->RunClosuresForCompletedCall(error);
}
//
@ -1843,7 +1827,6 @@ void RetryFilter::CallData::CallAttempt::BatchData::
// If batch_data is a replay batch, then there will be no pending
// batch to complete.
if (pending == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
// Propagate payload.
@ -1911,16 +1894,16 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
// recv_trailing_metadata_ready callback, then defer propagating this
// callback back to the surface. We can evaluate whether to retry when
// recv_trailing_metadata comes back.
if (GPR_UNLIKELY(!calld->retry_committed_ && !GRPC_ERROR_IS_NONE(error) &&
if (GPR_UNLIKELY(!calld->retry_committed_ && !error.ok() &&
!call_attempt->completed_recv_trailing_metadata_)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p attempt=%p: deferring on_complete",
calld->chand_, calld, call_attempt);
}
call_attempt->on_complete_deferred_batches_.emplace_back(
std::move(batch_data), GRPC_ERROR_REF(error));
std::move(batch_data), error);
CallCombinerClosureList closures;
call_attempt->MaybeAddBatchForCancelOp(GRPC_ERROR_REF(error), &closures);
call_attempt->MaybeAddBatchForCancelOp(error, &closures);
if (!call_attempt->started_recv_trailing_metadata_) {
// recv_trailing_metadata not yet started by application; start it
// ourselves to get status.
@ -1947,8 +1930,7 @@ void RetryFilter::CallData::CallAttempt::BatchData::OnComplete(
// Construct list of closures to execute.
CallCombinerClosureList closures;
// Add closure for the completed pending batch, if any.
batch_data->AddClosuresForCompletedPendingBatch(GRPC_ERROR_REF(error),
&closures);
batch_data->AddClosuresForCompletedPendingBatch(error, &closures);
// If needed, add a callback to start any replay or pending send ops on
// the LB call.
if (!call_attempt->completed_recv_trailing_metadata_) {
@ -2191,7 +2173,6 @@ RetryFilter::CallData::~CallData() {
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
GPR_ASSERT(pending_batches_[i].batch == nullptr);
}
GRPC_ERROR_UNREF(cancelled_from_surface_);
}
void RetryFilter::CallData::StartTransportStreamOpBatch(
@ -2209,24 +2190,22 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
}
// If we were previously cancelled from the surface, fail this
// batch immediately.
if (!GRPC_ERROR_IS_NONE(cancelled_from_surface_)) {
if (!cancelled_from_surface_.ok()) {
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancelled_from_surface_), call_combiner_);
batch, cancelled_from_surface_, call_combiner_);
return;
}
// Handle cancellation.
if (GPR_UNLIKELY(batch->cancel_stream)) {
// Save cancel_error in case subsequent batches are started.
GRPC_ERROR_UNREF(cancelled_from_surface_);
cancelled_from_surface_ =
GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
cancelled_from_surface_ = batch->payload->cancel_stream.cancel_error;
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
gpr_log(GPR_INFO, "chand=%p calld=%p: cancelled from surface: %s", chand_,
this, grpc_error_std_string(cancelled_from_surface_).c_str());
}
// Fail any pending batches.
PendingBatchesFail(GRPC_ERROR_REF(cancelled_from_surface_));
PendingBatchesFail(cancelled_from_surface_);
// If we have a current call attempt, commit the call, then send
// the cancellation down to that attempt. When the call fails, it
// will not be retried, because we have committed it here.
@ -2256,7 +2235,7 @@ void RetryFilter::CallData::StartTransportStreamOpBatch(
// batch. Return it back to the surface immediately.
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancelled_from_surface_), call_combiner_);
batch, cancelled_from_surface_, call_combiner_);
return;
}
// Add the batch to the pending list.
@ -2514,13 +2493,13 @@ void RetryFilter::CallData::FailPendingBatchInCallCombiner(
static_cast<grpc_transport_stream_op_batch*>(arg);
CallData* call = static_cast<CallData*>(batch->handler_private.extra_arg);
// Note: This will release the call combiner.
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(error), call->call_combiner_);
grpc_transport_stream_op_batch_finish_with_failure(batch, error,
call->call_combiner_);
}
// This is called via the call combiner, so access to calld is synchronized.
void RetryFilter::CallData::PendingBatchesFail(grpc_error_handle error) {
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
if (GRPC_TRACE_FLAG_ENABLED(grpc_retry_trace)) {
size_t num_batches = 0;
for (size_t i = 0; i < GPR_ARRAY_SIZE(pending_batches_); ++i) {
@ -2539,13 +2518,12 @@ void RetryFilter::CallData::PendingBatchesFail(grpc_error_handle error) {
GRPC_CLOSURE_INIT(&batch->handler_private.closure,
FailPendingBatchInCallCombiner, batch,
grpc_schedule_on_exec_ctx);
closures.Add(&batch->handler_private.closure, GRPC_ERROR_REF(error),
closures.Add(&batch->handler_private.closure, error,
"PendingBatchesFail");
PendingBatchClear(pending);
}
}
closures.RunClosuresWithoutYielding(call_combiner_);
GRPC_ERROR_UNREF(error);
}
template <typename Predicate>
@ -2622,14 +2600,14 @@ void RetryFilter::CallData::StartRetryTimer(
void RetryFilter::CallData::OnRetryTimer(void* arg, grpc_error_handle error) {
auto* calld = static_cast<CallData*>(arg);
GRPC_CLOSURE_INIT(&calld->retry_closure_, OnRetryTimerLocked, calld, nullptr);
GRPC_CALL_COMBINER_START(calld->call_combiner_, &calld->retry_closure_,
GRPC_ERROR_REF(error), "retry timer fired");
GRPC_CALL_COMBINER_START(calld->call_combiner_, &calld->retry_closure_, error,
"retry timer fired");
}
void RetryFilter::CallData::OnRetryTimerLocked(void* arg,
grpc_error_handle error) {
auto* calld = static_cast<CallData*>(arg);
if (GRPC_ERROR_IS_NONE(error) && calld->retry_timer_pending_) {
if (error.ok() && calld->retry_timer_pending_) {
calld->retry_timer_pending_ = false;
calld->CreateCallAttempt(/*is_transparent_retry=*/false);
} else {
@ -2652,7 +2630,7 @@ void RetryFilter::CallData::AddClosureToStartTransparentRetry(
void RetryFilter::CallData::StartTransparentRetry(void* arg,
grpc_error_handle /*error*/) {
auto* calld = static_cast<CallData*>(arg);
if (GRPC_ERROR_IS_NONE(calld->cancelled_from_surface_)) {
if (calld->cancelled_from_surface_.ok()) {
calld->CreateCallAttempt(/*is_transparent_retry=*/true);
} else {
GRPC_CALL_COMBINER_STOP(calld->call_combiner_,

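The retry-filter hunks above all reduce to one mechanical change: grpc_error_handle is no longer explicitly ref-counted at call sites, so handles are passed and stored by value, GRPC_ERROR_REF/GRPC_ERROR_UNREF pairs disappear, and GRPC_ERROR_IS_NONE(error) becomes error.ok(). A minimal sketch of that before/after shape, assuming the handle now behaves like absl::Status (the helper and container names below are illustrative, not gRPC internals):

// Sketch only: value-semantic error handles, assuming absl::Status-like behavior.
#include <iostream>
#include <utility>
#include <vector>

#include "absl/status/status.h"

void PropagateError(std::vector<absl::Status>* pending, absl::Status error) {
  if (error.ok()) return;                // was: if (GRPC_ERROR_IS_NONE(error)) return;
  pending->push_back(error);             // was: store GRPC_ERROR_REF(error)
  pending->push_back(std::move(error));  // a second consumer; no GRPC_ERROR_UNREF needed
}

int main() {
  std::vector<absl::Status> pending;
  PropagateError(&pending, absl::CancelledError("call attempt failed"));
  std::cout << pending.size() << " pending errors\n";  // prints: 2 pending errors
}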
@ -150,11 +150,10 @@ RetryServiceConfigParser::ParseGlobalParams(const ChannelArgs& /*args*/,
intptr_t milli_token_ratio = 0;
grpc_error_handle error =
ParseRetryThrottling(it->second, &max_milli_tokens, &milli_token_ratio);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing retry global parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
return absl::make_unique<RetryGlobalConfig>(max_milli_tokens,
@ -308,11 +307,10 @@ RetryServiceConfigParser::ParsePerMethodParams(const ChannelArgs& args,
grpc_error_handle error = ParseRetryPolicy(
args, it->second, &max_attempts, &initial_backoff, &max_backoff,
&backoff_multiplier, &retryable_status_codes, &per_attempt_recv_timeout);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing retry method parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
return absl::make_unique<RetryMethodConfig>(

@ -160,7 +160,7 @@ SubchannelCall::SubchannelCall(Args args, grpc_error_handle* error)
};
*error = grpc_call_stack_init(connected_subchannel_->channel_stack(), 1,
SubchannelCall::Destroy, this, &call_args);
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(*error))) {
if (GPR_UNLIKELY(!error->ok())) {
gpr_log(GPR_ERROR, "error: %s", grpc_error_std_string(*error).c_str());
return;
}
@ -255,12 +255,11 @@ namespace {
// Sets *status based on the rest of the parameters.
void GetCallStatus(grpc_status_code* status, Timestamp deadline,
grpc_metadata_batch* md_batch, grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_get_status(error, deadline, status, nullptr, nullptr, nullptr);
} else {
*status = md_batch->get(GrpcStatusMetadata()).value_or(GRPC_STATUS_UNKNOWN);
}
GRPC_ERROR_UNREF(error);
}
} // namespace
@ -270,8 +269,7 @@ void SubchannelCall::RecvTrailingMetadataReady(void* arg,
SubchannelCall* call = static_cast<SubchannelCall*>(arg);
GPR_ASSERT(call->recv_trailing_metadata_ != nullptr);
grpc_status_code status = GRPC_STATUS_OK;
GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_,
GRPC_ERROR_REF(error));
GetCallStatus(&status, call->deadline_, call->recv_trailing_metadata_, error);
channelz::SubchannelNode* channelz_subchannel =
call->connected_subchannel_->channelz_subchannel();
GPR_ASSERT(channelz_subchannel != nullptr);
@ -280,8 +278,7 @@ void SubchannelCall::RecvTrailingMetadataReady(void* arg,
} else {
channelz_subchannel->RecordCallFailed();
}
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_,
GRPC_ERROR_REF(error));
Closure::Run(DEBUG_LOCATION, call->original_recv_trailing_metadata_, error);
}
void SubchannelCall::IncrementRefCount() {
@ -889,14 +886,13 @@ void Subchannel::OnConnectingFinished(void* arg, grpc_error_handle error) {
WeakRefCountedPtr<Subchannel> c(static_cast<Subchannel*>(arg));
{
MutexLock lock(&c->mu_);
c->OnConnectingFinishedLocked(GRPC_ERROR_REF(error));
c->OnConnectingFinishedLocked(error);
}
c.reset(DEBUG_LOCATION, "Connect");
}
void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
if (shutdown_) {
(void)GRPC_ERROR_UNREF(error);
return;
}
// If we didn't get a transport or we fail to publish it, report
@ -931,7 +927,6 @@ void Subchannel::OnConnectingFinishedLocked(grpc_error_handle error) {
}
});
}
(void)GRPC_ERROR_UNREF(error);
}
bool Subchannel::PublishTransportLocked() {
@ -949,7 +944,6 @@ bool Subchannel::PublishTransportLocked() {
gpr_log(GPR_ERROR,
"subchannel %p %s: error initializing subchannel stack: %s", this,
key_.ToString().c_str(), grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return false;
}
RefCountedPtr<channelz::SocketNode> socket =

@ -146,7 +146,7 @@ void SubchannelStreamClient::OnRetryTimer(void* arg, grpc_error_handle error) {
{
MutexLock lock(&self->mu_);
self->retry_timer_callback_pending_ = false;
if (self->event_handler_ != nullptr && GRPC_ERROR_IS_NONE(error) &&
if (self->event_handler_ != nullptr && error.ok() &&
self->call_state_ == nullptr) {
if (GPR_UNLIKELY(self->tracer_ != nullptr)) {
gpr_log(GPR_INFO,
@ -218,14 +218,12 @@ void SubchannelStreamClient::CallState::StartCallLocked() {
this, grpc_schedule_on_exec_ctx);
call_->SetAfterCallStackDestroy(&after_call_stack_destruction_);
// Check if creation failed.
if (!GRPC_ERROR_IS_NONE(error) ||
subchannel_stream_client_->event_handler_ == nullptr) {
if (!error.ok() || subchannel_stream_client_->event_handler_ == nullptr) {
gpr_log(GPR_ERROR,
"SubchannelStreamClient %p CallState %p: error creating "
"stream on subchannel (%s); will retry",
subchannel_stream_client_.get(), this,
grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
CallEndedLocked(/*retry=*/true);
return;
}
@ -240,7 +238,7 @@ void SubchannelStreamClient::CallState::StartCallLocked() {
send_initial_metadata_.Set(
HttpPathMetadata(),
subchannel_stream_client_->event_handler_->GetPathLocked());
GPR_ASSERT(GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(error.ok());
payload_.send_initial_metadata.send_initial_metadata =
&send_initial_metadata_;
payload_.send_initial_metadata.peer_string = nullptr;
@ -418,7 +416,7 @@ void SubchannelStreamClient::CallState::RecvTrailingMetadataReady(
grpc_status_code status =
self->recv_trailing_metadata_.get(GrpcStatusMetadata())
.value_or(GRPC_STATUS_UNKNOWN);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_get_status(error, Timestamp::InfFuture(), &status,
nullptr /* slice */, nullptr /* http_error */,
nullptr /* error_string */);

@ -73,7 +73,7 @@ class TimerState {
grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
GRPC_CLOSURE_INIT(&self->closure_, YieldCallCombiner, self, nullptr));
batch->cancel_stream = true;
batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
batch->payload->cancel_stream.cancel_error = error;
self->elem_->filter->start_transport_stream_op_batch(self->elem_, batch);
}
@ -86,7 +86,7 @@ class TimerState {
error = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
deadline_state->call_combiner->Cancel(GRPC_ERROR_REF(error));
deadline_state->call_combiner->Cancel(error);
GRPC_CLOSURE_INIT(&self->closure_, SendCancelOpInCallCombiner, self,
nullptr);
GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &self->closure_,
@ -145,7 +145,7 @@ static void recv_trailing_metadata_ready(void* arg, grpc_error_handle error) {
// Invoke the original callback.
grpc_core::Closure::Run(DEBUG_LOCATION,
deadline_state->original_recv_trailing_metadata_ready,
GRPC_ERROR_REF(error));
error);
}
// Inject our own recv_trailing_metadata_ready callback into op.
@ -183,8 +183,7 @@ static void start_timer_after_init(void* arg, grpc_error_handle error) {
// need to bounce ourselves into it.
state->in_call_combiner = true;
GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure,
GRPC_ERROR_REF(error),
"scheduling deadline timer");
error, "scheduling deadline timer");
return;
}
delete state;
@ -305,8 +304,7 @@ static void recv_initial_metadata_ready(void* arg, grpc_error_handle error) {
.value_or(grpc_core::Timestamp::InfFuture()));
// Invoke the next callback.
grpc_core::Closure::Run(DEBUG_LOCATION,
calld->next_recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
calld->next_recv_initial_metadata_ready, error);
}
// Method for starting a call op for server filter.

@ -165,7 +165,6 @@ FaultInjectionServiceConfigParser::ParsePerMethodParams(const ChannelArgs& args,
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing fault injection method parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
if (fault_injection_policies.empty()) return nullptr;

@ -211,8 +211,7 @@ void CallData::FailSendMessageBatchInCallCombiner(void* calld_arg,
CallData* calld = static_cast<CallData*>(calld_arg);
if (calld->send_message_batch_ != nullptr) {
grpc_transport_stream_op_batch_finish_with_failure(
calld->send_message_batch_, GRPC_ERROR_REF(error),
calld->call_combiner_);
calld->send_message_batch_, error, calld->call_combiner_);
calld->send_message_batch_ = nullptr;
}
}
@ -228,20 +227,19 @@ void CallData::CompressStartTransportStreamOpBatch(
grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
// Handle cancel_stream.
if (batch->cancel_stream) {
GRPC_ERROR_UNREF(cancel_error_);
cancel_error_ = GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error);
cancel_error_ = batch->payload->cancel_stream.cancel_error;
if (send_message_batch_ != nullptr) {
if (!seen_initial_metadata_) {
GRPC_CALL_COMBINER_START(
call_combiner_,
GRPC_CLOSURE_CREATE(FailSendMessageBatchInCallCombiner, this,
grpc_schedule_on_exec_ctx),
GRPC_ERROR_REF(cancel_error_), "failing send_message op");
cancel_error_, "failing send_message op");
}
}
} else if (!GRPC_ERROR_IS_NONE(cancel_error_)) {
grpc_transport_stream_op_batch_finish_with_failure(
batch, GRPC_ERROR_REF(cancel_error_), call_combiner_);
} else if (!cancel_error_.ok()) {
grpc_transport_stream_op_batch_finish_with_failure(batch, cancel_error_,
call_combiner_);
return;
}
// Handle send_initial_metadata.

@ -130,7 +130,7 @@ class CallData {
void CallData::OnRecvInitialMetadataReady(void* arg, grpc_error_handle error) {
CallData* calld = static_cast<CallData*>(arg);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
calld->algorithm_ =
calld->recv_initial_metadata_->get(GrpcEncodingMetadata())
.value_or(GRPC_COMPRESS_NONE);
@ -139,7 +139,7 @@ void CallData::OnRecvInitialMetadataReady(void* arg, grpc_error_handle error) {
calld->MaybeResumeOnRecvTrailingMetadataReady();
grpc_closure* closure = calld->original_recv_initial_metadata_ready_;
calld->original_recv_initial_metadata_ready_ = nullptr;
Closure::Run(DEBUG_LOCATION, closure, GRPC_ERROR_REF(error));
Closure::Run(DEBUG_LOCATION, closure, error);
}
void CallData::MaybeResumeOnRecvMessageReady() {
@ -153,7 +153,7 @@ void CallData::MaybeResumeOnRecvMessageReady() {
void CallData::OnRecvMessageReady(void* arg, grpc_error_handle error) {
CallData* calld = static_cast<CallData*>(arg);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (calld->original_recv_initial_metadata_ready_ != nullptr) {
calld->seen_recv_message_ready_ = true;
GRPC_CALL_COMBINER_STOP(calld->call_combiner_,
@ -172,21 +172,20 @@ void CallData::OnRecvMessageReady(void* arg, grpc_error_handle error) {
if (calld->max_recv_message_length_ >= 0 &&
(*calld->recv_message_)->Length() >
static_cast<uint32_t>(calld->max_recv_message_length_)) {
GPR_DEBUG_ASSERT(GRPC_ERROR_IS_NONE(calld->error_));
GPR_DEBUG_ASSERT(calld->error_.ok());
calld->error_ = grpc_error_set_int(
GRPC_ERROR_CREATE_FROM_CPP_STRING(
absl::StrFormat("Received message larger than max (%u vs. %d)",
(*calld->recv_message_)->Length(),
calld->max_recv_message_length_)),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED);
return calld->ContinueRecvMessageReadyCallback(
GRPC_ERROR_REF(calld->error_));
return calld->ContinueRecvMessageReadyCallback(calld->error_);
}
SliceBuffer decompressed_slices;
if (grpc_msg_decompress(calld->algorithm_,
(*calld->recv_message_)->c_slice_buffer(),
decompressed_slices.c_slice_buffer()) == 0) {
GPR_DEBUG_ASSERT(GRPC_ERROR_IS_NONE(calld->error_));
GPR_DEBUG_ASSERT(calld->error_.ok());
calld->error_ = GRPC_ERROR_CREATE_FROM_CPP_STRING(absl::StrCat(
"Unexpected error decompressing data for algorithm with "
"enum value ",
@ -197,11 +196,10 @@ void CallData::OnRecvMessageReady(void* arg, grpc_error_handle error) {
GRPC_WRITE_INTERNAL_TEST_ONLY_WAS_COMPRESSED;
(*calld->recv_message_)->Swap(&decompressed_slices);
}
return calld->ContinueRecvMessageReadyCallback(
GRPC_ERROR_REF(calld->error_));
return calld->ContinueRecvMessageReadyCallback(calld->error_);
}
}
calld->ContinueRecvMessageReadyCallback(GRPC_ERROR_REF(error));
calld->ContinueRecvMessageReadyCallback(error);
}
void CallData::ContinueRecvMessageReadyCallback(grpc_error_handle error) {
@ -227,14 +225,14 @@ void CallData::OnRecvTrailingMetadataReady(void* arg, grpc_error_handle error) {
if (calld->original_recv_initial_metadata_ready_ != nullptr ||
calld->original_recv_message_ready_ != nullptr) {
calld->seen_recv_trailing_metadata_ready_ = true;
calld->on_recv_trailing_metadata_ready_error_ = GRPC_ERROR_REF(error);
calld->on_recv_trailing_metadata_ready_error_ = error;
GRPC_CALL_COMBINER_STOP(
calld->call_combiner_,
"Deferring OnRecvTrailingMetadataReady until after "
"OnRecvInitialMetadataReady and OnRecvMessageReady");
return;
}
error = grpc_error_add_child(GRPC_ERROR_REF(error), calld->error_);
error = grpc_error_add_child(error, calld->error_);
calld->error_ = GRPC_ERROR_NONE;
grpc_closure* closure = calld->original_recv_trailing_metadata_ready_;
calld->original_recv_trailing_metadata_ready_ = nullptr;

@ -119,7 +119,6 @@ MessageSizeParser::ParsePerMethodParams(const ChannelArgs& /*args*/,
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing message size method parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
return absl::make_unique<MessageSizeParsedConfig>(max_request_message_bytes,
@ -223,11 +222,8 @@ static void recv_message_ready(void* user_data, grpc_error_handle error) {
"Received message larger than max (%u vs. %d)",
(*calld->recv_message)->Length(), calld->limits.max_recv_size)),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED);
error = grpc_error_add_child(GRPC_ERROR_REF(error), new_error);
GRPC_ERROR_UNREF(calld->error);
calld->error = GRPC_ERROR_REF(error);
} else {
(void)GRPC_ERROR_REF(error);
error = grpc_error_add_child(error, new_error);
calld->error = error;
}
// Invoke the next callback.
grpc_closure* closure = calld->next_recv_message_ready;
@ -255,14 +251,13 @@ static void recv_trailing_metadata_ready(void* user_data,
call_data* calld = static_cast<call_data*>(elem->call_data);
if (calld->next_recv_message_ready != nullptr) {
calld->seen_recv_trailing_metadata = true;
calld->recv_trailing_metadata_error = GRPC_ERROR_REF(error);
calld->recv_trailing_metadata_error = error;
GRPC_CALL_COMBINER_STOP(calld->call_combiner,
"deferring recv_trailing_metadata_ready until "
"after recv_message_ready");
return;
}
error =
grpc_error_add_child(GRPC_ERROR_REF(error), GRPC_ERROR_REF(calld->error));
error = grpc_error_add_child(error, calld->error);
// Invoke the next callback.
grpc_core::Closure::Run(DEBUG_LOCATION,
calld->original_recv_trailing_metadata_ready, error);

@ -82,7 +82,7 @@ void RbacFilter::CallData::RecvInitialMetadataReady(void* user_data,
grpc_call_element* elem = static_cast<grpc_call_element*>(user_data);
CallData* calld = static_cast<CallData*>(elem->call_data);
RbacFilter* filter = static_cast<RbacFilter*>(elem->channel_data);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
// Fetch and apply the rbac policy from the service config.
auto* service_config_call_data = static_cast<ServiceConfigCallData*>(
calld->call_context_[GRPC_CONTEXT_SERVICE_CONFIG_CALL_DATA].value);
@ -103,12 +103,10 @@ void RbacFilter::CallData::RecvInitialMetadataReady(void* user_data,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unauthorized RPC rejected");
}
}
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_PERMISSION_DENIED);
}
} else {
(void)GRPC_ERROR_REF(error);
}
grpc_closure* closure = calld->original_recv_initial_metadata_ready_;
calld->original_recv_initial_metadata_ready_ = nullptr;

@ -597,11 +597,10 @@ RbacServiceConfigParser::ParsePerMethodParams(const ChannelArgs& args,
}
grpc_error_handle error =
GRPC_ERROR_CREATE_FROM_VECTOR("Rbac parser", &error_list);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
absl::Status status = absl::InvalidArgumentError(
absl::StrCat("error parsing RBAC method parameters: ",
grpc_error_std_string(error)));
GRPC_ERROR_UNREF(error);
return status;
}
if (rbac_policies.empty()) return nullptr;

@ -135,10 +135,9 @@ ArenaPromise<ServerMetadataHandle> ServerConfigSelectorFilter::MakeCallPromise(
if (!sel.ok()) return Immediate(ServerMetadataHandle(sel.status()));
auto call_config =
sel.value()->GetCallConfig(call_args.client_initial_metadata.get());
if (!GRPC_ERROR_IS_NONE(call_config.error)) {
if (!call_config.error.ok()) {
auto r = Immediate(ServerMetadataHandle(
absl::UnavailableError(grpc_error_std_string(call_config.error))));
GRPC_ERROR_UNREF(call_config.error);
return std::move(r);
}
auto& ctx = GetContext<

@ -99,7 +99,7 @@ class BinderConnector : public grpc_core::SubchannelConnector {
Unref(); // Was referenced in BinderConnector::Connect
}
void Shutdown(grpc_error_handle error) override { (void)error; }
void Shutdown(grpc_error_handle /*error*/) override {}
private:
Args args_;

@ -68,7 +68,6 @@ struct grpc_binder_stream {
}
~grpc_binder_stream() {
GRPC_ERROR_UNREF(cancel_self_error);
if (destroy_stream_then_closure != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, destroy_stream_then_closure,
GRPC_ERROR_NONE);

@ -150,21 +150,20 @@ static void cancel_stream_locked(grpc_binder_transport* gbt,
grpc_error_handle error) {
gpr_log(GPR_INFO, "cancel_stream_locked");
if (!gbs->is_closed) {
GPR_ASSERT(GRPC_ERROR_IS_NONE(gbs->cancel_self_error));
GPR_ASSERT(gbs->cancel_self_error.ok());
gbs->is_closed = true;
gbs->cancel_self_error = GRPC_ERROR_REF(error);
gbs->cancel_self_error = error;
gbt->transport_stream_receiver->CancelStream(gbs->tx_code);
gbt->registered_stream.erase(gbs->tx_code);
if (gbs->recv_initial_metadata_ready != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, gbs->recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
error);
gbs->recv_initial_metadata_ready = nullptr;
gbs->recv_initial_metadata = nullptr;
gbs->trailing_metadata_available = nullptr;
}
if (gbs->recv_message_ready != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, gbs->recv_message_ready,
GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, gbs->recv_message_ready, error);
gbs->recv_message_ready = nullptr;
gbs->recv_message->reset();
gbs->recv_message = nullptr;
@ -172,13 +171,11 @@ static void cancel_stream_locked(grpc_binder_transport* gbt,
}
if (gbs->recv_trailing_metadata_finished != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
gbs->recv_trailing_metadata_finished,
GRPC_ERROR_REF(error));
gbs->recv_trailing_metadata_finished, error);
gbs->recv_trailing_metadata_finished = nullptr;
gbs->recv_trailing_metadata = nullptr;
}
}
GRPC_ERROR_UNREF(error);
}
static bool ContainsAuthorityAndPath(const grpc_binder::Metadata& metadata) {
@ -261,8 +258,7 @@ static void recv_message_locked(void* arg, grpc_error_handle /*error*/) {
return GRPC_ERROR_NONE;
}();
if (!GRPC_ERROR_IS_NONE(error) &&
gbs->call_failed_before_recv_message != nullptr) {
if (!error.ok() && gbs->call_failed_before_recv_message != nullptr) {
*gbs->call_failed_before_recv_message = true;
}
grpc_closure* cb = gbs->recv_message_ready;
@ -413,22 +409,22 @@ static void perform_stream_op_locked(void* stream_op,
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_REF(gbs->cancel_self_error));
gbs->cancel_self_error);
}
if (op->recv_message) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(gbs->cancel_self_error));
gbs->cancel_self_error);
}
if (op->recv_trailing_metadata) {
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
GRPC_ERROR_REF(gbs->cancel_self_error));
gbs->cancel_self_error);
}
if (op->on_complete != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete,
GRPC_ERROR_REF(gbs->cancel_self_error));
gbs->cancel_self_error);
}
GRPC_BINDER_STREAM_UNREF(gbs, "perform_stream_op");
return;
@ -606,13 +602,11 @@ static void perform_transport_op_locked(void* transport_op,
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_consumed, GRPC_ERROR_NONE);
}
bool do_close = false;
if (!GRPC_ERROR_IS_NONE(op->disconnect_with_error)) {
if (!op->disconnect_with_error.ok()) {
do_close = true;
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
if (!GRPC_ERROR_IS_NONE(op->goaway_error)) {
if (!op->goaway_error.ok()) {
do_close = true;
GRPC_ERROR_UNREF(op->goaway_error);
}
if (do_close) {
close_transport_locked(gbt);

@ -133,9 +133,8 @@ void Chttp2Connector::Shutdown(grpc_error_handle error) {
shutdown_ = true;
if (handshake_mgr_ != nullptr) {
// Handshaker will also shutdown the endpoint if it exists
handshake_mgr_->Shutdown(GRPC_ERROR_REF(error));
handshake_mgr_->Shutdown(error);
}
GRPC_ERROR_UNREF(error);
}
void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
@ -143,8 +142,8 @@ void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
Chttp2Connector* self = static_cast<Chttp2Connector*>(args->user_data);
{
MutexLock lock(&self->mu_);
if (!GRPC_ERROR_IS_NONE(error) || self->shutdown_) {
if (GRPC_ERROR_IS_NONE(error)) {
if (!error.ok() || self->shutdown_) {
if (error.ok()) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
// We were shut down after handshaking completed successfully, so
// destroy the endpoint here.
@ -153,13 +152,11 @@ void Chttp2Connector::OnHandshakeDone(void* arg, grpc_error_handle error) {
// before destroying them, even if we know that there are no
// pending read/write callbacks. This should be fixed, at which
// point this can be removed.
grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
grpc_endpoint_shutdown(args->endpoint, error);
grpc_endpoint_destroy(args->endpoint);
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
}
} else {
error = GRPC_ERROR_REF(error);
}
self->result_->Reset();
NullThenSchedClosure(DEBUG_LOCATION, &self->notify_, error);
@ -200,14 +197,14 @@ void Chttp2Connector::OnReceiveSettings(void* arg, grpc_error_handle error) {
if (!self->notify_error_.has_value()) {
grpc_endpoint_delete_from_pollset_set(self->endpoint_,
self->args_.interested_parties);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
// Transport got an error while waiting on SETTINGS frame.
// TODO(yashykt): The following two lines should be moved to
// SubchannelConnector::Result::Reset()
grpc_transport_destroy(self->result_->transport);
self->result_->Reset();
}
self->MaybeNotify(GRPC_ERROR_REF(error));
self->MaybeNotify(error);
grpc_timer_cancel(&self->timer_);
} else {
// OnTimeout() was already invoked. Call Notify() again so that notify_
@ -244,7 +241,6 @@ void Chttp2Connector::OnTimeout(void* arg, grpc_error_handle /*error*/) {
void Chttp2Connector::MaybeNotify(grpc_error_handle error) {
if (notify_error_.has_value()) {
GRPC_ERROR_UNREF(error);
NullThenSchedClosure(DEBUG_LOCATION, &notify_, notify_error_.value());
// Clear state for a new Connect().
// Clear out the endpoint_, since it is the responsibility of
@ -373,7 +369,6 @@ grpc_channel* grpc_channel_create(const char* target,
if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) {
status = static_cast<grpc_status_code>(integer);
}
GRPC_ERROR_UNREF(error);
channel = grpc_lame_client_channel_create(
target, status, "Failed to create secure client channel");
}

@ -323,8 +323,7 @@ void Chttp2ServerListener::ConfigFetcherWatcher::UpdateConnectionManager(
int port_temp;
grpc_error_handle error = grpc_tcp_server_add_port(
listener_->tcp_server_, &listener_->resolved_address_, &port_temp);
if (!GRPC_ERROR_IS_NONE(error)) {
GRPC_ERROR_UNREF(error);
if (!error.ok()) {
gpr_log(GPR_ERROR, "Error adding port to server: %s",
grpc_error_std_string(error).c_str());
// TODO(yashykt): We wouldn't need to assert here if we bound to the
@ -445,11 +444,11 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
bool cleanup_connection = false;
{
MutexLock connection_lock(&self->connection_->mu_);
if (!GRPC_ERROR_IS_NONE(error) || self->connection_->shutdown_) {
if (!error.ok() || self->connection_->shutdown_) {
std::string error_str = grpc_error_std_string(error);
gpr_log(GPR_DEBUG, "Handshaking failed: %s", error_str.c_str());
cleanup_connection = true;
if (GRPC_ERROR_IS_NONE(error) && args->endpoint != nullptr) {
if (error.ok() && args->endpoint != nullptr) {
// We were shut down or stopped serving after handshaking completed
// successfully, so destroy the endpoint here.
// TODO(ctiller): It is currently necessary to shutdown endpoints
@ -472,7 +471,7 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
self->connection_->listener_->server_->SetupTransport(
transport, self->accepting_pollset_, args->args,
grpc_chttp2_transport_get_socket_node(transport));
if (GRPC_ERROR_IS_NONE(channel_init_err)) {
if (channel_init_err.ok()) {
// Use notify_on_receive_settings callback to enforce the
// handshake deadline.
// Note: The reinterpret_cast<>s here are safe, because
@ -513,7 +512,6 @@ void Chttp2ServerListener::ActiveConnection::HandshakingState::OnHandshakeDone(
// Failed to create channel from transport. Clean up.
gpr_log(GPR_ERROR, "Failed to create channel: %s",
grpc_error_std_string(channel_init_err).c_str());
GRPC_ERROR_UNREF(channel_init_err);
grpc_transport_destroy(transport);
grpc_slice_buffer_destroy(args->read_buffer);
gpr_free(args->read_buffer);
@ -652,7 +650,7 @@ void Chttp2ServerListener::ActiveConnection::OnDrainGraceTimeExpiry(
ActiveConnection* self = static_cast<ActiveConnection*>(arg);
// If the drain_grace_timer_ was not cancelled, disconnect the transport
// immediately.
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
grpc_chttp2_transport* transport = nullptr;
{
MutexLock lock(&self->mu_);
@ -684,14 +682,14 @@ grpc_error_handle Chttp2ServerListener::Create(
&listener->tcp_server_shutdown_complete_,
grpc_event_engine::experimental::ChannelArgsEndpointConfig(args),
&listener->tcp_server_);
if (!GRPC_ERROR_IS_NONE(error)) return error;
if (!error.ok()) return error;
if (server->config_fetcher() != nullptr) {
listener->resolved_address_ = *addr;
// TODO(yashykt): Consider binding so as to be able to return the port
// number.
} else {
error = grpc_tcp_server_add_port(listener->tcp_server_, addr, port_num);
if (!GRPC_ERROR_IS_NONE(error)) return error;
if (!error.ok()) return error;
}
// Create channelz node.
if (args.GetBool(GRPC_ARG_ENABLE_CHANNELZ)
@ -710,7 +708,7 @@ grpc_error_handle Chttp2ServerListener::Create(
server->AddListener(OrphanablePtr<Server::ListenerInterface>(listener));
return GRPC_ERROR_NONE;
}();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
if (listener != nullptr) {
if (listener->tcp_server_ != nullptr) {
// listener is deleted when tcp_server_ is shutdown.
@ -732,7 +730,7 @@ grpc_error_handle Chttp2ServerListener::CreateWithAcceptor(
&listener->tcp_server_shutdown_complete_,
grpc_event_engine::experimental::ChannelArgsEndpointConfig(args),
&listener->tcp_server_);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
delete listener;
return error;
}
@ -826,7 +824,7 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
}
grpc_error_handle error = GRPC_ERROR_NONE;
args = self->args_modifier_(*args_result, &error);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_DEBUG, "Closing connection: %s",
grpc_error_std_string(error).c_str());
endpoint_cleanup(error);
@ -865,11 +863,10 @@ void Chttp2ServerListener::OnAccept(void* arg, grpc_endpoint* tcp,
}
}
void Chttp2ServerListener::TcpServerShutdownComplete(void* arg,
grpc_error_handle error) {
void Chttp2ServerListener::TcpServerShutdownComplete(
void* arg, grpc_error_handle /*error*/) {
Chttp2ServerListener* self = static_cast<Chttp2ServerListener*>(arg);
self->channelz_listen_socket_.reset();
GRPC_ERROR_UNREF(error);
delete self;
}
@ -950,7 +947,7 @@ grpc_error_handle Chttp2ServerAddPort(Server* server, const char* addr,
int port_temp = -1;
error = Chttp2ServerListener::Create(server, &addr, args, args_modifier,
&port_temp);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
error_list.push_back(error);
} else {
if (*port_num == -1) {
@ -974,15 +971,11 @@ grpc_error_handle Chttp2ServerAddPort(Server* server, const char* addr,
error = GRPC_ERROR_CREATE_REFERENCING_FROM_COPIED_STRING(
msg.c_str(), error_list.data(), error_list.size());
gpr_log(GPR_INFO, "WARNING: %s", grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
// we managed to bind some addresses: continue without error
}
return GRPC_ERROR_NONE;
}(); // lambda end
for (const grpc_error_handle& error : error_list) {
GRPC_ERROR_UNREF(error);
}
if (!GRPC_ERROR_IS_NONE(error)) *port_num = 0;
if (!error.ok()) *port_num = 0;
return error;
}
@ -1053,10 +1046,8 @@ int grpc_server_add_http2_port(grpc_server* server, const char* addr,
core_server, addr, args, grpc_core::ModifyArgsForConnection, &port_num);
done:
sc.reset(DEBUG_LOCATION, "server");
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
gpr_log(GPR_ERROR, "%s", grpc_error_std_string(err).c_str());
GRPC_ERROR_UNREF(err);
}
return port_num;
}
@ -1086,7 +1077,7 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
);
grpc_error_handle error =
core_server->SetupTransport(transport, nullptr, server_args, nullptr);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
for (grpc_pollset* pollset : core_server->pollsets()) {
grpc_endpoint_add_to_pollset(server_endpoint, pollset);
}
@ -1094,7 +1085,6 @@ void grpc_server_add_channel_from_fd(grpc_server* server, int fd,
} else {
gpr_log(GPR_ERROR, "Failed to create channel: %s",
grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
grpc_transport_destroy(transport);
}
}

@ -239,7 +239,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
// ContextList::Execute follows semantics of a callback function and does not
// take a ref on error
grpc_core::ContextList::Execute(cl, nullptr, error);
GRPC_ERROR_UNREF(error);
cl = nullptr;
grpc_slice_buffer_destroy(&read_buffer);
@ -250,8 +249,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
GPR_ASSERT(lists[i].tail == nullptr);
}
GRPC_ERROR_UNREF(goaway_error);
GPR_ASSERT(grpc_chttp2_stream_map_size(&stream_map) == 0);
grpc_chttp2_stream_map_destroy(&stream_map);
@ -267,7 +264,6 @@ grpc_chttp2_transport::~grpc_chttp2_transport() {
write_cb_pool = next;
}
GRPC_ERROR_UNREF(closed_with_error);
gpr_free(ping_acks);
if (grpc_core::test_only_destruct_callback != nullptr) {
grpc_core::test_only_destruct_callback();
@ -559,15 +555,15 @@ static void destroy_transport(grpc_transport* gt) {
static void close_transport_locked(grpc_chttp2_transport* t,
grpc_error_handle error) {
end_all_the_calls(t, GRPC_ERROR_REF(error));
cancel_pings(t, GRPC_ERROR_REF(error));
if (GRPC_ERROR_IS_NONE(t->closed_with_error)) {
end_all_the_calls(t, error);
cancel_pings(t, error);
if (t->closed_with_error.ok()) {
if (!grpc_error_has_clear_grpc_status(error)) {
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE);
}
if (t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE) {
if (GRPC_ERROR_IS_NONE(t->close_transport_on_writes_finished)) {
if (t->close_transport_on_writes_finished.ok()) {
t->close_transport_on_writes_finished =
GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Delayed close due to in-progress write");
@ -576,8 +572,8 @@ static void close_transport_locked(grpc_chttp2_transport* t,
grpc_error_add_child(t->close_transport_on_writes_finished, error);
return;
}
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
t->closed_with_error = GRPC_ERROR_REF(error);
GPR_ASSERT(!error.ok());
t->closed_with_error = error;
connectivity_state_set(t, GRPC_CHANNEL_SHUTDOWN, absl::Status(),
"close_transport");
if (t->ping_state.is_delayed_ping_timer_set) {
@ -606,19 +602,17 @@ static void close_transport_locked(grpc_chttp2_transport* t,
GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:close");
}
GPR_ASSERT(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE);
grpc_endpoint_shutdown(t->ep, GRPC_ERROR_REF(error));
grpc_endpoint_shutdown(t->ep, error);
}
if (t->notify_on_receive_settings != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, t->notify_on_receive_settings,
GRPC_ERROR_REF(error));
error);
t->notify_on_receive_settings = nullptr;
}
if (t->notify_on_close != nullptr) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, t->notify_on_close,
GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, t->notify_on_close, error);
t->notify_on_close = nullptr;
}
GRPC_ERROR_UNREF(error);
}
#ifndef NDEBUG
@ -699,8 +693,6 @@ grpc_chttp2_stream::~grpc_chttp2_stream() {
GPR_ASSERT(recv_message_ready == nullptr);
GPR_ASSERT(recv_trailing_metadata_finished == nullptr);
grpc_slice_buffer_destroy(&flow_controlled_buffer);
GRPC_ERROR_UNREF(read_closed_error);
GRPC_ERROR_UNREF(write_closed_error);
GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream");
grpc_core::ExecCtx::Run(DEBUG_LOCATION, destroy_stream_arg, GRPC_ERROR_NONE);
}
@ -774,7 +766,7 @@ static void set_write_state(grpc_chttp2_transport* t,
// from peer while we had some pending writes)
if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
grpc_core::ExecCtx::RunList(DEBUG_LOCATION, &t->run_after_write);
if (!GRPC_ERROR_IS_NONE(t->close_transport_on_writes_finished)) {
if (!t->close_transport_on_writes_finished.ok()) {
grpc_error_handle err = t->close_transport_on_writes_finished;
t->close_transport_on_writes_finished = GRPC_ERROR_NONE;
close_transport_locked(t, err);
@ -821,8 +813,7 @@ void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t,
grpc_chttp2_stream* s) {
if (GRPC_ERROR_IS_NONE(t->closed_with_error) &&
grpc_chttp2_list_add_writable_stream(t, s)) {
if (t->closed_with_error.ok() && grpc_chttp2_list_add_writable_stream(t, s)) {
GRPC_CHTTP2_STREAM_REF(s, "chttp2_writing:become");
}
}
@ -840,7 +831,7 @@ static void write_action_begin_locked(void* gt,
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(gt);
GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
grpc_chttp2_begin_write_result r;
if (!GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (!t->closed_with_error.ok()) {
r.writing = false;
} else {
r = grpc_chttp2_begin_write(t);
@ -895,7 +886,7 @@ static void write_action_end(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->write_action_end_locked,
write_action_end_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
// Callback from the grpc_endpoint after bytes have been written by calling
@ -904,8 +895,8 @@ static void write_action_end_locked(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
bool closed = false;
if (!GRPC_ERROR_IS_NONE(error)) {
close_transport_locked(t, GRPC_ERROR_REF(error));
if (!error.ok()) {
close_transport_locked(t, error);
closed = true;
}
@ -942,7 +933,7 @@ static void write_action_end_locked(void* tp, grpc_error_handle error) {
break;
}
grpc_chttp2_end_write(t, GRPC_ERROR_REF(error));
grpc_chttp2_end_write(t, error);
GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
}
@ -971,9 +962,8 @@ static void cancel_unstarted_streams(grpc_chttp2_transport* t,
s->trailing_metadata_buffer.Set(
grpc_core::GrpcStreamNetworkState(),
grpc_core::GrpcStreamNetworkState::kNotSentOnWire);
grpc_chttp2_cancel_stream(t, s, GRPC_ERROR_REF(error));
grpc_chttp2_cancel_stream(t, s, error);
}
GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
@ -981,8 +971,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
uint32_t last_stream_id,
absl::string_view goaway_text) {
// Discard the error from a previous goaway frame (if any)
if (!GRPC_ERROR_IS_NONE(t->goaway_error)) {
GRPC_ERROR_UNREF(t->goaway_error);
if (!t->goaway_error.ok()) {
}
t->goaway_error = grpc_error_set_str(
grpc_error_set_int(
@ -1002,7 +991,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
goaway_error, grpc_error_std_string(t->goaway_error).c_str());
}
if (t->is_client) {
cancel_unstarted_streams(t, GRPC_ERROR_REF(t->goaway_error));
cancel_unstarted_streams(t, t->goaway_error);
// Cancel all unseen streams
grpc_chttp2_stream_map_for_each(
&t->stream_map,
@ -1013,8 +1002,7 @@ void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
s->trailing_metadata_buffer.Set(
grpc_core::GrpcStreamNetworkState(),
grpc_core::GrpcStreamNetworkState::kNotSeenByServer);
grpc_chttp2_cancel_stream(s->t, s,
GRPC_ERROR_REF(s->t->goaway_error));
grpc_chttp2_cancel_stream(s->t, s, s->t->goaway_error);
}
},
&last_stream_id);
@ -1053,8 +1041,8 @@ static void maybe_start_some_streams(grpc_chttp2_transport* t) {
grpc_chttp2_stream* s;
// maybe cancel out streams that haven't yet started if we have received a
// GOAWAY
if (!GRPC_ERROR_IS_NONE(t->goaway_error)) {
cancel_unstarted_streams(t, GRPC_ERROR_REF(t->goaway_error));
if (!t->goaway_error.ok()) {
cancel_unstarted_streams(t, t->goaway_error);
return;
}
// start streams where we have free grpc_chttp2_stream ids and free
@ -1134,7 +1122,6 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
grpc_closure* closure = *pclosure;
*pclosure = nullptr;
if (closure == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
closure->next_data.scratch -= CLOSURE_BARRIER_FIRST_REF_BIT;
@ -1151,10 +1138,10 @@ void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
desc, grpc_error_std_string(error).c_str(),
write_state_name(t->write_state));
}
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_handle cl_err =
grpc_core::internal::StatusMoveFromHeapPtr(closure->error_data.error);
if (GRPC_ERROR_IS_NONE(cl_err)) {
if (cl_err.ok()) {
cl_err = GRPC_ERROR_CREATE_FROM_CPP_STRING(absl::StrCat(
"Error in HTTP transport completing operation: ", desc,
" write_state=", write_state_name(t->write_state), " refs=",
@ -1255,7 +1242,7 @@ static void perform_stream_op_locked(void* stream_op,
}
if (!s->write_closed) {
if (t->is_client) {
if (GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (t->closed_with_error.ok()) {
GPR_ASSERT(s->id == 0);
grpc_chttp2_list_add_waiting_for_concurrency(t, s);
maybe_start_some_streams(t);
@ -1481,21 +1468,18 @@ static void cancel_pings(grpc_chttp2_transport* t, grpc_error_handle error) {
// callback remaining pings: they're not allowed to call into the transport,
// and maybe they hold resources that need to be freed
grpc_chttp2_ping_queue* pq = &t->ping_queue;
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
grpc_closure_list_fail_all(&pq->lists[j], error);
grpc_core::ExecCtx::RunList(DEBUG_LOCATION, &pq->lists[j]);
}
GRPC_ERROR_UNREF(error);
}
static void send_ping_locked(grpc_chttp2_transport* t,
grpc_closure* on_initiate, grpc_closure* on_ack) {
if (!GRPC_ERROR_IS_NONE(t->closed_with_error)) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_initiate,
GRPC_ERROR_REF(t->closed_with_error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_ack,
GRPC_ERROR_REF(t->closed_with_error));
if (!t->closed_with_error.ok()) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_initiate, t->closed_with_error);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_ack, t->closed_with_error);
return;
}
grpc_chttp2_ping_queue* pq = &t->ping_queue;
@ -1509,14 +1493,14 @@ static void send_ping_locked(grpc_chttp2_transport* t,
// a ping in progress, the keepalive ping would piggyback onto that ping,
// instead of waiting for that ping to complete and then starting a new ping.
static void send_keepalive_ping_locked(grpc_chttp2_transport* t) {
if (!GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (!t->closed_with_error.ok()) {
t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(t->closed_with_error));
t->closed_with_error);
t->combiner->Run(
GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(t->closed_with_error));
t->closed_with_error);
return;
}
grpc_chttp2_ping_queue* pq = &t->ping_queue;
@ -1524,7 +1508,7 @@ static void send_keepalive_ping_locked(grpc_chttp2_transport* t) {
// There is a ping in flight. Add yourself to the inflight closure list.
t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(t->closed_with_error));
t->closed_with_error);
grpc_closure_list_append(
&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT],
GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
@ -1548,13 +1532,13 @@ void grpc_chttp2_retry_initiate_ping(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->retry_initiate_ping_locked,
retry_initiate_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void retry_initiate_ping_locked(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->ping_state.is_delayed_ping_timer_set = false;
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
}
GRPC_CHTTP2_UNREF_TRANSPORT(t, "retry_initiate_ping_locked");
@ -1610,7 +1594,7 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
// We already sent the final GOAWAY.
return;
}
if (t_->destroying || !GRPC_ERROR_IS_NONE(t_->closed_with_error)) {
if (t_->destroying || !t_->closed_with_error.ok()) {
GRPC_CHTTP2_IF_TRACING(gpr_log(
GPR_INFO,
"transport:%p %s peer:%s Transport already shutting down. "
@ -1647,7 +1631,7 @@ class GracefulGoaway : public grpc_core::RefCounted<GracefulGoaway> {
static void OnTimer(void* arg, grpc_error_handle error) {
auto* self = static_cast<GracefulGoaway*>(arg);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
self->Unref();
return;
}
@ -1697,7 +1681,6 @@ static void send_goaway(grpc_chttp2_transport* t, grpc_error_handle error,
// Final GOAWAY has already been sent.
}
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t) {
@ -1731,7 +1714,7 @@ static void perform_transport_op_locked(void* stream_op,
grpc_chttp2_transport* t =
static_cast<grpc_chttp2_transport*>(op->handler_private.extra_arg);
if (!GRPC_ERROR_IS_NONE(op->goaway_error)) {
if (!op->goaway_error.ok()) {
send_goaway(t, op->goaway_error, /*immediate_disconnect_hint=*/false);
}
@ -1761,8 +1744,8 @@ static void perform_transport_op_locked(void* stream_op,
t->state_tracker.RemoveWatcher(op->stop_connectivity_watch);
}
if (!GRPC_ERROR_IS_NONE(op->disconnect_with_error)) {
send_goaway(t, GRPC_ERROR_REF(op->disconnect_with_error),
if (!op->disconnect_with_error.ok()) {
send_goaway(t, op->disconnect_with_error,
/*immediate_disconnect_hint=*/true);
close_transport_locked(t, op->disconnect_with_error);
}
@ -1843,7 +1826,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
}
} else {
error = absl::get<grpc_error_handle>(r);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
s->seen_error = true;
grpc_slice_buffer_reset_and_unref(&s->frame_storage);
break;
@ -1864,7 +1847,7 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
}
// save the length of the buffer before handing control back to application
// threads. Needed to support correct flow control bookkeeping
if (GRPC_ERROR_IS_NONE(error) && s->recv_message->has_value()) {
if (error.ok() && s->recv_message->has_value()) {
null_then_sched_closure(&s->recv_message_ready);
} else if (s->published_metadata[1] != GRPC_METADATA_NOT_PUBLISHED) {
if (s->call_failed_before_recv_message != nullptr) {
@ -1873,7 +1856,6 @@ void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
}
null_then_sched_closure(&s->recv_message_ready);
}
GRPC_ERROR_UNREF(error);
}();
upd.SetPendingSize(s->frame_storage.length);
@ -1923,8 +1905,6 @@ static void remove_stream(grpc_chttp2_transport* t, uint32_t id,
grpc_chttp2_list_remove_stalled_by_stream(t, s);
grpc_chttp2_list_remove_stalled_by_transport(t, s);
GRPC_ERROR_UNREF(error);
maybe_start_some_streams(t);
}
@ -1946,7 +1926,7 @@ void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
}
}
if (!GRPC_ERROR_IS_NONE(due_to_error) && !s->seen_error) {
if (!due_to_error.ok() && !s->seen_error) {
s->seen_error = true;
}
grpc_chttp2_mark_stream_closed(t, s, 1, 1, due_to_error);
@ -1978,13 +1958,11 @@ void grpc_chttp2_fake_status(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
}
GRPC_ERROR_UNREF(error);
}
static void add_error(grpc_error_handle error, grpc_error_handle* refs,
size_t* nrefs) {
if (GRPC_ERROR_IS_NONE(error)) return;
if (error.ok()) return;
for (size_t i = 0; i < *nrefs; i++) {
if (error == refs[i]) {
return;
@ -2007,7 +1985,6 @@ static grpc_error_handle removal_error(grpc_error_handle extra_error,
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(main_error_msg,
refs, nrefs);
}
GRPC_ERROR_UNREF(extra_error);
return error;
}
@ -2017,12 +1994,11 @@ static void flush_write_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
while (*list) {
grpc_chttp2_write_cb* cb = *list;
*list = cb->next;
grpc_chttp2_complete_closure_step(t, s, &cb->closure, GRPC_ERROR_REF(error),
grpc_chttp2_complete_closure_step(t, s, &cb->closure, error,
"on_write_finished_cb");
cb->next = t->write_cb_pool;
t->write_cb_pool = cb;
}
GRPC_ERROR_UNREF(error);
}
void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t,
@ -2032,19 +2008,16 @@ void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t,
removal_error(error, s, "Pending writes failed due to stream closure");
s->send_initial_metadata = nullptr;
grpc_chttp2_complete_closure_step(t, s, &s->send_initial_metadata_finished,
GRPC_ERROR_REF(error),
"send_initial_metadata_finished");
error, "send_initial_metadata_finished");
s->send_trailing_metadata = nullptr;
s->sent_trailing_metadata_op = nullptr;
grpc_chttp2_complete_closure_step(t, s, &s->send_trailing_metadata_finished,
GRPC_ERROR_REF(error),
"send_trailing_metadata_finished");
error, "send_trailing_metadata_finished");
grpc_chttp2_complete_closure_step(t, s, &s->send_message_finished,
GRPC_ERROR_REF(error),
grpc_chttp2_complete_closure_step(t, s, &s->send_message_finished, error,
"fetching_send_message_finished");
flush_write_list(t, s, &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
flush_write_list(t, s, &s->on_write_finished_cbs, error);
flush_write_list(t, s, &s->on_flow_controlled_cbs, error);
}
@ -2054,7 +2027,7 @@ void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t,
if (s->read_closed && s->write_closed) {
// already closed, but we should still fake the status if needed.
grpc_error_handle overall_error = removal_error(error, s, "Stream removed");
if (!GRPC_ERROR_IS_NONE(overall_error)) {
if (!overall_error.ok()) {
grpc_chttp2_fake_status(t, s, overall_error);
}
grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
@ -2063,26 +2036,25 @@ void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t,
bool closed_read = false;
bool became_closed = false;
if (close_reads && !s->read_closed) {
s->read_closed_error = GRPC_ERROR_REF(error);
s->read_closed_error = error;
s->read_closed = true;
closed_read = true;
}
if (close_writes && !s->write_closed) {
s->write_closed_error = GRPC_ERROR_REF(error);
s->write_closed_error = error;
s->write_closed = true;
grpc_chttp2_fail_pending_writes(t, s, GRPC_ERROR_REF(error));
grpc_chttp2_fail_pending_writes(t, s, error);
}
if (s->read_closed && s->write_closed) {
became_closed = true;
grpc_error_handle overall_error =
removal_error(GRPC_ERROR_REF(error), s, "Stream removed");
grpc_error_handle overall_error = removal_error(error, s, "Stream removed");
if (s->id != 0) {
remove_stream(t, s->id, GRPC_ERROR_REF(overall_error));
remove_stream(t, s->id, overall_error);
} else {
// Purge streams waiting on concurrency still waiting for id assignment
grpc_chttp2_list_remove_waiting_for_concurrency(t, s);
}
if (!GRPC_ERROR_IS_NONE(overall_error)) {
if (!overall_error.ok()) {
grpc_chttp2_fake_status(t, s, overall_error);
}
}
@ -2099,7 +2071,6 @@ void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t,
grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
GRPC_CHTTP2_STREAM_UNREF(s, "chttp2");
}
GRPC_ERROR_UNREF(error);
}
static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
@ -2269,7 +2240,7 @@ struct cancel_stream_cb_args {
static void cancel_stream_cb(void* user_data, uint32_t /*key*/, void* stream) {
cancel_stream_cb_args* args = static_cast<cancel_stream_cb_args*>(user_data);
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(stream);
grpc_chttp2_cancel_stream(args->t, s, GRPC_ERROR_REF(args->error));
grpc_chttp2_cancel_stream(args->t, s, args->error);
}
static void end_all_the_calls(grpc_chttp2_transport* t,
@ -2281,10 +2252,9 @@ static void end_all_the_calls(grpc_chttp2_transport* t,
error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
GRPC_STATUS_UNAVAILABLE);
}
cancel_unstarted_streams(t, GRPC_ERROR_REF(error));
cancel_unstarted_streams(t, error);
cancel_stream_cb_args args = {error, t};
grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args);
GRPC_ERROR_UNREF(error);
}
//
@ -2340,11 +2310,11 @@ static grpc_error_handle try_http_parsing(grpc_chttp2_transport* t) {
grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
grpc_error_handle parse_error = GRPC_ERROR_NONE;
for (; i < t->read_buffer.count && GRPC_ERROR_IS_NONE(parse_error); i++) {
for (; i < t->read_buffer.count && parse_error.ok(); i++) {
parse_error =
grpc_http_parser_parse(&parser, t->read_buffer.slices[i], nullptr);
}
if (GRPC_ERROR_IS_NONE(parse_error) &&
if (parse_error.ok() &&
(parse_error = grpc_http_parser_eof(&parser)) == GRPC_ERROR_NONE) {
error = grpc_error_set_int(
grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@ -2353,7 +2323,6 @@ static grpc_error_handle try_http_parsing(grpc_chttp2_transport* t) {
GRPC_ERROR_INT_GRPC_STATUS,
grpc_http2_status_to_grpc_status(response.status));
}
GRPC_ERROR_UNREF(parse_error);
grpc_http_parser_destroy(&parser);
grpc_http_response_destroy(&response);
@ -2364,38 +2333,32 @@ static void read_action(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->combiner->Run(
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void read_action_locked(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
(void)GRPC_ERROR_REF(error);
grpc_error_handle err = error;
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
err = grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Endpoint read failed", &err, 1),
GRPC_ERROR_INT_OCCURRED_DURING_WRITE,
t->write_state);
}
std::swap(err, error);
GRPC_ERROR_UNREF(err);
if (GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (t->closed_with_error.ok()) {
size_t i = 0;
grpc_error_handle errors[3] = {GRPC_ERROR_REF(error), GRPC_ERROR_NONE,
GRPC_ERROR_NONE};
grpc_error_handle errors[3] = {error, GRPC_ERROR_NONE, GRPC_ERROR_NONE};
for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
errors[1] = grpc_chttp2_perform_read(t, t->read_buffer.slices[i]);
}
if (errors[1] != GRPC_ERROR_NONE) {
errors[2] = try_http_parsing(t);
GRPC_ERROR_UNREF(error);
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed parsing HTTP/2", errors, GPR_ARRAY_SIZE(errors));
}
for (i = 0; i < GPR_ARRAY_SIZE(errors); i++) {
GRPC_ERROR_UNREF(errors[i]);
}
if (t->initial_window_update != 0) {
@ -2412,20 +2375,20 @@ static void read_action_locked(void* tp, grpc_error_handle error) {
}
bool keep_reading = false;
if (GRPC_ERROR_IS_NONE(error) && !GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (error.ok() && !t->closed_with_error.ok()) {
error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Transport closed", &t->closed_with_error, 1);
}
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
// If a goaway frame was received, this might be the reason why the read
// failed. Add this info to the error
if (!GRPC_ERROR_IS_NONE(t->goaway_error)) {
error = grpc_error_add_child(error, GRPC_ERROR_REF(t->goaway_error));
if (!t->goaway_error.ok()) {
error = grpc_error_add_child(error, t->goaway_error);
}
close_transport_locked(t, GRPC_ERROR_REF(error));
close_transport_locked(t, error);
t->endpoint_reading = 0;
} else if (GRPC_ERROR_IS_NONE(t->closed_with_error)) {
} else if (t->closed_with_error.ok()) {
keep_reading = true;
// Since we have read a byte, reset the keepalive timer
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) {
@ -2448,12 +2411,10 @@ static void read_action_locked(void* tp, grpc_error_handle error) {
} else {
GRPC_CHTTP2_UNREF_TRANSPORT(t, "reading_action");
}
GRPC_ERROR_UNREF(error);
}
static void continue_read_action_locked(grpc_chttp2_transport* t) {
const bool urgent = !GRPC_ERROR_IS_NONE(t->goaway_error);
const bool urgent = !t->goaway_error.ok();
GRPC_CLOSURE_INIT(&t->read_action_locked, read_action, t,
grpc_schedule_on_exec_ctx);
grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked, urgent,
@ -2477,7 +2438,7 @@ static void start_bdp_ping(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_bdp_ping_locked,
start_bdp_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void start_bdp_ping_locked(void* tp, grpc_error_handle error) {
@ -2486,7 +2447,7 @@ static void start_bdp_ping_locked(void* tp, grpc_error_handle error) {
gpr_log(GPR_INFO, "%s: Start BDP ping err=%s", t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
}
if (!GRPC_ERROR_IS_NONE(error) || !GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (!error.ok() || !t->closed_with_error.ok()) {
return;
}
// Reset the keepalive ping timer
@ -2501,7 +2462,7 @@ static void finish_bdp_ping(void* tp, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked,
finish_bdp_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) {
@ -2510,7 +2471,7 @@ static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) {
gpr_log(GPR_INFO, "%s: Complete BDP ping err=%s", t->peer_string.c_str(),
grpc_error_std_string(error).c_str());
}
if (!GRPC_ERROR_IS_NONE(error) || !GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (!error.ok() || !t->closed_with_error.ok()) {
GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
return;
}
@ -2519,7 +2480,7 @@ static void finish_bdp_ping_locked(void* tp, grpc_error_handle error) {
// finish_bdp_ping_locked to be run later.
t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_bdp_ping_locked,
finish_bdp_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
return;
}
t->bdp_ping_started = false;
@ -2540,7 +2501,7 @@ static void next_bdp_ping_timer_expired(void* tp, grpc_error_handle error) {
t->combiner->Run(
GRPC_CLOSURE_INIT(&t->next_bdp_ping_timer_expired_locked,
next_bdp_ping_timer_expired_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void next_bdp_ping_timer_expired_locked(void* tp,
@ -2548,7 +2509,7 @@ static void next_bdp_ping_timer_expired_locked(void* tp,
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(tp);
GPR_ASSERT(t->have_next_bdp_ping_timer);
t->have_next_bdp_ping_timer = false;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
return;
}
@ -2624,15 +2585,15 @@ static void init_keepalive_ping(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->init_keepalive_ping_locked,
init_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void init_keepalive_ping_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
if (t->destroying || !GRPC_ERROR_IS_NONE(t->closed_with_error)) {
if (t->destroying || !t->closed_with_error.ok()) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
} else if (GRPC_ERROR_IS_NONE(error)) {
} else if (error.ok()) {
if (t->keepalive_permit_without_calls ||
grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
@ -2669,12 +2630,12 @@ static void start_keepalive_ping(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->start_keepalive_ping_locked,
start_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void start_keepalive_ping_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
return;
}
if (t->channelz_socket != nullptr) {
@ -2697,13 +2658,13 @@ static void finish_keepalive_ping(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
t->combiner->Run(GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void finish_keepalive_ping_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace) ||
GRPC_TRACE_FLAG_ENABLED(grpc_keepalive_trace)) {
gpr_log(GPR_INFO, "%s: Finish keepalive ping", t->peer_string.c_str());
@ -2714,7 +2675,7 @@ static void finish_keepalive_ping_locked(void* arg, grpc_error_handle error) {
t->combiner->Run(
GRPC_CLOSURE_INIT(&t->finish_keepalive_ping_locked,
finish_keepalive_ping_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
return;
}
t->keepalive_ping_started = false;
@ -2736,14 +2697,14 @@ static void keepalive_watchdog_fired(void* arg, grpc_error_handle error) {
t->combiner->Run(
GRPC_CLOSURE_INIT(&t->keepalive_watchdog_fired_locked,
keepalive_watchdog_fired_locked, t, nullptr),
GRPC_ERROR_REF(error));
error);
}
static void keepalive_watchdog_fired_locked(void* arg,
grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
gpr_log(GPR_INFO, "%s: Keepalive watchdog fired. Closing transport.",
t->peer_string.c_str());
t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
@ -2839,8 +2800,7 @@ static void post_destructive_reclaimer(grpc_chttp2_transport* t) {
static void benign_reclaimer_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
if (GRPC_ERROR_IS_NONE(error) &&
grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
if (error.ok() && grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
// Channel with no active streams: send a goaway to try and make it
// disconnect cleanly
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
@ -2852,8 +2812,7 @@ static void benign_reclaimer_locked(void* arg, grpc_error_handle error) {
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM),
/*immediate_disconnect_hint=*/true);
} else if (GRPC_ERROR_IS_NONE(error) &&
GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
} else if (error.ok() && GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {
gpr_log(GPR_INFO,
"HTTP2: %s - skip benign reclamation, there are still %" PRIdPTR
" streams",
@ -2871,7 +2830,7 @@ static void destructive_reclaimer_locked(void* arg, grpc_error_handle error) {
grpc_chttp2_transport* t = static_cast<grpc_chttp2_transport*>(arg);
size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
t->destructive_reclaimer_registered = false;
if (GRPC_ERROR_IS_NONE(error) && n > 0) {
if (error.ok() && n > 0) {
grpc_chttp2_stream* s = static_cast<grpc_chttp2_stream*>(
grpc_chttp2_stream_map_rand(&t->stream_map));
if (GRPC_TRACE_FLAG_ENABLED(grpc_resource_quota_trace)) {

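All of the chttp2 transport hunks above apply one mechanical rule: `grpc_error_handle` is now backed by `absl::Status`, so `GRPC_ERROR_IS_NONE(e)` becomes `e.ok()`, a `GRPC_ERROR_REF(e)` argument becomes plain `e` (the status is copied by value), and the trailing `GRPC_ERROR_UNREF(e)` calls disappear because the value cleans up after itself. A minimal sketch of the before/after shape; the `on_read` wrapper and `schedule` callback here are hypothetical stand-ins for the real combiner plumbing:

```cpp
#include <utility>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"

// Sketch-level alias; in the tree the real typedef lives in error.h.
using grpc_error_handle = absl::Status;

// Old shape (refcounted):
//   if (!GRPC_ERROR_IS_NONE(error)) schedule(GRPC_ERROR_REF(error));
//   GRPC_ERROR_UNREF(error);
//
// New shape (value semantics):
void on_read(grpc_error_handle error, void (*schedule)(grpc_error_handle)) {
  if (!error.ok()) {
    // Annotating the error just produces a fresh value; no ref bookkeeping.
    error = absl::Status(error.code(),
                         absl::StrCat("Endpoint read failed: ", error.message()));
  }
  schedule(std::move(error));  // hand the value off; nothing left to unref
}
```
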
@ -593,8 +593,7 @@ class HPackParser::Input {
// Set the current error - allows the rest of the code not to need to pass
// around StatusOr<> which would be prohibitive here.
GPR_ATTRIBUTE_NOINLINE void SetError(grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error_) || eof_error_) {
GRPC_ERROR_UNREF(error);
if (!error_.ok() || eof_error_) {
return;
}
error_ = error;
@ -606,7 +605,7 @@ class HPackParser::Input {
template <typename F, typename T>
GPR_ATTRIBUTE_NOINLINE T MaybeSetErrorAndReturn(F error_factory,
T return_value) {
if (!GRPC_ERROR_IS_NONE(error_) || eof_error_) return return_value;
if (!error_.ok() || eof_error_) return return_value;
error_ = error_factory();
begin_ = end_;
return return_value;
@ -616,7 +615,7 @@ class HPackParser::Input {
// is a common case)
template <typename T>
T UnexpectedEOF(T return_value) {
if (!GRPC_ERROR_IS_NONE(error_)) return return_value;
if (!error_.ok()) return return_value;
eof_error_ = true;
return return_value;
}
@ -1084,7 +1083,7 @@ class HPackParser::Parser {
auto r = EmitHeader(*md);
// Add to the hpack table
grpc_error_handle err = table_->Add(std::move(*md));
if (GPR_UNLIKELY(!GRPC_ERROR_IS_NONE(err))) {
if (GPR_UNLIKELY(!err.ok())) {
input_->SetError(err);
return false;
};
@ -1179,7 +1178,7 @@ class HPackParser::Parser {
}
(*dynamic_table_updates_allowed_)--;
grpc_error_handle err = table_->SetCurrentTableSize(*size);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
input_->SetError(err);
return false;
}
@ -1353,7 +1352,7 @@ grpc_error_handle grpc_chttp2_header_parser_parse(void* hpack_parser,
s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
}
grpc_error_handle error = parser->Parse(slice, is_last != 0);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
return error;
}
if (is_last) {

@ -248,12 +248,12 @@ grpc_error_handle grpc_chttp2_perform_read(grpc_chttp2_transport* t,
t->incoming_stream_id |= (static_cast<uint32_t>(*cur));
t->deframe_state = GRPC_DTS_FRAME;
err = init_frame_parser(t);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
if (t->incoming_frame_size == 0) {
err = parse_frame_slice(t, grpc_empty_slice(), 1);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
t->incoming_stream = nullptr;
@ -283,7 +283,7 @@ grpc_error_handle grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice_sub_no_ref(slice, static_cast<size_t>(cur - beg),
static_cast<size_t>(end - beg)),
1);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
t->deframe_state = GRPC_DTS_FH_0;
@ -296,7 +296,7 @@ grpc_error_handle grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice_sub_no_ref(slice, cur_offset,
cur_offset + t->incoming_frame_size),
1);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
cur += t->incoming_frame_size;
@ -308,7 +308,7 @@ grpc_error_handle grpc_chttp2_perform_read(grpc_chttp2_transport* t,
grpc_slice_sub_no_ref(slice, static_cast<size_t>(cur - beg),
static_cast<size_t>(end - beg)),
0);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
t->incoming_frame_size -= static_cast<uint32_t>(end - cur);
@ -630,7 +630,7 @@ static grpc_error_handle init_window_update_frame_parser(
grpc_error_handle err = grpc_chttp2_window_update_parser_begin_frame(
&t->simple.window_update, t->incoming_frame_size,
t->incoming_frame_flags);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
if (t->incoming_stream_id != 0) {
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
@ -647,7 +647,7 @@ static grpc_error_handle init_window_update_frame_parser(
static grpc_error_handle init_ping_parser(grpc_chttp2_transport* t) {
grpc_error_handle err = grpc_chttp2_ping_parser_begin_frame(
&t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
t->parser = grpc_chttp2_ping_parser_parse;
t->parser_data = &t->simple.ping;
return GRPC_ERROR_NONE;
@ -656,7 +656,7 @@ static grpc_error_handle init_ping_parser(grpc_chttp2_transport* t) {
static grpc_error_handle init_rst_stream_parser(grpc_chttp2_transport* t) {
grpc_error_handle err = grpc_chttp2_rst_stream_parser_begin_frame(
&t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
grpc_chttp2_stream* s = t->incoming_stream =
grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
if (!t->incoming_stream) {
@ -671,7 +671,7 @@ static grpc_error_handle init_rst_stream_parser(grpc_chttp2_transport* t) {
static grpc_error_handle init_goaway_parser(grpc_chttp2_transport* t) {
grpc_error_handle err = grpc_chttp2_goaway_parser_begin_frame(
&t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
t->parser = grpc_chttp2_goaway_parser_parse;
t->parser_data = &t->goaway_parser;
return GRPC_ERROR_NONE;
@ -686,7 +686,7 @@ static grpc_error_handle init_settings_frame_parser(grpc_chttp2_transport* t) {
grpc_error_handle err = grpc_chttp2_settings_parser_begin_frame(
&t->simple.settings, t->incoming_frame_size, t->incoming_frame_flags,
t->settings[GRPC_PEER_SETTINGS]);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_ACK) {
@ -711,7 +711,7 @@ static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
grpc_chttp2_stream* s = t->incoming_stream;
grpc_error_handle err = t->parser(t->parser_data, t, s, slice, is_last);
intptr_t unused;
if (GPR_LIKELY(GRPC_ERROR_IS_NONE(err))) {
if (GPR_LIKELY(err.ok())) {
return err;
} else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, &unused)) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_http_trace)) {
@ -723,8 +723,6 @@ static grpc_error_handle parse_frame_slice(grpc_chttp2_transport* t,
grpc_chttp2_add_rst_stream_to_next_write(t, t->incoming_stream_id,
GRPC_HTTP2_PROTOCOL_ERROR,
&s->stats.outgoing);
} else {
GRPC_ERROR_UNREF(err);
}
}
return err;

@ -192,13 +192,12 @@ static bool update_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
grpc_chttp2_write_cb* next = cb->next;
if (cb->call_at_byte <= *ctr) {
sched_any = true;
finish_write_cb(t, s, cb, GRPC_ERROR_REF(error));
finish_write_cb(t, s, cb, error);
} else {
add_to_write_list(list, cb);
}
cb = next;
}
GRPC_ERROR_UNREF(error);
return sched_any;
}
@ -316,7 +315,7 @@ class WriteContext {
void UpdateStreamsNoLongerStalled() {
grpc_chttp2_stream* s;
while (grpc_chttp2_list_pop_stalled_by_transport(t_, &s)) {
if (GRPC_ERROR_IS_NONE(t_->closed_with_error) &&
if (t_->closed_with_error.ok() &&
grpc_chttp2_list_add_writable_stream(t_, s)) {
if (!s->refcount->refs.RefIfNonZero()) {
grpc_chttp2_list_remove_writable_stream(t_, s);
@ -672,11 +671,10 @@ void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error_handle error) {
if (s->sending_bytes != 0) {
update_list(t, s, static_cast<int64_t>(s->sending_bytes),
&s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
GRPC_ERROR_REF(error));
error);
s->sending_bytes = 0;
}
GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:end");
}
grpc_slice_buffer_reset_and_unref(&t->outbuf);
GRPC_ERROR_UNREF(error);
}

@ -1295,7 +1295,7 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_RECV_TRAILING_METADATA", oas);
grpc_error_handle error = GRPC_ERROR_NONE;
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
error = GRPC_ERROR_REF(stream_state->cancel_error);
error = stream_state->cancel_error;
} else if (stream_state->state_callback_received[OP_FAILED]) {
grpc_status_code grpc_error_code =
cronet_net_error_to_grpc_error(stream_state->net_error);
@ -1324,16 +1324,16 @@ static enum e_op_result execute_stream_op(struct op_and_state* oas) {
result = ACTION_TAKEN_NO_CALLBACK;
}
stream_state->state_op_done[OP_CANCEL_ERROR] = true;
if (GRPC_ERROR_IS_NONE(stream_state->cancel_error)) {
if (stream_state->cancel_error.ok()) {
stream_state->cancel_error =
GRPC_ERROR_REF(stream_op->payload->cancel_stream.cancel_error);
stream_op->payload->cancel_stream.cancel_error;
}
} else if (op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
CRONET_LOG(GPR_DEBUG, "running: %p OP_ON_COMPLETE", oas);
if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
if (stream_op->on_complete) {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, stream_op->on_complete,
GRPC_ERROR_REF(stream_state->cancel_error));
stream_state->cancel_error);
}
} else if (stream_state->state_callback_received[OP_FAILED]) {
if (stream_op->on_complete) {
@ -1391,10 +1391,7 @@ inline stream_obj::stream_obj(grpc_transport* gt, grpc_stream* gs,
gpr_mu_init(&mu);
}
inline stream_obj::~stream_obj() {
null_and_maybe_free_read_buffer(this);
GRPC_ERROR_UNREF(state.cancel_error);
}
inline stream_obj::~stream_obj() { null_and_maybe_free_read_buffer(this); }
static int init_stream(grpc_transport* gt, grpc_stream* gs,
grpc_stream_refcount* refcount,

@ -203,7 +203,7 @@ struct inproc_stream {
cs->write_buffer_trailing_md.Clear();
cs->write_buffer_trailing_md_filled = false;
}
if (!GRPC_ERROR_IS_NONE(cs->write_buffer_cancel_error)) {
if (!cs->write_buffer_cancel_error.ok()) {
cancel_other_error = cs->write_buffer_cancel_error;
cs->write_buffer_cancel_error = GRPC_ERROR_NONE;
maybe_process_ops_locked(this, cancel_other_error);
@ -213,13 +213,7 @@ struct inproc_stream {
}
}
~inproc_stream() {
GRPC_ERROR_UNREF(write_buffer_cancel_error);
GRPC_ERROR_UNREF(cancel_self_error);
GRPC_ERROR_UNREF(cancel_other_error);
t->unref();
}
~inproc_stream() { t->unref(); }
#ifndef NDEBUG
#define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
@ -415,13 +409,12 @@ void complete_if_batch_end_locked(inproc_stream* s, grpc_error_handle error,
if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
INPROC_LOG(GPR_INFO, "%s %p %p %s", msg, s, op,
grpc_error_std_string(error).c_str());
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete,
GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, op->on_complete, error);
}
}
void maybe_process_ops_locked(inproc_stream* s, grpc_error_handle error) {
if (s && (!GRPC_ERROR_IS_NONE(error) || s->ops_needed)) {
if (s && (!error.ok() || s->ops_needed)) {
s->ops_needed = false;
op_state_machine_locked(s, error);
}
@ -445,12 +438,12 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
fill_in_metadata(s, &fake_md, dest, destfilled);
if (other != nullptr) {
if (GRPC_ERROR_IS_NONE(other->cancel_other_error)) {
other->cancel_other_error = GRPC_ERROR_REF(error);
if (other->cancel_other_error.ok()) {
other->cancel_other_error = error;
}
maybe_process_ops_locked(other, error);
} else if (GRPC_ERROR_IS_NONE(s->write_buffer_cancel_error)) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
} else if (s->write_buffer_cancel_error.ok()) {
s->write_buffer_cancel_error = error;
}
}
if (s->recv_initial_md_op) {
@ -470,7 +463,7 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
nullptr);
err = GRPC_ERROR_NONE;
} else {
err = GRPC_ERROR_REF(error);
err = error;
}
if (s->recv_initial_md_op->payload->recv_initial_metadata
.trailing_metadata_available != nullptr) {
@ -506,8 +499,7 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
}
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
s->recv_message_op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
s->recv_message_op->payload->recv_message.recv_message_ready, error);
complete_if_batch_end_locked(
s, error, s->recv_message_op,
"fail_helper scheduling recv-message-on-complete");
@ -533,7 +525,7 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
DEBUG_LOCATION,
s->recv_trailing_md_op->payload->recv_trailing_metadata
.recv_trailing_metadata_ready,
GRPC_ERROR_REF(error));
error);
INPROC_LOG(GPR_INFO, "fail_helper %p scheduling trailing-md-on-complete %s",
s, grpc_error_std_string(error).c_str());
complete_if_batch_end_locked(
@ -543,8 +535,6 @@ void fail_helper_locked(inproc_stream* s, grpc_error_handle error) {
}
close_other_side_locked(s, "fail_helper:other_side");
close_stream_locked(s);
GRPC_ERROR_UNREF(error);
}
// TODO(vjpai): It should not be necessary to drain the incoming byte
@ -592,14 +582,14 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
// cancellation takes precedence
inproc_stream* other = s->other_side;
if (!GRPC_ERROR_IS_NONE(s->cancel_self_error)) {
fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_self_error));
if (!s->cancel_self_error.ok()) {
fail_helper_locked(s, s->cancel_self_error);
goto done;
} else if (!GRPC_ERROR_IS_NONE(s->cancel_other_error)) {
fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_other_error));
} else if (!s->cancel_other_error.ok()) {
fail_helper_locked(s, s->cancel_other_error);
goto done;
} else if (!GRPC_ERROR_IS_NONE(error)) {
fail_helper_locked(s, GRPC_ERROR_REF(error));
} else if (!error.ok()) {
fail_helper_locked(s, error);
goto done;
}
@ -637,7 +627,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
// The buffer is already in use; that's an error!
INPROC_LOG(GPR_INFO, "Extra trailing metadata %p", s);
new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
fail_helper_locked(s, new_err);
goto done;
} else {
if (!other || !other->closed) {
@ -682,7 +672,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
"op_state_machine %p scheduling on_complete errors for already "
"recvd initial md %s",
s, grpc_error_std_string(new_err).c_str());
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
fail_helper_locked(s, new_err);
goto done;
}
@ -740,7 +730,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
"op_state_machine %p scheduling on_complete errors for already "
"recvd trailing md %s",
s, grpc_error_std_string(new_err).c_str());
fail_helper_locked(s, GRPC_ERROR_REF(new_err));
fail_helper_locked(s, new_err);
goto done;
}
}
@ -813,7 +803,7 @@ void op_state_machine_locked(inproc_stream* s, grpc_error_handle error) {
DEBUG_LOCATION,
s->recv_trailing_md_op->payload->recv_trailing_metadata
.recv_trailing_metadata_ready,
GRPC_ERROR_REF(new_err));
new_err);
complete_if_batch_end_locked(
s, new_err, s->recv_trailing_md_op,
"op_state_machine scheduling recv-trailing-md-on-complete");
@ -861,16 +851,15 @@ done:
close_other_side_locked(s, "op_state_machine");
close_stream_locked(s);
}
GRPC_ERROR_UNREF(new_err);
}
bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
bool ret = false; // was the cancel accepted
INPROC_LOG(GPR_INFO, "cancel_stream %p with %s", s,
grpc_error_std_string(error).c_str());
if (GRPC_ERROR_IS_NONE(s->cancel_self_error)) {
if (s->cancel_self_error.ok()) {
ret = true;
s->cancel_self_error = GRPC_ERROR_REF(error);
s->cancel_self_error = error;
// Catch current value of other before it gets closed off
inproc_stream* other = s->other_side;
maybe_process_ops_locked(s, s->cancel_self_error);
@ -888,12 +877,12 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
fill_in_metadata(s, &cancel_md, dest, destfilled);
if (other != nullptr) {
if (GRPC_ERROR_IS_NONE(other->cancel_other_error)) {
other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error);
if (other->cancel_other_error.ok()) {
other->cancel_other_error = s->cancel_self_error;
}
maybe_process_ops_locked(other, other->cancel_other_error);
} else if (GRPC_ERROR_IS_NONE(s->write_buffer_cancel_error)) {
s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error);
} else if (s->write_buffer_cancel_error.ok()) {
s->write_buffer_cancel_error = s->cancel_self_error;
}
// if we are a server and already received trailing md but
@ -904,7 +893,7 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
DEBUG_LOCATION,
s->recv_trailing_md_op->payload->recv_trailing_metadata
.recv_trailing_metadata_ready,
GRPC_ERROR_REF(s->cancel_self_error));
s->cancel_self_error);
complete_if_batch_end_locked(
s, s->cancel_self_error, s->recv_trailing_md_op,
"cancel_stream scheduling trailing-md-on-complete");
@ -915,7 +904,6 @@ bool cancel_stream_locked(inproc_stream* s, grpc_error_handle error) {
close_other_side_locked(s, "cancel_stream:other_side");
close_stream_locked(s);
GRPC_ERROR_UNREF(error);
return ret;
}
@ -955,9 +943,9 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
// this function is responsible to make sure that that field gets unref'ed
cancel_stream_locked(s, op->payload->cancel_stream.cancel_error);
// this op can complete without an error
} else if (!GRPC_ERROR_IS_NONE(s->cancel_self_error)) {
} else if (!s->cancel_self_error.ok()) {
// already self-canceled so still give it an error
error = GRPC_ERROR_REF(s->cancel_self_error);
error = s->cancel_self_error;
} else {
INPROC_LOG(GPR_INFO, "perform_stream_op %p %s%s%s%s%s%s%s", s,
s->t->is_client ? "client" : "server",
@ -970,12 +958,11 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
inproc_stream* other = s->other_side;
if (GRPC_ERROR_IS_NONE(error) &&
(op->send_initial_metadata || op->send_trailing_metadata)) {
if (error.ok() && (op->send_initial_metadata || op->send_trailing_metadata)) {
if (s->t->is_closed) {
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
}
if (GRPC_ERROR_IS_NONE(error) && op->send_initial_metadata) {
if (error.ok() && op->send_initial_metadata) {
grpc_metadata_batch* dest = (other == nullptr)
? &s->write_buffer_initial_md
: &other->to_read_initial_md;
@ -1005,10 +992,9 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
}
if (GRPC_ERROR_IS_NONE(error) &&
(op->send_message || op->send_trailing_metadata ||
op->recv_initial_metadata || op->recv_message ||
op->recv_trailing_metadata)) {
if (error.ok() && (op->send_message || op->send_trailing_metadata ||
op->recv_initial_metadata || op->recv_message ||
op->recv_trailing_metadata)) {
// Mark ops that need to be processed by the state machine
if (op->send_message) {
s->send_message_op = op;
@ -1045,7 +1031,7 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
s->ops_needed = true;
}
} else {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
// Consume any send message that was sent here but that we are not
// pushing to the other side
if (op->send_message) {
@ -1069,7 +1055,7 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
op->payload->recv_initial_metadata.recv_initial_metadata_ready,
GRPC_ERROR_REF(error));
error);
}
if (op->recv_message) {
INPROC_LOG(
@ -1082,7 +1068,7 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION,
op->payload->recv_message.recv_message_ready,
GRPC_ERROR_REF(error));
error);
}
if (op->recv_trailing_metadata) {
INPROC_LOG(GPR_INFO,
@ -1092,15 +1078,14 @@ void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
grpc_core::ExecCtx::Run(
DEBUG_LOCATION,
op->payload->recv_trailing_metadata.recv_trailing_metadata_ready,
GRPC_ERROR_REF(error));
error);
}
}
INPROC_LOG(GPR_INFO, "perform_stream_op %p scheduling on_complete %s", s,
grpc_error_std_string(error).c_str());
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_complete, GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_complete, error);
}
gpr_mu_unlock(mu);
GRPC_ERROR_UNREF(error);
}
void close_transport_locked(inproc_transport* t) {
@ -1141,13 +1126,11 @@ void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
}
bool do_close = false;
if (!GRPC_ERROR_IS_NONE(op->goaway_error)) {
if (!op->goaway_error.ok()) {
do_close = true;
GRPC_ERROR_UNREF(op->goaway_error);
}
if (!GRPC_ERROR_IS_NONE(op->disconnect_with_error)) {
if (!op->disconnect_with_error.ok()) {
do_close = true;
GRPC_ERROR_UNREF(op->disconnect_with_error);
}
if (do_close) {
@ -1251,7 +1234,7 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
grpc_error_handle error = core_server->SetupTransport(
server_transport, nullptr, server_args, nullptr);
grpc_channel* channel = nullptr;
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
auto new_channel = grpc_core::Channel::Create(
"inproc", client_args, GRPC_CLIENT_DIRECT_CHANNEL, client_transport);
if (!new_channel.ok()) {
@ -1263,7 +1246,6 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) {
status = static_cast<grpc_status_code>(integer);
}
GRPC_ERROR_UNREF(error);
// client_transport was destroyed when grpc_channel_create_internal saw an
// error.
grpc_transport_destroy(server_transport);
@ -1281,7 +1263,6 @@ grpc_channel* grpc_inproc_channel_create(grpc_server* server,
if (grpc_error_get_int(error, GRPC_ERROR_INT_GRPC_STATUS, &integer)) {
status = static_cast<grpc_status_code>(integer);
}
GRPC_ERROR_UNREF(error);
grpc_transport_destroy(client_transport);
grpc_transport_destroy(server_transport);
channel = grpc_lame_client_channel_create(

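The cronet and inproc changes above show the same rule applied to stored error members: fields like `cancel_self_error` are plain status values now, so storing one is a copy-assignment and the destructors drop their `GRPC_ERROR_UNREF` calls entirely. A reduced sketch of that ownership model, with a hypothetical `FakeStream` standing in for `inproc_stream`:

```cpp
#include "absl/status/status.h"

// Stripped-down stand-in for inproc_stream: the stored error is a value
// member, so there is no explicit cleanup anywhere.
struct FakeStream {
  absl::Status cancel_self_error;  // defaults to OK, the old GRPC_ERROR_NONE

  // Mirrors the shape of cancel_stream_locked: record only the first reason.
  bool Cancel(absl::Status error) {
    if (!cancel_self_error.ok()) return false;  // already cancelled
    cancel_self_error = error;                  // copy replaces GRPC_ERROR_REF
    return true;
  }
  // No destructor needed: the member releases its own representation.
};
```
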
@ -77,9 +77,8 @@ void CertificateProviderStore::PluginDefinition::JsonPostLoad(
grpc_error_handle parse_error = GRPC_ERROR_NONE;
config =
factory->CreateCertificateProviderConfig(config_json, &parse_error);
if (!GRPC_ERROR_IS_NONE(parse_error)) {
if (!parse_error.ok()) {
errors->AddError(grpc_error_std_string(parse_error));
GRPC_ERROR_UNREF(parse_error);
}
}
}

@ -59,12 +59,11 @@ class RootCertificatesWatcher
}
void OnError(grpc_error_handle root_cert_error,
grpc_error_handle identity_cert_error) override {
if (!GRPC_ERROR_IS_NONE(root_cert_error)) {
grpc_error_handle /*identity_cert_error*/) override {
if (!root_cert_error.ok()) {
parent_->SetErrorForCert(cert_name_, root_cert_error /* pass the ref */,
absl::nullopt);
}
GRPC_ERROR_UNREF(identity_cert_error);
}
private:
@ -93,13 +92,12 @@ class IdentityCertificatesWatcher
}
}
void OnError(grpc_error_handle root_cert_error,
void OnError(grpc_error_handle /*root_cert_error*/,
grpc_error_handle identity_cert_error) override {
if (!GRPC_ERROR_IS_NONE(identity_cert_error)) {
if (!identity_cert_error.ok()) {
parent_->SetErrorForCert(cert_name_, absl::nullopt,
identity_cert_error /* pass the ref */);
}
GRPC_ERROR_UNREF(root_cert_error);
}
private:

@ -90,7 +90,7 @@ absl::StatusOr<std::string> GetBootstrapContents(const char* fallback_config) {
grpc_slice contents;
grpc_error_handle error =
grpc_load_file(path->c_str(), /*add_null_terminator=*/true, &contents);
if (!GRPC_ERROR_IS_NONE(error)) return grpc_error_to_absl_status(error);
if (!error.ok()) return grpc_error_to_absl_status(error);
std::string contents_str(StringViewFromSlice(contents));
grpc_slice_unref(contents);
return contents_str;

@ -177,7 +177,7 @@ void GrpcXdsTransportFactory::GrpcXdsTransport::GrpcStreamingCall::
grpc_byte_buffer_destroy(self->send_message_payload_);
self->send_message_payload_ = nullptr;
// Invoke request handler.
self->event_handler_->OnRequestSent(GRPC_ERROR_IS_NONE(error));
self->event_handler_->OnRequestSent(error.ok());
// Drop the ref.
self->Unref(DEBUG_LOCATION, "OnRequestSent");
}

@ -52,9 +52,8 @@ bool grpc_parse_unix(const grpc_core::URI& uri,
}
grpc_error_handle error =
grpc_core::UnixSockaddrPopulate(uri.path(), resolved_addr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR, "%s", grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return false;
}
return true;
@ -69,9 +68,8 @@ bool grpc_parse_unix_abstract(const grpc_core::URI& uri,
}
grpc_error_handle error =
grpc_core::UnixAbstractSockaddrPopulate(uri.path(), resolved_addr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR, "%s", grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return false;
}
return true;

@ -145,11 +145,9 @@ grpc_error_handle grpc_channel_stack_init(
elems[i].channel_data = user_data;
grpc_error_handle error =
elems[i].filter->init_channel_elem(&elems[i], &args);
if (!GRPC_ERROR_IS_NONE(error)) {
if (GRPC_ERROR_IS_NONE(first_error)) {
if (!error.ok()) {
if (first_error.ok()) {
first_error = error;
} else {
GRPC_ERROR_UNREF(error);
}
}
user_data +=
@ -207,11 +205,9 @@ grpc_error_handle grpc_call_stack_init(
for (size_t i = 0; i < count; i++) {
grpc_error_handle error =
call_elems[i].filter->init_call_elem(&call_elems[i], elem_args);
if (!GRPC_ERROR_IS_NONE(error)) {
if (GRPC_ERROR_IS_NONE(first_error)) {
if (!error.ok()) {
if (first_error.ok()) {
first_error = error;
} else {
GRPC_ERROR_UNREF(error);
}
}
}

@ -75,11 +75,10 @@ ChannelStackBuilderImpl::Build() {
channel_stack, stack->data(), stack->size(), final_args, name(),
channel_stack);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_channel_stack_destroy(channel_stack);
gpr_free(channel_stack);
auto status = grpc_error_to_absl_status(error);
GRPC_ERROR_UNREF(error);
return status;
}

@ -57,8 +57,8 @@ typedef struct connected_channel_call_data {
static void run_in_call_combiner(void* arg, grpc_error_handle error) {
callback_state* state = static_cast<callback_state*>(arg);
GRPC_CALL_COMBINER_START(state->call_combiner, state->original_closure,
GRPC_ERROR_REF(error), state->reason);
GRPC_CALL_COMBINER_START(state->call_combiner, state->original_closure, error,
state->reason);
}
static void run_cancel_in_call_combiner(void* arg, grpc_error_handle error) {

@ -159,7 +159,6 @@ void BaseCallData::CapturedBatch::CancelWith(grpc_error_handle error,
uintptr_t& refcnt = *RefCountField(batch);
if (refcnt == 0) {
// refcnt==0 ==> cancelled
GRPC_ERROR_UNREF(error);
return;
}
refcnt = 0;
@ -352,8 +351,7 @@ class ClientCallData::PollContext {
error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
message->as_string_view());
}
GRPC_ERROR_UNREF(self_->cancelled_error_);
self_->cancelled_error_ = GRPC_ERROR_REF(error);
self_->cancelled_error_ = error;
if (self_->recv_initial_metadata_ != nullptr) {
switch (self_->recv_initial_metadata_->state) {
case RecvInitialMetadata::kInitial:
@ -378,7 +376,7 @@ class ClientCallData::PollContext {
std::exchange(
self_->recv_initial_metadata_->original_on_ready,
nullptr),
GRPC_ERROR_REF(error),
error,
"wake_inside_combiner:recv_initial_metadata_ready");
}
}
@ -389,7 +387,7 @@ class ClientCallData::PollContext {
GPR_ASSERT(
self_->recv_trailing_state_ == RecvTrailingState::kInitial ||
self_->recv_trailing_state_ == RecvTrailingState::kForwarded);
self_->call_combiner()->Cancel(GRPC_ERROR_REF(error));
self_->call_combiner()->Cancel(error);
CapturedBatch b(grpc_make_transport_stream_op(GRPC_CLOSURE_CREATE(
[](void* p, grpc_error_handle) {
GRPC_CALL_COMBINER_STOP(static_cast<CallCombiner*>(p),
@ -482,7 +480,6 @@ ClientCallData::ClientCallData(grpc_call_element* elem,
ClientCallData::~ClientCallData() {
GPR_ASSERT(poll_ctx_ == nullptr);
GRPC_ERROR_UNREF(cancelled_error_);
if (recv_initial_metadata_ != nullptr) {
recv_initial_metadata_->~RecvInitialMetadata();
}
@ -510,7 +507,6 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
!batch->recv_trailing_metadata);
Cancel(batch->payload->cancel_stream.cancel_error);
if (is_last()) {
GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
batch.CompleteWith(&flusher);
} else {
batch.ResumeWith(&flusher);
@ -560,7 +556,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// If we're already cancelled, just terminate the batch.
if (send_initial_state_ == SendInitialState::kCancelled ||
recv_trailing_state_ == RecvTrailingState::kCancelled) {
batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
batch.CancelWith(cancelled_error_, &flusher);
} else {
// Otherwise, we should not have seen a send_initial_metadata op yet.
GPR_ASSERT(send_initial_state_ == SendInitialState::kInitial);
@ -580,14 +576,14 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// recv_trailing_metadata *without* send_initial_metadata: hook it so we
// can respond to it, and push it down.
if (recv_trailing_state_ == RecvTrailingState::kCancelled) {
batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
batch.CancelWith(cancelled_error_, &flusher);
} else {
GPR_ASSERT(recv_trailing_state_ == RecvTrailingState::kInitial);
recv_trailing_state_ = RecvTrailingState::kForwarded;
HookRecvTrailingMetadata(batch);
}
} else if (!GRPC_ERROR_IS_NONE(cancelled_error_)) {
batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
} else if (!cancelled_error_.ok()) {
batch.CancelWith(cancelled_error_, &flusher);
}
if (batch.is_captured()) {
@ -602,8 +598,7 @@ void ClientCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ClientCallData::Cancel(grpc_error_handle error) {
// Track the latest reason for cancellation.
GRPC_ERROR_UNREF(cancelled_error_);
cancelled_error_ = GRPC_ERROR_REF(error);
cancelled_error_ = error;
// Stop running the promise.
promise_ = ArenaPromise<ServerMetadataHandle>();
// If we have an op queued, fail that op.
@ -621,7 +616,7 @@ void ClientCallData::Cancel(grpc_error_handle error) {
auto* f = static_cast<FailBatch*>(p);
{
Flusher flusher(f->call);
f->batch.CancelWith(GRPC_ERROR_REF(error), &flusher);
f->batch.CancelWith(error, &flusher);
GRPC_CALL_STACK_UNREF(f->call->call_stack(), "cancel pending batch");
}
delete f;
@ -631,8 +626,7 @@ void ClientCallData::Cancel(grpc_error_handle error) {
b->batch = std::move(send_initial_metadata_batch_);
b->call = this;
GRPC_CALL_STACK_REF(call_stack(), "cancel pending batch");
GRPC_CALL_COMBINER_START(call_combiner(), b,
GRPC_ERROR_REF(cancelled_error_),
GRPC_CALL_COMBINER_START(call_combiner(), b, cancelled_error_,
"cancel pending batch");
} else {
send_initial_state_ = SendInitialState::kCancelled;
@ -646,7 +640,7 @@ void ClientCallData::Cancel(grpc_error_handle error) {
GRPC_CALL_COMBINER_START(
call_combiner(),
std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
GRPC_ERROR_REF(error), "propagate cancellation");
error, "propagate cancellation");
break;
case RecvInitialMetadata::kInitial:
case RecvInitialMetadata::kGotLatch:
@ -697,17 +691,17 @@ void ClientCallData::RecvInitialMetadataReady(grpc_error_handle error) {
abort(); // unreachable
}
Flusher flusher(this);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
recv_initial_metadata_->state = RecvInitialMetadata::kResponded;
flusher.AddClosure(
std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
GRPC_ERROR_REF(error), "propagate cancellation");
error, "propagate cancellation");
} else if (send_initial_state_ == SendInitialState::kCancelled ||
recv_trailing_state_ == RecvTrailingState::kResponded) {
recv_initial_metadata_->state = RecvInitialMetadata::kResponded;
flusher.AddClosure(
std::exchange(recv_initial_metadata_->original_on_ready, nullptr),
GRPC_ERROR_REF(cancelled_error_), "propagate cancellation");
cancelled_error_, "propagate cancellation");
}
WakeInsideCombiner(&flusher);
}
@ -827,14 +821,13 @@ void ClientCallData::RecvTrailingMetadataReady(grpc_error_handle error) {
if (recv_trailing_state_ == RecvTrailingState::kCancelled) {
if (grpc_closure* call_closure =
std::exchange(original_recv_trailing_metadata_ready_, nullptr)) {
flusher.AddClosure(call_closure, GRPC_ERROR_REF(error),
"propagate failure");
flusher.AddClosure(call_closure, error, "propagate failure");
}
return;
}
// If there was an error, we'll put that into the trailing metadata and
// proceed as if there was not.
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
SetStatusFromError(recv_trailing_metadata_, error);
}
// Record that we've got the callback.
@ -949,10 +942,7 @@ ServerCallData::ServerCallData(grpc_call_element* elem,
grpc_schedule_on_exec_ctx);
}
ServerCallData::~ServerCallData() {
GPR_ASSERT(poll_ctx_ == nullptr);
GRPC_ERROR_UNREF(cancelled_error_);
}
ServerCallData::~ServerCallData() { GPR_ASSERT(poll_ctx_ == nullptr); }
// Activity implementation.
void ServerCallData::ForceImmediateRepoll() {
@ -975,10 +965,8 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
!batch->send_trailing_metadata && !batch->send_message &&
!batch->recv_initial_metadata && !batch->recv_message &&
!batch->recv_trailing_metadata);
Cancel(GRPC_ERROR_REF(batch->payload->cancel_stream.cancel_error),
&flusher);
Cancel(batch->payload->cancel_stream.cancel_error, &flusher);
if (is_last()) {
GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
batch.CompleteWith(&flusher);
} else {
batch.ResumeWith(&flusher);
@ -1015,7 +1003,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
send_initial_metadata_->state = SendInitialMetadata::kQueuedAndGotLatch;
break;
case SendInitialMetadata::kCancelled:
batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
batch.CancelWith(cancelled_error_, &flusher);
break;
case SendInitialMetadata::kQueuedAndGotLatch:
case SendInitialMetadata::kQueuedWaitingForLatch:
@ -1040,7 +1028,7 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
abort(); // unreachable
break;
case SendTrailingState::kCancelled:
batch.CancelWith(GRPC_ERROR_REF(cancelled_error_), &flusher);
batch.CancelWith(cancelled_error_, &flusher);
break;
}
}
@ -1052,13 +1040,12 @@ void ServerCallData::StartBatch(grpc_transport_stream_op_batch* b) {
// Handle cancellation.
void ServerCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
// Track the latest reason for cancellation.
GRPC_ERROR_UNREF(cancelled_error_);
cancelled_error_ = error;
// Stop running the promise.
promise_ = ArenaPromise<ServerMetadataHandle>();
if (send_trailing_state_ == SendTrailingState::kQueued) {
send_trailing_state_ = SendTrailingState::kCancelled;
send_trailing_metadata_batch_.CancelWith(GRPC_ERROR_REF(error), flusher);
send_trailing_metadata_batch_.CancelWith(error, flusher);
} else {
send_trailing_state_ = SendTrailingState::kCancelled;
}
@ -1072,16 +1059,14 @@ void ServerCallData::Cancel(grpc_error_handle error, Flusher* flusher) {
case SendInitialMetadata::kQueuedWaitingForLatch:
case SendInitialMetadata::kQueuedAndGotLatch:
case SendInitialMetadata::kQueuedAndSetLatch:
send_initial_metadata_->batch.CancelWith(GRPC_ERROR_REF(error),
flusher);
send_initial_metadata_->batch.CancelWith(error, flusher);
break;
}
send_initial_metadata_->state = SendInitialMetadata::kCancelled;
}
if (auto* closure =
std::exchange(original_recv_initial_metadata_ready_, nullptr)) {
flusher->AddClosure(closure, GRPC_ERROR_REF(error),
"original_recv_initial_metadata");
flusher->AddClosure(closure, error, "original_recv_initial_metadata");
}
}
@ -1154,11 +1139,11 @@ void ServerCallData::RecvInitialMetadataReady(grpc_error_handle error) {
Flusher flusher(this);
GPR_ASSERT(recv_initial_state_ == RecvInitialState::kForwarded);
// If there was an error we just propagate that through
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
recv_initial_state_ = RecvInitialState::kResponded;
flusher.AddClosure(
std::exchange(original_recv_initial_metadata_ready_, nullptr),
GRPC_ERROR_REF(error), "propagate error");
std::exchange(original_recv_initial_metadata_ready_, nullptr), error,
"propagate error");
return;
}
// Record that we've got the callback.

@ -182,11 +182,11 @@ static grpc_error_handle enable_socket_low_latency(SOCKET sock) {
absl::Status PrepareSocket(SOCKET sock) {
absl::Status err;
err = grpc_tcp_set_non_block(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = enable_socket_low_latency(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = set_dualstack(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
return GRPC_ERROR_NONE;
}

@ -196,7 +196,6 @@ HttpRequest::~HttpRequest() {
grpc_iomgr_unregister_object(&iomgr_obj_);
grpc_slice_buffer_destroy(&incoming_);
grpc_slice_buffer_destroy(&outgoing_);
GRPC_ERROR_UNREF(overall_error_);
grpc_pollset_set_destroy(pollset_set_);
}
@ -239,7 +238,7 @@ void HttpRequest::Orphan() {
}
void HttpRequest::AppendError(grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(overall_error_)) {
if (overall_error_.ok()) {
overall_error_ =
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Failed HTTP/1 client request");
}
@ -258,7 +257,7 @@ void HttpRequest::OnReadInternal(grpc_error_handle error) {
have_read_byte_ = 1;
grpc_error_handle err =
grpc_http_parser_parse(&parser_, incoming_.slices[i], nullptr);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
Finish(err);
return;
}
@ -267,10 +266,10 @@ void HttpRequest::OnReadInternal(grpc_error_handle error) {
if (cancelled_) {
Finish(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"HTTP1 request cancelled during read", &overall_error_, 1));
} else if (GRPC_ERROR_IS_NONE(error)) {
} else if (error.ok()) {
DoRead();
} else if (!have_read_byte_) {
NextAddress(GRPC_ERROR_REF(error));
NextAddress(error);
} else {
Finish(grpc_http_parser_eof(&parser_));
}
@ -280,10 +279,10 @@ void HttpRequest::ContinueDoneWriteAfterScheduleOnExecCtx(
void* arg, grpc_error_handle error) {
RefCountedPtr<HttpRequest> req(static_cast<HttpRequest*>(arg));
MutexLock lock(&req->mu_);
if (GRPC_ERROR_IS_NONE(error) && !req->cancelled_) {
if (error.ok() && !req->cancelled_) {
req->OnWritten();
} else {
req->NextAddress(GRPC_ERROR_REF(error));
req->NextAddress(error);
}
}
@ -305,9 +304,9 @@ void HttpRequest::OnHandshakeDone(void* arg, grpc_error_handle error) {
}
MutexLock lock(&req->mu_);
req->own_endpoint_ = true;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
req->handshake_mgr_.reset();
req->NextAddress(GRPC_ERROR_REF(error));
req->NextAddress(error);
return;
}
// Handshake completed, so we own fields in args
@ -356,7 +355,7 @@ void HttpRequest::DoHandshake(const grpc_resolved_address* addr) {
}
void HttpRequest::NextAddress(grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
AppendError(error);
}
if (cancelled_) {

@ -195,8 +195,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
static void OnRead(void* user_data, grpc_error_handle error) {
HttpRequest* req = static_cast<HttpRequest*>(user_data);
ExecCtx::Run(DEBUG_LOCATION,
&req->continue_on_read_after_schedule_on_exec_ctx_,
GRPC_ERROR_REF(error));
&req->continue_on_read_after_schedule_on_exec_ctx_, error);
}
// Needed since OnRead may be called inline from grpc_endpoint_read
@ -215,8 +214,7 @@ class HttpRequest : public InternallyRefCounted<HttpRequest> {
static void DoneWrite(void* arg, grpc_error_handle error) {
HttpRequest* req = static_cast<HttpRequest*>(arg);
ExecCtx::Run(DEBUG_LOCATION,
&req->continue_done_write_after_schedule_on_exec_ctx_,
GRPC_ERROR_REF(error));
&req->continue_done_write_after_schedule_on_exec_ctx_, error);
}
// Needed since DoneWrite may be called inline from grpc_endpoint_write

@ -122,9 +122,7 @@ class grpc_httpcli_ssl_channel_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other_sc) const override {
auto* other =

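The `grpc_httpcli_ssl_channel_security_connector` change above is the degenerate case: the entire body of `cancel_check_peer` was a `GRPC_ERROR_UNREF`, so once dropping a status is free the override shrinks to an empty function with a commented-out parameter name. A tiny sketch of that shape, with a hypothetical `Connector` base in place of the real security connector:

```cpp
#include "absl/status/status.h"

class Connector {
 public:
  virtual ~Connector() = default;
  // Ignoring a status parameter leaks nothing, so the body can be empty.
  virtual void cancel_check_peer(absl::Status /*error*/) {}
};
```
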
@ -235,7 +235,7 @@ static grpc_error_handle add_header(grpc_http_parser* parser) {
(*hdrs)[(*hdr_count)++] = hdr;
done:
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_free(hdr.key);
gpr_free(hdr.value);
}
@ -248,7 +248,7 @@ static grpc_error_handle finish_line(grpc_http_parser* parser,
switch (parser->state) {
case GRPC_HTTP_FIRST_LINE:
err = handle_first_line(parser);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
parser->state = GRPC_HTTP_HEADERS;
break;
case GRPC_HTTP_HEADERS:
@ -263,7 +263,7 @@ static grpc_error_handle finish_line(grpc_http_parser* parser,
break;
} else {
err = add_header(parser);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
}
@ -448,7 +448,7 @@ grpc_error_handle grpc_http_parser_parse(grpc_http_parser* parser,
bool found_body_start = false;
grpc_error_handle err =
addbyte(parser, GRPC_SLICE_START_PTR(slice)[i], &found_body_start);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
if (found_body_start && start_of_body != nullptr) *start_of_body = i + 1;
}
return GRPC_ERROR_NONE;

@ -282,7 +282,6 @@ void TracedBuffer::Shutdown(TracedBuffer** head, void* remaining,
if (remaining != nullptr) {
timestamps_callback(remaining, nullptr, shutdown_err);
}
GRPC_ERROR_UNREF(shutdown_err);
}
void grpc_tcp_set_write_timestamps_callback(

@ -147,9 +147,7 @@ class TracedBuffer {
public:
/* Phony shutdown function */
static void Shutdown(TracedBuffer** /*head*/, void* /*remaining*/,
grpc_error_handle shutdown_err) {
GRPC_ERROR_UNREF(shutdown_err);
}
grpc_error_handle /*shutdown_err*/) {}
};
#endif /* GRPC_LINUX_ERRQUEUE */

@ -81,7 +81,7 @@ void CallCombiner::TsanClosure(void* arg, grpc_error_handle error) {
} else {
lock.reset();
}
Closure::Run(DEBUG_LOCATION, self->original_closure_, GRPC_ERROR_REF(error));
Closure::Run(DEBUG_LOCATION, self->original_closure_, error);
if (lock != nullptr) {
TSAN_ANNOTATE_RWLOCK_RELEASED(&lock->taken, true);
bool prev = true;
@ -192,14 +192,14 @@ void CallCombiner::SetNotifyOnCancel(grpc_closure* closure) {
grpc_error_handle original_error = DecodeCancelStateError(original_state);
// If error is set, invoke the cancellation closure immediately.
// Otherwise, store the new closure.
if (!GRPC_ERROR_IS_NONE(original_error)) {
if (!original_error.ok()) {
if (GRPC_TRACE_FLAG_ENABLED(grpc_call_combiner_trace)) {
gpr_log(GPR_INFO,
"call_combiner=%p: scheduling notify_on_cancel callback=%p "
"for pre-existing cancellation",
this, closure);
}
ExecCtx::Run(DEBUG_LOCATION, closure, GRPC_ERROR_REF(original_error));
ExecCtx::Run(DEBUG_LOCATION, closure, original_error);
break;
} else {
if (gpr_atm_full_cas(&cancel_state_, original_state,
@ -233,7 +233,7 @@ void CallCombiner::Cancel(grpc_error_handle error) {
while (true) {
gpr_atm original_state = gpr_atm_acq_load(&cancel_state_);
grpc_error_handle original_error = DecodeCancelStateError(original_state);
if (!GRPC_ERROR_IS_NONE(original_error)) {
if (!original_error.ok()) {
internal::StatusFreeHeapPtr(status_ptr);
break;
}
@ -246,7 +246,7 @@ void CallCombiner::Cancel(grpc_error_handle error) {
"call_combiner=%p: scheduling notify_on_cancel callback=%p",
this, notify_on_cancel);
}
ExecCtx::Run(DEBUG_LOCATION, notify_on_cancel, GRPC_ERROR_REF(error));
ExecCtx::Run(DEBUG_LOCATION, notify_on_cancel, error);
}
break;
}

@ -83,10 +83,9 @@ void CFStreamHandle::ReadCallback(CFReadStreamRef stream,
GRPC_ERROR_CREATE_FROM_CFERROR(stream_error, "read error"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
CFRelease(stream_error);
handle->open_event_.SetShutdown(GRPC_ERROR_REF(error));
handle->write_event_.SetShutdown(GRPC_ERROR_REF(error));
handle->read_event_.SetShutdown(GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
handle->open_event_.SetShutdown(error);
handle->write_event_.SetShutdown(error);
handle->read_event_.SetShutdown(error);
break;
default:
GPR_UNREACHABLE_CODE(return );
@ -118,10 +117,9 @@ void CFStreamHandle::WriteCallback(CFWriteStreamRef stream,
GRPC_ERROR_CREATE_FROM_CFERROR(stream_error, "write error"),
GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE);
CFRelease(stream_error);
handle->open_event_.SetShutdown(GRPC_ERROR_REF(error));
handle->write_event_.SetShutdown(GRPC_ERROR_REF(error));
handle->read_event_.SetShutdown(GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
handle->open_event_.SetShutdown(error);
handle->write_event_.SetShutdown(error);
handle->read_event_.SetShutdown(error);
break;
default:
GPR_UNREACHABLE_CODE(return );
@ -172,10 +170,9 @@ void CFStreamHandle::NotifyOnWrite(grpc_closure* closure) {
}
void CFStreamHandle::Shutdown(grpc_error_handle error) {
open_event_.SetShutdown(GRPC_ERROR_REF(error));
read_event_.SetShutdown(GRPC_ERROR_REF(error));
write_event_.SetShutdown(GRPC_ERROR_REF(error));
GRPC_ERROR_UNREF(error);
open_event_.SetShutdown(error);
read_event_.SetShutdown(error);
write_event_.SetShutdown(error);
}
void CFStreamHandle::Ref(const char* file, int line, const char* reason) {

@ -195,7 +195,6 @@ inline bool grpc_closure_list_append(grpc_closure_list* closure_list,
grpc_closure* closure,
grpc_error_handle error) {
if (closure == nullptr) {
GRPC_ERROR_UNREF(error);
return false;
}
closure->error_data.error = grpc_core::internal::StatusAllocHeapPtr(error);
@ -211,7 +210,6 @@ inline void grpc_closure_list_fail_all(grpc_closure_list* list,
grpc_core::internal::StatusAllocHeapPtr(forced_failure);
}
}
GRPC_ERROR_UNREF(forced_failure);
}
/** append all closures from \a src to \a dst and empty \a src. */
@ -241,7 +239,6 @@ class Closure {
grpc_error_handle error) {
(void)location;
if (closure == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
#ifndef NDEBUG
@ -258,7 +255,6 @@ class Closure {
gpr_log(GPR_DEBUG, "closure %p finished", closure);
}
#endif
GRPC_ERROR_UNREF(error);
}
};
} // namespace grpc_core

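The closure helpers above lose their cleanup branches for the same reason: when `Closure::Run` or `grpc_closure_list_append` bails out on a null closure, simply returning discards the status, and `absl::Status` frees any internal representation in its own destructor. A toy sketch of that early-return shape, using a hypothetical `maybe_run` in place of the real closure machinery:

```cpp
#include <functional>
#include <utility>

#include "absl/status/status.h"

// If there is no callback, the status argument is dropped on return --
// before the migration this path needed an explicit GRPC_ERROR_UNREF(error).
bool maybe_run(const std::function<void(absl::Status)>& cb, absl::Status error) {
  if (cb == nullptr) return false;
  cb(std::move(error));
  return true;
}
```
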
@ -316,7 +316,7 @@ static void enqueue_finally(void* closure, grpc_error_handle error) {
grpc_core::Combiner* lock =
reinterpret_cast<grpc_core::Combiner*>(cl->error_data.scratch);
cl->error_data.scratch = 0;
combiner_finally_exec(lock, cl, GRPC_ERROR_REF(error));
combiner_finally_exec(lock, cl, error);
}
namespace grpc_core {

@ -56,7 +56,7 @@ struct grpc_endpoint_vtable {
Callback success indicates that the endpoint can accept more reads, failure
indicates the endpoint is closed.
Valid slices may be placed into \a slices even when the callback is
invoked with !GRPC_ERROR_IS_NONE(error). */
invoked with !error.ok(). */
void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
grpc_closure* cb, bool urgent, int min_progress_size);

@ -147,9 +147,9 @@ static void CallWriteCb(CFStreamEndpoint* ep, grpc_error_handle error) {
static void ReadAction(void* arg, grpc_error_handle error) {
CFStreamEndpoint* ep = static_cast<CFStreamEndpoint*>(arg);
GPR_ASSERT(ep->read_cb != nullptr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_slice_buffer_reset_and_unref(ep->read_slices);
CallReadCb(ep, GRPC_ERROR_REF(error));
CallReadCb(ep, error);
EP_UNREF(ep, "read");
return;
}
@ -189,13 +189,12 @@ static void ReadAction(void* arg, grpc_error_handle error) {
static void WriteAction(void* arg, grpc_error_handle error) {
CFStreamEndpoint* ep = static_cast<CFStreamEndpoint*>(arg);
GPR_ASSERT(ep->write_cb != nullptr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_slice_buffer_reset_and_unref(ep->write_slices);
CallWriteCb(ep, GRPC_ERROR_REF(error));
CallWriteCb(ep, error);
EP_UNREF(ep, "write");
return;
}
grpc_slice slice = grpc_slice_buffer_take_first(ep->write_slices);
size_t slice_len = GRPC_SLICE_LENGTH(slice);
CFIndex write_size = CFWriteStreamWrite(

@ -94,7 +94,7 @@ absl::Status grpc_wsa_error(const grpc_core::DebugLocation& location, int err,
grpc_error_handle grpc_error_set_int(grpc_error_handle src,
grpc_error_ints which, intptr_t value) {
if (GRPC_ERROR_IS_NONE(src)) {
if (src.ok()) {
src = absl::UnknownError("");
StatusSetInt(&src, grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_OK);
@ -135,7 +135,7 @@ bool grpc_error_get_int(grpc_error_handle error, grpc_error_ints which,
grpc_error_handle grpc_error_set_str(grpc_error_handle src,
grpc_error_strs which,
absl::string_view str) {
if (GRPC_ERROR_IS_NONE(src)) {
if (src.ok()) {
src = absl::UnknownError("");
StatusSetInt(&src, grpc_core::StatusIntProperty::kRpcStatus,
GRPC_STATUS_OK);
@ -210,7 +210,7 @@ grpc_error_handle grpc_error_add_child(grpc_error_handle src,
bool grpc_log_error(const char* what, grpc_error_handle error, const char* file,
int line) {
GPR_DEBUG_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_DEBUG_ASSERT(!error.ok());
gpr_log(file, line, GPR_LOG_SEVERITY_ERROR, "%s: %s", what,
grpc_core::StatusToString(error).c_str());
return false;
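
The grpc_error_set_int()/grpc_error_set_str() hunks above keep the pattern of upgrading an OK status to absl::UnknownError("") before attaching a property. A rough, self-contained illustration of that upgrade step, using absl::Status::SetPayload() as a stand-in for gRPC's own property storage (the payload key below is invented for the example):

#include <iostream>
#include <string>

#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"

// An OK status cannot carry properties, so it is first turned into a non-OK
// status and the property is then attached as a payload.
absl::Status SetIntProperty(absl::Status src, absl::string_view key, int value) {
  if (src.ok()) src = absl::UnknownError("");
  src.SetPayload(key, absl::Cord(std::to_string(value)));
  return src;
}

int main() {
  absl::Status s = SetIntProperty(absl::OkStatus(), "grpc.internal.status", 14);
  std::cout << s.ToString() << "\n";  // default ToString() includes payloads
}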

@ -147,6 +147,8 @@ void grpc_enable_error_creation();
#define GRPC_ERROR_OOM absl::Status(absl::ResourceExhaustedError(""))
#define GRPC_ERROR_CANCELLED absl::CancelledError()
// Deprecated: Please do not use these macros.
// These will be removed once migration is done.
#define GRPC_ERROR_REF(err) (err)
#define GRPC_ERROR_UNREF(err) (void)(err)
@ -199,7 +201,7 @@ absl::Status grpc_os_error(const grpc_core::DebugLocation& location, int err,
const char* call_name) GRPC_MUST_USE_RESULT;
inline absl::Status grpc_assert_never_ok(absl::Status error) {
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
return error;
}
@ -246,8 +248,7 @@ bool grpc_log_error(const char* what, grpc_error_handle error, const char* file,
int line);
inline bool grpc_log_if_error(const char* what, grpc_error_handle error,
const char* file, int line) {
return GRPC_ERROR_IS_NONE(error) ? true
: grpc_log_error(what, error, file, line);
return error.ok() ? true : grpc_log_error(what, error, file, line);
}
#define GRPC_LOG_IF_ERROR(what, error) \
@ -261,9 +262,7 @@ class AtomicError {
error_ = GRPC_ERROR_NONE;
lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
}
explicit AtomicError(grpc_error_handle error) {
error_ = GRPC_ERROR_REF(error);
}
explicit AtomicError(grpc_error_handle error) { error_ = error; }
~AtomicError() { GRPC_ERROR_UNREF(error_); }
AtomicError(const AtomicError&) = delete;
@ -272,7 +271,7 @@ class AtomicError {
/// returns get() == GRPC_ERROR_NONE
bool ok() {
gpr_spinlock_lock(&lock_);
bool ret = GRPC_ERROR_IS_NONE(error_);
bool ret = error_.ok();
gpr_spinlock_unlock(&lock_);
return ret;
}
@ -286,8 +285,7 @@ class AtomicError {
void set(grpc_error_handle error) {
gpr_spinlock_lock(&lock_);
GRPC_ERROR_UNREF(error_);
error_ = GRPC_ERROR_REF(error);
error_ = error;
gpr_spinlock_unlock(&lock_);
}
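
After this change GRPC_ERROR_REF(err) expands to (err) and GRPC_ERROR_UNREF(err) to (void)(err), so holders of an error such as AtomicError can store the absl::Status by value and just assign over it. A simplified sketch of what that reduces to, with std::mutex standing in for gpr_spinlock so the example is self-contained:

#include <mutex>
#include <utility>

#include "absl/status/status.h"

// Simplified sketch of the AtomicError idea after the migration: the error is
// an absl::Status held by value, so construction, set() and the destructor
// need no ref counting.
class AtomicStatus {
 public:
  AtomicStatus() = default;
  explicit AtomicStatus(absl::Status error) : error_(std::move(error)) {}

  bool ok() {
    std::lock_guard<std::mutex> lock(mu_);
    return error_.ok();
  }

  void set(absl::Status error) {
    std::lock_guard<std::mutex> lock(mu_);
    error_ = std::move(error);  // the old value is simply overwritten
  }

 private:
  std::mutex mu_;
  absl::Status error_;
};

int main() {
  AtomicStatus e(absl::CancelledError("shutdown"));
  if (!e.ok()) e.set(absl::OkStatus());
}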

@ -240,8 +240,8 @@ struct grpc_pollset_set {
static bool append_error(grpc_error_handle* composite, grpc_error_handle error,
const char* desc) {
if (GRPC_ERROR_IS_NONE(error)) return true;
if (GRPC_ERROR_IS_NONE(*composite)) {
if (error.ok()) return true;
if (composite->ok()) {
*composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
}
*composite = grpc_error_add_child(*composite, error);
@ -381,7 +381,7 @@ static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
* shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error_handle why,
bool releasing_fd) {
if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
if (fd->read_closure->SetShutdown(why)) {
if (!releasing_fd) {
shutdown(fd->fd, SHUT_RDWR);
} else {
@ -392,10 +392,9 @@ static void fd_shutdown_internal(grpc_fd* fd, grpc_error_handle why,
gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
}
}
fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
fd->write_closure->SetShutdown(why);
fd->error_closure->SetShutdown(why);
}
GRPC_ERROR_UNREF(why);
}
/* Might be called multiple times */
@ -421,7 +420,7 @@ static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
close(fd->fd);
}
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_done, GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, on_done, error);
grpc_iomgr_unregister_object(&fd->iomgr_object);
fork_fd_list_remove_grpc_fd(fd);
@ -515,7 +514,7 @@ static grpc_error_handle pollset_global_init(void) {
gpr_atm_no_barrier_store(&g_active_poller, 0);
global_wakeup_fd.read_fd = -1;
grpc_error_handle err = grpc_wakeup_fd_init(&global_wakeup_fd);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
struct epoll_event ev;
ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
ev.data.ptr = &global_wakeup_fd;

@ -358,7 +358,6 @@ static void unref_by(grpc_fd* fd, int n) {
grpc_iomgr_unregister_object(&fd->iomgr_object);
fork_fd_list_remove_node(fd->fork_fd_list);
if (fd->shutdown) {
GRPC_ERROR_UNREF(fd->shutdown_error);
}
fd->shutdown_error.~Status();
gpr_free(fd);
@ -551,8 +550,6 @@ static void fd_shutdown(grpc_fd* fd, grpc_error_handle why) {
shutdown(fd->fd, SHUT_RDWR);
set_ready_locked(fd, &fd->read_closure);
set_ready_locked(fd, &fd->write_closure);
} else {
GRPC_ERROR_UNREF(why);
}
gpr_mu_unlock(&fd->mu);
}
@ -757,8 +754,8 @@ static void push_front_worker(grpc_pollset* p, grpc_pollset_worker* worker) {
static void kick_append_error(grpc_error_handle* composite,
grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error)) return;
if (GRPC_ERROR_IS_NONE(*composite)) {
if (error.ok()) return;
if (composite->ok()) {
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Kick Failure");
}
*composite = grpc_error_add_child(*composite, error);
@ -818,7 +815,7 @@ static grpc_error_handle pollset_kick_ext(grpc_pollset* p,
}
}
GRPC_LOG_IF_ERROR("pollset_kick_ext", GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("pollset_kick_ext", error);
return error;
}
@ -893,8 +890,8 @@ static void finish_shutdown(grpc_pollset* pollset) {
static void work_combine_error(grpc_error_handle* composite,
grpc_error_handle error) {
if (GRPC_ERROR_IS_NONE(error)) return;
if (GRPC_ERROR_IS_NONE(*composite)) {
if (error.ok()) return;
if (composite->ok()) {
*composite = GRPC_ERROR_CREATE_FROM_STATIC_STRING("pollset_work");
}
*composite = grpc_error_add_child(*composite, error);
@ -928,8 +925,8 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
gpr_malloc(sizeof(*worker.wakeup_fd)));
error = grpc_wakeup_fd_init(&worker.wakeup_fd->fd);
fork_fd_list_add_wakeup_fd(worker.wakeup_fd);
if (!GRPC_ERROR_IS_NONE(error)) {
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
if (!error.ok()) {
GRPC_LOG_IF_ERROR("pollset_work", error);
return error;
}
}
@ -1090,7 +1087,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
/* If we're forced to re-evaluate polling (via pollset_kick with
GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
a loop */
if (worker.reevaluate_polling_on_wakeup && GRPC_ERROR_IS_NONE(error)) {
if (worker.reevaluate_polling_on_wakeup && error.ok()) {
worker.reevaluate_polling_on_wakeup = 0;
pollset->kicked_without_pollers = 0;
if (queued_work || worker.kicked_specifically) {
@ -1126,7 +1123,7 @@ static grpc_error_handle pollset_work(grpc_pollset* pollset,
}
}
if (worker_hdl) *worker_hdl = nullptr;
GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("pollset_work", error);
return error;
}
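
The kick_append_error()/work_combine_error() helpers above build a composite status: the first failure creates the root error and later failures are attached as children. A hedged, self-contained approximation of that shape; the real code keeps using grpc_error_add_child(), while this stand-in just folds the child message into the composite's message:

#include <iostream>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"

// First failure seeds the composite; further failures are appended to it.
void AppendError(absl::Status* composite, const absl::Status& error,
                 const char* desc) {
  if (error.ok()) return;
  if (composite->ok()) *composite = absl::UnknownError(desc);
  *composite = absl::Status(
      composite->code(),
      absl::StrCat(composite->message(), "; child: ", error.message()));
}

int main() {
  absl::Status composite;  // default-constructed status is OK
  AppendError(&composite, absl::OkStatus(), "Kick Failure");
  AppendError(&composite, absl::UnavailableError("wakeup fd failed"),
              "Kick Failure");
  std::cout << composite.ToString() << "\n";
}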

@ -81,7 +81,6 @@ void ExecCtx::Run(const DebugLocation& location, grpc_closure* closure,
grpc_error_handle error) {
(void)location;
if (closure == nullptr) {
GRPC_ERROR_UNREF(error);
return;
}
#ifndef NDEBUG

@ -66,14 +66,13 @@ grpc_error_handle grpc_load_file(const char* filename, int add_null_terminator,
end:
*output = result;
if (file != nullptr) fclose(file);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_handle error_out =
grpc_error_set_str(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to load file", &error, 1),
GRPC_ERROR_STR_FILENAME,
filename);
GRPC_ERROR_UNREF(error);
error = error_out;
}
GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
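
grpc_load_file() failures are now wrapped into a parent error by plain assignment; the extra GRPC_ERROR_UNREF of the original disappears. A small approximation of that wrap-and-replace step (WrapError below is a made-up helper that only prefixes the message, not the structured child that GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING keeps):

#include <iostream>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

// Wrap a failure in a parent error while preserving the status code.
absl::Status WrapError(absl::string_view context, const absl::Status& cause) {
  return absl::Status(cause.code(),
                      absl::StrCat(context, ": ", cause.message()));
}

int main() {
  absl::Status error = absl::NotFoundError("no such file: settings.json");
  if (!error.ok()) {
    error = WrapError("Failed to load file", error);  // replaces, no UNREF
  }
  std::cout << error.ToString() << "\n";
}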

@ -170,11 +170,10 @@ done:
if (result) {
freeaddrinfo(result);
}
if (GRPC_ERROR_IS_NONE(err)) {
if (err.ok()) {
return addresses;
}
auto error_result = grpc_error_to_absl_status(err);
GRPC_ERROR_UNREF(err);
return error_result;
}

@ -154,11 +154,10 @@ done:
if (result) {
freeaddrinfo(result);
}
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
return addresses;
}
auto error_result = grpc_error_to_absl_status(error);
GRPC_ERROR_UNREF(error);
return error_result;
}

@ -116,7 +116,7 @@ static void OnOpen(void* arg, grpc_error_handle error) {
gpr_mu_unlock(&connect->mu);
CFStreamConnectCleanup(connect);
} else {
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
CFErrorRef stream_error = CFReadStreamCopyError(connect->read_stream);
if (stream_error == NULL) {
stream_error = CFWriteStreamCopyError(connect->write_stream);
@ -125,13 +125,11 @@ static void OnOpen(void* arg, grpc_error_handle error) {
error = GRPC_ERROR_CREATE_FROM_CFERROR(stream_error, "connect() error");
CFRelease(stream_error);
}
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
*endpoint = grpc_cfstream_endpoint_create(
connect->read_stream, connect->write_stream,
connect->addr_name.c_str(), connect->stream_handle);
}
} else {
(void)GRPC_ERROR_REF(error);
}
gpr_mu_unlock(&connect->mu);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, closure, error);

@ -99,23 +99,23 @@ static grpc_error_handle prepare_socket(
GPR_ASSERT(fd >= 0);
err = grpc_set_socket_nonblocking(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_set_socket_cloexec(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
if (!grpc_is_unix_socket(addr)) {
err = grpc_set_socket_low_latency(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_set_socket_reuse_addr(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_set_socket_tcp_user_timeout(fd, options, true /* is_client */);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
}
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_apply_socket_mutator_in_args(fd, GRPC_FD_CLIENT_CONNECTION_USAGE,
options);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
goto done;
@ -170,8 +170,6 @@ static void on_writable(void* acp, grpc_error_handle error) {
std::string addr_str = ac->addr_str;
grpc_fd* fd;
(void)GRPC_ERROR_REF(error);
if (GRPC_TRACE_FLAG_ENABLED(grpc_tcp_trace)) {
gpr_log(GPR_INFO, "CLIENT_CONNECT: %s: on_writable: error=%s",
ac->addr_str.c_str(), grpc_error_std_string(error).c_str());
@ -187,7 +185,7 @@ static void on_writable(void* acp, grpc_error_handle error) {
grpc_timer_cancel(&ac->alarm);
gpr_mu_lock(&ac->mu);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
error =
grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, "Timeout occurred");
goto finish;
@ -261,7 +259,7 @@ finish:
}
done = (--ac->refs == 0);
gpr_mu_unlock(&ac->mu);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
std::string str;
bool ret = grpc_error_get_str(error, GRPC_ERROR_STR_DESCRIPTION, &str);
GPR_ASSERT(ret);
@ -281,9 +279,6 @@ finish:
// between the core shutdown mu and the connector mu (b/188239051)
if (!connect_cancelled) {
grpc_core::Executor::Run(closure, error);
} else if (!GRPC_ERROR_IS_NONE(error)) {
// Unref the error here because it is not used.
(void)GRPC_ERROR_UNREF(error);
}
}
@ -302,7 +297,7 @@ grpc_error_handle grpc_tcp_client_prepare_fd(
}
error =
grpc_create_dualstack_socket(mapped_addr, SOCK_STREAM, 0, &dsmode, fd);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
return error;
}
if (dsmode == GRPC_DSMODE_IPV4) {
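
prepare_socket() above now checks each setup step with err.ok() and bails out on the first failure, with nothing to clean up on the error path. A minimal sketch of that control flow, using invented stand-ins for the grpc_set_socket_*() helpers:

#include "absl/status/status.h"

// Hypothetical socket-option helpers; only the control flow matters: each step
// yields an absl::Status and the first failure is returned directly.
absl::Status SetNonBlocking(int /*fd*/) { return absl::OkStatus(); }
absl::Status SetCloexec(int /*fd*/) { return absl::OkStatus(); }
absl::Status SetLowLatency(int /*fd*/) { return absl::OkStatus(); }

absl::Status PrepareSocket(int fd) {
  absl::Status err = SetNonBlocking(fd);
  if (!err.ok()) return err;
  err = SetCloexec(fd);
  if (!err.ok()) return err;
  err = SetLowLatency(fd);
  if (!err.ok()) return err;
  return absl::OkStatus();
}

int main() { return PrepareSocket(3).ok() ? 0 : 1; }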

@ -83,8 +83,6 @@ static void on_connect(void* acp, grpc_error_handle error) {
GPR_ASSERT(*ep == NULL);
grpc_closure* on_done = ac->on_done;
(void)GRPC_ERROR_REF(error);
gpr_mu_lock(&ac->mu);
grpc_winsocket* socket = ac->socket;
ac->socket = NULL;
@ -94,7 +92,7 @@ static void on_connect(void* acp, grpc_error_handle error) {
gpr_mu_lock(&ac->mu);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (socket != NULL) {
DWORD transfered_bytes = 0;
DWORD flags;
@ -162,7 +160,7 @@ static int64_t tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
}
error = grpc_tcp_prepare_socket(sock);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
goto failure;
}
@ -219,13 +217,12 @@ static int64_t tcp_connect(grpc_closure* on_done, grpc_endpoint** endpoint,
return 0;
failure:
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
grpc_error_handle final_error = grpc_error_set_str(
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Failed to connect",
&error, 1),
GRPC_ERROR_STR_TARGET_ADDRESS,
addr_uri.ok() ? *addr_uri : addr_uri.status().ToString());
GRPC_ERROR_UNREF(error);
if (socket != NULL) {
grpc_winsocket_destroy(socket);
} else if (sock != INVALID_SOCKET) {

@ -1119,7 +1119,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
}
tcp->read_mu.Lock();
grpc_error_handle tcp_read_error;
if (GPR_LIKELY(GRPC_ERROR_IS_NONE(error))) {
if (GPR_LIKELY(error.ok())) {
maybe_make_read_slices(tcp);
if (!tcp_do_read(tcp, &tcp_read_error)) {
/* We've consumed the edge, request a new one */
@ -1130,7 +1130,7 @@ static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error_handle error) {
}
tcp_trace_read(tcp, tcp_read_error);
} else {
tcp_read_error = GRPC_ERROR_REF(error);
tcp_read_error = error;
grpc_slice_buffer_reset_and_unref(tcp->incoming_buffer);
grpc_slice_buffer_reset_and_unref(&tcp->last_read_buffer);
}
@ -1471,7 +1471,7 @@ static void tcp_handle_error(void* arg /* grpc_tcp */,
grpc_error_std_string(error).c_str());
}
if (!GRPC_ERROR_IS_NONE(error) ||
if (!error.ok() ||
static_cast<bool>(gpr_atm_acq_load(&tcp->stop_error_notification))) {
/* We aren't going to register to hear on error anymore, so it is safe to
* unref. */
@ -1789,7 +1789,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
grpc_tcp* tcp = static_cast<grpc_tcp*>(arg);
grpc_closure* cb;
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
cb = tcp->write_cb;
tcp->write_cb = nullptr;
if (tcp->current_zerocopy_send != nullptr) {
@ -1797,7 +1797,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
"handle_write_err");
tcp->current_zerocopy_send = nullptr;
}
grpc_core::Closure::Run(DEBUG_LOCATION, cb, GRPC_ERROR_REF(error));
grpc_core::Closure::Run(DEBUG_LOCATION, cb, error);
TCP_UNREF(tcp, "write");
return;
}
@ -1811,7 +1811,7 @@ static void tcp_handle_write(void* arg /* grpc_tcp */,
}
notify_on_write(tcp);
// tcp_flush does not populate error if it has returned false.
GPR_DEBUG_ASSERT(GRPC_ERROR_IS_NONE(error));
GPR_DEBUG_ASSERT(error.ok());
} else {
cb = tcp->write_cb;
tcp->write_cb = nullptr;

@ -180,7 +180,7 @@ static void tcp_server_destroy(grpc_tcp_server* s) {
static void on_read(void* arg, grpc_error_handle err) {
grpc_tcp_listener* sp = static_cast<grpc_tcp_listener*>(arg);
grpc_pollset* read_notifier_pollset;
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
goto error;
}
@ -242,7 +242,7 @@ static void on_read(void* arg, grpc_error_handle err) {
err = grpc_apply_socket_mutator_in_args(fd, GRPC_FD_SERVER_CONNECTION_USAGE,
sp->server->options);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
goto error;
}
@ -333,25 +333,23 @@ static grpc_error_handle add_wildcard_addrs_to_server(grpc_tcp_server* s,
}
}
if (*out_port > 0) {
if (!GRPC_ERROR_IS_NONE(v6_err)) {
if (!v6_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add :: listener, "
"the environment may not support IPv6: %s",
grpc_error_std_string(v6_err).c_str());
GRPC_ERROR_UNREF(v6_err);
}
if (!GRPC_ERROR_IS_NONE(v4_err)) {
if (!v4_err.ok()) {
gpr_log(GPR_INFO,
"Failed to add 0.0.0.0 listener, "
"the environment may not support IPv4: %s",
grpc_error_std_string(v4_err).c_str());
GRPC_ERROR_UNREF(v4_err);
}
return GRPC_ERROR_NONE;
} else {
grpc_error_handle root_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Failed to add any wildcard listeners");
GPR_ASSERT(!GRPC_ERROR_IS_NONE(v6_err) && !GRPC_ERROR_IS_NONE(v4_err));
GPR_ASSERT(!v6_err.ok() && !v4_err.ok());
root_err = grpc_error_add_child(root_err, v6_err);
root_err = grpc_error_add_child(root_err, v4_err);
return root_err;
@ -374,10 +372,10 @@ static grpc_error_handle clone_port(grpc_tcp_listener* listener,
grpc_dualstack_mode dsmode;
err = grpc_create_dualstack_socket(&listener->addr, SOCK_STREAM, 0, &dsmode,
&fd);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = grpc_tcp_server_prepare_socket(listener->server, fd, &listener->addr,
true, &port);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
listener->server->nports++;
addr_str = grpc_sockaddr_to_string(&listener->addr, true);
if (!addr_str.ok()) {

@ -90,7 +90,7 @@ static grpc_error_handle add_socket_to_server(grpc_tcp_server* s, int fd,
grpc_error_handle err =
grpc_tcp_server_prepare_socket(s, fd, addr, s->so_reuseport, &port);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
GPR_ASSERT(port > 0);
absl::StatusOr<std::string> addr_str = grpc_sockaddr_to_string(addr, true);
if (!addr_str.ok()) {
@ -137,7 +137,7 @@ grpc_error_handle grpc_tcp_server_add_addr(grpc_tcp_server* s,
int fd;
grpc_error_handle err =
grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, dsmode, &fd);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
if (*dsmode == GRPC_DSMODE_IPV4 &&
@ -158,36 +158,35 @@ grpc_error_handle grpc_tcp_server_prepare_socket(
if (so_reuseport && !grpc_is_unix_socket(addr)) {
err = grpc_set_socket_reuse_port(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
}
#ifdef GRPC_LINUX_ERRQUEUE
err = grpc_set_socket_zerocopy(fd);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
/* it's not fatal, so just log it. */
gpr_log(GPR_DEBUG, "Node does not support SO_ZEROCOPY, continuing.");
GRPC_ERROR_UNREF(err);
}
#endif
err = grpc_set_socket_nonblocking(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_set_socket_cloexec(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
if (!grpc_is_unix_socket(addr)) {
err = grpc_set_socket_low_latency(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_set_socket_reuse_addr(fd, 1);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err =
grpc_set_socket_tcp_user_timeout(fd, s->options, false /* is_client */);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
}
err = grpc_set_socket_no_sigpipe_if_possible(fd);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
err = grpc_apply_socket_mutator_in_args(fd, GRPC_FD_SERVER_LISTENER_USAGE,
s->options);
if (!GRPC_ERROR_IS_NONE(err)) goto error;
if (!err.ok()) goto error;
if (bind(fd, reinterpret_cast<grpc_sockaddr*>(const_cast<char*>(addr->addr)),
addr->len) < 0) {
@ -212,7 +211,7 @@ grpc_error_handle grpc_tcp_server_prepare_socket(
return GRPC_ERROR_NONE;
error:
GPR_ASSERT(!GRPC_ERROR_IS_NONE(err));
GPR_ASSERT(!err.ok());
if (fd >= 0) {
close(fd);
}
@ -220,7 +219,6 @@ error:
grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Unable to configure socket", &err, 1),
GRPC_ERROR_INT_FD, fd);
GRPC_ERROR_UNREF(err);
return ret;
}

@ -65,7 +65,7 @@ static grpc_error_handle get_unused_port(int* port) {
int fd;
grpc_error_handle err =
grpc_create_dualstack_socket(&wild, SOCK_STREAM, 0, &dsmode, &fd);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
}
if (dsmode == GRPC_DSMODE_IPV4) {
@ -164,7 +164,7 @@ grpc_error_handle grpc_tcp_server_add_all_local_addrs(grpc_tcp_server* s,
}
}
freeifaddrs(ifa);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
return err;
} else if (sp == nullptr) {
return GRPC_ERROR_CREATE_FROM_STATIC_STRING("No local addresses");

@ -196,7 +196,7 @@ static grpc_error_handle prepare_socket(SOCKET sock,
int sockname_temp_len;
error = grpc_tcp_prepare_socket(sock);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
goto failure;
}
@ -223,7 +223,7 @@ static grpc_error_handle prepare_socket(SOCKET sock,
return GRPC_ERROR_NONE;
failure:
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
auto addr_uri = grpc_sockaddr_to_uri(addr);
grpc_error_set_int(
grpc_error_set_str(
@ -232,7 +232,6 @@ failure:
GRPC_ERROR_STR_TARGET_ADDRESS,
addr_uri.ok() ? *addr_uri : addr_uri.status().ToString()),
GRPC_ERROR_INT_FD, (intptr_t)sock);
GRPC_ERROR_UNREF(error);
if (sock != INVALID_SOCKET) closesocket(sock);
return error;
}
@ -266,7 +265,7 @@ static grpc_error_handle start_accept_locked(grpc_tcp_listener* port) {
}
error = grpc_tcp_prepare_socket(sock);
if (!GRPC_ERROR_IS_NONE(error)) goto failure;
if (!error.ok()) goto failure;
/* Start the "accept" asynchronously. */
success = port->AcceptEx(port->socket->socket, sock, port->addresses, 0,
@ -291,7 +290,7 @@ static grpc_error_handle start_accept_locked(grpc_tcp_listener* port) {
return error;
failure:
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
if (sock != INVALID_SOCKET) closesocket(sock);
return error;
}
@ -315,7 +314,7 @@ static void on_accept(void* arg, grpc_error_handle error) {
/* The general mechanism for shutting down is to queue abortion calls. While
this is necessary in the read/write case, it's useless for the accept
case. We only need to adjust the pending callback count */
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_INFO, "Skipping on_accept due to error: %s",
grpc_error_std_string(error).c_str());
@ -418,7 +417,7 @@ static grpc_error_handle add_socket_to_server(grpc_tcp_server* s, SOCKET sock,
}
error = prepare_socket(sock, addr, &port);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
return error;
}
@ -510,11 +509,10 @@ static grpc_error_handle tcp_server_add_port(grpc_tcp_server* s,
done:
gpr_free(allocated_addr);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
grpc_error_handle error_out =
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Failed to add port to server", &error, 1);
GRPC_ERROR_UNREF(error);
error = error_out;
*port = -1;
} else {

@ -89,11 +89,11 @@ static grpc_error_handle enable_socket_low_latency(SOCKET sock) {
grpc_error_handle grpc_tcp_prepare_socket(SOCKET sock) {
grpc_error_handle err;
err = grpc_tcp_set_non_block(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = set_dualstack(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = enable_socket_low_latency(sock);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
return GRPC_ERROR_NONE;
}
@ -131,7 +131,6 @@ static void tcp_free(grpc_tcp* tcp) {
grpc_winsocket_destroy(tcp->socket);
gpr_mu_destroy(&tcp->mu);
grpc_slice_buffer_destroy(&tcp->last_read_buffer);
if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
delete tcp;
}
@ -184,9 +183,7 @@ static void on_read(void* tcpp, grpc_error_handle error) {
gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
}
(void)GRPC_ERROR_REF(error);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (info->wsa_error != 0 && !tcp->shutting_down) {
char* utf8_message = gpr_format_message(info->wsa_error);
error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
@ -324,14 +321,12 @@ static void on_write(void* tcpp, grpc_error_handle error) {
gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
}
(void)GRPC_ERROR_REF(error);
gpr_mu_lock(&tcp->mu);
cb = tcp->write_cb;
tcp->write_cb = NULL;
gpr_mu_unlock(&tcp->mu);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
if (info->wsa_error != 0) {
error = GRPC_WSA_ERROR(info->wsa_error, "WSASend");
} else {
@ -465,8 +460,6 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error_handle why) {
if (!tcp->shutting_down) {
tcp->shutting_down = 1;
tcp->shutdown_error = why;
} else {
GRPC_ERROR_UNREF(why);
}
grpc_winsocket_shutdown(tcp->socket);
gpr_mu_unlock(&tcp->mu);

@ -550,8 +550,7 @@ static size_t pop_timers(timer_shard* shard, grpc_core::Timestamp now,
gpr_mu_lock(&shard->mu);
while ((timer = pop_one(shard, now))) {
REMOVE_FROM_HASH_TABLE(timer);
grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure,
GRPC_ERROR_REF(error));
grpc_core::ExecCtx::Run(DEBUG_LOCATION, timer->closure, error);
n++;
}
*new_min_deadline = compute_min_deadline(shard);
@ -657,8 +656,6 @@ static grpc_timer_check_result run_some_expired_timers(
gpr_spinlock_unlock(&g_shared_mutables.checker_mu);
}
GRPC_ERROR_UNREF(error);
return result;
}

@ -45,11 +45,10 @@ absl::StatusOr<std::vector<grpc_resolved_address>>
grpc_resolve_unix_domain_address(absl::string_view name) {
grpc_resolved_address addr;
grpc_error_handle error = grpc_core::UnixSockaddrPopulate(name, &addr);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
return std::vector<grpc_resolved_address>({addr});
}
auto result = grpc_error_to_absl_status(error);
GRPC_ERROR_UNREF(error);
return result;
}
@ -58,11 +57,10 @@ grpc_resolve_unix_abstract_domain_address(const absl::string_view name) {
grpc_resolved_address addr;
grpc_error_handle error =
grpc_core::UnixAbstractSockaddrPopulate(name, &addr);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
return std::vector<grpc_resolved_address>({addr});
}
auto result = grpc_error_to_absl_status(error);
GRPC_ERROR_UNREF(error);
return result;
}
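
Because the error is already an absl::Status, the resolver paths above can return it straight into an absl::StatusOr without the old convert-then-unref step. A sketch of that shape, with a hypothetical PopulateAddress() standing in for UnixSockaddrPopulate():

#include <string>
#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"

// Hypothetical populate step: rejects empty names, otherwise copies the name.
absl::Status PopulateAddress(absl::string_view name, std::string* out) {
  if (name.empty()) return absl::InvalidArgumentError("empty unix socket name");
  *out = std::string(name);
  return absl::OkStatus();
}

// Mirrors the grpc_resolve_unix_domain_address() shape above: on failure the
// status is returned as-is.
absl::StatusOr<std::vector<std::string>> ResolveUnixAddress(
    absl::string_view name) {
  std::string addr;
  absl::Status error = PopulateAddress(name, &addr);
  if (error.ok()) return std::vector<std::string>({addr});
  return error;
}

int main() {
  auto result = ResolveUnixAddress("/tmp/sock");
  auto failure = ResolveUnixAddress("");
  return (result.ok() && !failure.ok()) ? 0 : 1;
}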

@ -41,9 +41,9 @@ static grpc_error_handle pipe_init(grpc_wakeup_fd* fd_info) {
}
grpc_error_handle err;
err = grpc_set_socket_nonblocking(pipefd[0], 1);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
err = grpc_set_socket_nonblocking(pipefd[1], 1);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
fd_info->read_fd = pipefd[0];
fd_info->write_fd = pipefd[1];
return GRPC_ERROR_NONE;

@ -63,10 +63,9 @@ absl::StatusOr<std::string> ReadPolicyFromFile(absl::string_view policy_path) {
grpc_slice policy_slice = grpc_empty_slice();
grpc_error_handle error =
grpc_load_file(std::string(policy_path).c_str(), 0, &policy_slice);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
absl::Status status =
absl::InvalidArgumentError(grpc_error_std_string(error));
GRPC_ERROR_UNREF(error);
return status;
}
std::string policy_contents(StringViewFromSlice(policy_slice));

@ -82,7 +82,7 @@ AwsExternalAccountCredentials::Create(Options options,
grpc_error_handle* error) {
auto creds = MakeRefCounted<AwsExternalAccountCredentials>(
std::move(options), std::move(scopes), error);
if (GRPC_ERROR_IS_NONE(*error)) {
if (error->ok()) {
return creds;
} else {
return nullptr;
@ -204,12 +204,12 @@ void AwsExternalAccountCredentials::OnRetrieveImdsV2SessionToken(
void* arg, grpc_error_handle error) {
AwsExternalAccountCredentials* self =
static_cast<AwsExternalAccountCredentials*>(arg);
self->OnRetrieveImdsV2SessionTokenInternal(GRPC_ERROR_REF(error));
self->OnRetrieveImdsV2SessionTokenInternal(error);
}
void AwsExternalAccountCredentials::OnRetrieveImdsV2SessionTokenInternal(
grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("", error);
return;
}
@ -282,12 +282,12 @@ void AwsExternalAccountCredentials::OnRetrieveRegion(void* arg,
grpc_error_handle error) {
AwsExternalAccountCredentials* self =
static_cast<AwsExternalAccountCredentials*>(arg);
self->OnRetrieveRegionInternal(GRPC_ERROR_REF(error));
self->OnRetrieveRegionInternal(error);
}
void AwsExternalAccountCredentials::OnRetrieveRegionInternal(
grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("", error);
return;
}
@ -336,12 +336,12 @@ void AwsExternalAccountCredentials::OnRetrieveRoleName(
void* arg, grpc_error_handle error) {
AwsExternalAccountCredentials* self =
static_cast<AwsExternalAccountCredentials*>(arg);
self->OnRetrieveRoleNameInternal(GRPC_ERROR_REF(error));
self->OnRetrieveRoleNameInternal(error);
}
void AwsExternalAccountCredentials::OnRetrieveRoleNameInternal(
grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("", error);
return;
}
@ -401,12 +401,12 @@ void AwsExternalAccountCredentials::OnRetrieveSigningKeys(
void* arg, grpc_error_handle error) {
AwsExternalAccountCredentials* self =
static_cast<AwsExternalAccountCredentials*>(arg);
self->OnRetrieveSigningKeysInternal(GRPC_ERROR_REF(error));
self->OnRetrieveSigningKeysInternal(error);
}
void AwsExternalAccountCredentials::OnRetrieveSigningKeysInternal(
grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("", error);
return;
}
@ -469,22 +469,20 @@ void AwsExternalAccountCredentials::BuildSubjectToken() {
access_key_id_, secret_access_key_, token_, "POST",
cred_verification_url_, region_, "",
std::map<std::string, std::string>(), &error);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken(
"", GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Creating aws request signer failed.", &error, 1));
GRPC_ERROR_UNREF(error);
return;
}
}
auto signed_headers = signer_->GetSignedRequestHeaders();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("",
GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Invalid getting signed request"
"headers.",
&error, 1));
GRPC_ERROR_UNREF(error);
return;
}
// Construct subject token
@ -514,7 +512,7 @@ void AwsExternalAccountCredentials::FinishRetrieveSubjectToken(
auto cb = cb_;
cb_ = nullptr;
// Invoke the callback.
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
cb("", error);
} else {
cb(subject_token, GRPC_ERROR_NONE);
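
The credentials callbacks above now forward the status by value (OnRetrieveX(arg, error) calls OnRetrieveXInternal(error)) instead of REF on entry and UNREF afterwards, since copying or moving an absl::Status is cheap. A rough sketch of that trampoline shape; the class and method names are illustrative, not the gRPC credentials API:

#include <iostream>
#include <utility>

#include "absl/status/status.h"

class TokenFetcher {
 public:
  // C-style callback: receives the status and forwards it by value to a
  // member function, with no REF/UNREF bookkeeping.
  static void OnHttpResponse(void* arg, absl::Status error) {
    static_cast<TokenFetcher*>(arg)->OnHttpResponseInternal(std::move(error));
  }

 private:
  void OnHttpResponseInternal(absl::Status error) {
    if (!error.ok()) {
      std::cout << "fetch failed: " << error.ToString() << "\n";
      return;
    }
    std::cout << "fetch succeeded\n";
  }
};

int main() {
  TokenFetcher fetcher;
  TokenFetcher::OnHttpResponse(&fetcher, absl::DeadlineExceededError("timeout"));
  TokenFetcher::OnHttpResponse(&fetcher, absl::OkStatus());
}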

@ -104,7 +104,7 @@ bool MatchWorkforcePoolAudience(absl::string_view audience) {
RefCountedPtr<ExternalAccountCredentials> ExternalAccountCredentials::Create(
const Json& json, std::vector<std::string> scopes,
grpc_error_handle* error) {
GPR_ASSERT(GRPC_ERROR_IS_NONE(*error));
GPR_ASSERT(error->ok());
Options options;
options.type = GRPC_AUTH_JSON_TYPE_INVALID;
if (json.type() != Json::Type::OBJECT) {
@ -220,7 +220,7 @@ RefCountedPtr<ExternalAccountCredentials> ExternalAccountCredentials::Create(
"Invalid options credential source to create "
"ExternalAccountCredentials.");
}
if (GRPC_ERROR_IS_NONE(*error)) {
if (error->ok()) {
return creds;
} else {
return nullptr;
@ -272,7 +272,7 @@ void ExternalAccountCredentials::fetch_oauth2(
void ExternalAccountCredentials::OnRetrieveSubjectTokenInternal(
absl::string_view subject_token, grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishTokenFetch(error);
} else {
ExchangeToken(subject_token);
@ -366,13 +366,13 @@ void ExternalAccountCredentials::OnExchangeToken(void* arg,
grpc_error_handle error) {
ExternalAccountCredentials* self =
static_cast<ExternalAccountCredentials*>(arg);
self->OnExchangeTokenInternal(GRPC_ERROR_REF(error));
self->OnExchangeTokenInternal(error);
}
void ExternalAccountCredentials::OnExchangeTokenInternal(
grpc_error_handle error) {
http_request_.reset();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishTokenFetch(error);
} else {
if (options_.service_account_impersonation_url.empty()) {
@ -463,13 +463,13 @@ void ExternalAccountCredentials::OnImpersenateServiceAccount(
void* arg, grpc_error_handle error) {
ExternalAccountCredentials* self =
static_cast<ExternalAccountCredentials*>(arg);
self->OnImpersenateServiceAccountInternal(GRPC_ERROR_REF(error));
self->OnImpersenateServiceAccountInternal(error);
}
void ExternalAccountCredentials::OnImpersenateServiceAccountInternal(
grpc_error_handle error) {
http_request_.reset();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishTokenFetch(error);
return;
}
@ -529,8 +529,7 @@ void ExternalAccountCredentials::OnImpersenateServiceAccountInternal(
}
void ExternalAccountCredentials::FinishTokenFetch(grpc_error_handle error) {
GRPC_LOG_IF_ERROR("Fetch external account credentials access token",
GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("Fetch external account credentials access token", error);
// Move object state into local variables.
auto* cb = response_cb_;
response_cb_ = nullptr;
@ -542,7 +541,6 @@ void ExternalAccountCredentials::FinishTokenFetch(grpc_error_handle error) {
cb(metadata_req, error);
// Delete context.
delete ctx;
GRPC_ERROR_UNREF(error);
}
} // namespace grpc_core
@ -561,11 +559,10 @@ grpc_call_credentials* grpc_external_account_credentials_create(
auto creds = grpc_core::ExternalAccountCredentials::Create(
*json, std::move(scopes), &error)
.release();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR,
"External account credentials creation failed. Error: %s.",
grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return nullptr;
}
return creds;

@ -37,7 +37,7 @@ FileExternalAccountCredentials::Create(Options options,
grpc_error_handle* error) {
auto creds = MakeRefCounted<FileExternalAccountCredentials>(
std::move(options), std::move(scopes), error);
if (GRPC_ERROR_IS_NONE(*error)) {
if (error->ok()) {
return creds;
} else {
return nullptr;
@ -108,7 +108,7 @@ void FileExternalAccountCredentials::RetrieveSubjectToken(
// request because it may have changed since the last request.
grpc_error_handle error =
grpc_load_file(file_.c_str(), 0, &content_slice.slice);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
cb("", error);
return;
}

@ -50,7 +50,7 @@ UrlExternalAccountCredentials::Create(Options options,
grpc_error_handle* error) {
auto creds = MakeRefCounted<UrlExternalAccountCredentials>(
std::move(options), std::move(scopes), error);
if (GRPC_ERROR_IS_NONE(*error)) {
if (error->ok()) {
return creds;
} else {
return nullptr;
@ -189,13 +189,13 @@ void UrlExternalAccountCredentials::OnRetrieveSubjectToken(
void* arg, grpc_error_handle error) {
UrlExternalAccountCredentials* self =
static_cast<UrlExternalAccountCredentials*>(arg);
self->OnRetrieveSubjectTokenInternal(GRPC_ERROR_REF(error));
self->OnRetrieveSubjectTokenInternal(error);
}
void UrlExternalAccountCredentials::OnRetrieveSubjectTokenInternal(
grpc_error_handle error) {
http_request_.reset();
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
FinishRetrieveSubjectToken("", error);
return;
}
@ -236,7 +236,7 @@ void UrlExternalAccountCredentials::FinishRetrieveSubjectToken(
auto cb = cb_;
cb_ = nullptr;
// Invoke the callback.
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
cb("", error);
} else {
cb(subject_token, GRPC_ERROR_NONE);

@ -170,7 +170,7 @@ static void on_metadata_server_detection_http_response(
void* user_data, grpc_error_handle error) {
metadata_server_detector* detector =
static_cast<metadata_server_detector*>(user_data);
if (GRPC_ERROR_IS_NONE(error) && detector->response.status == 200 &&
if (error.ok() && detector->response.status == 200 &&
detector->response.hdr_count > 0) {
/* Internet providers can return a generic response to all requests, so
it is necessary to check that metadata header is present also. */
@ -313,7 +313,7 @@ static grpc_error_handle create_default_creds_from_path(
goto end;
}
error = grpc_load_file(creds_path.c_str(), 0, &creds_data);
if (!GRPC_ERROR_IS_NONE(error)) goto end;
if (!error.ok()) goto end;
{
auto json_or = Json::Parse(grpc_core::StringViewFromSlice(creds_data));
if (!json_or.ok()) {
@ -365,7 +365,7 @@ static grpc_error_handle create_default_creds_from_path(
result = grpc_core::ExternalAccountCredentials::Create(json, {}, &error);
end:
GPR_ASSERT((result == nullptr) + (GRPC_ERROR_IS_NONE(error)) == 1);
GPR_ASSERT((result == nullptr) + (error.ok()) == 1);
grpc_slice_unref(creds_data);
*creds = result;
return error;
@ -401,14 +401,14 @@ static grpc_core::RefCountedPtr<grpc_call_credentials> make_default_call_creds(
auto path_from_env = grpc_core::GetEnv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR);
if (path_from_env.has_value()) {
err = create_default_creds_from_path(*path_from_env, &call_creds);
if (GRPC_ERROR_IS_NONE(err)) return call_creds;
if (err.ok()) return call_creds;
*error = grpc_error_add_child(*error, err);
}
/* Then the well-known file. */
err = create_default_creds_from_path(
grpc_get_well_known_google_credentials_file_path(), &call_creds);
if (GRPC_ERROR_IS_NONE(err)) return call_creds;
if (err.ok()) return call_creds;
*error = grpc_error_add_child(*error, err);
update_tenancy();
@ -463,7 +463,6 @@ grpc_channel_credentials* grpc_google_default_credentials_create(
gpr_log(GPR_ERROR, "Could not create google default credentials: %s",
grpc_error_std_string(error).c_str());
}
GRPC_ERROR_UNREF(error);
return result;
}

@ -234,7 +234,7 @@ end:
static void on_oauth2_token_fetcher_http_response(void* user_data,
grpc_error_handle error) {
GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error));
GRPC_LOG_IF_ERROR("oauth_fetch", error);
grpc_credentials_metadata_request* r =
static_cast<grpc_credentials_metadata_request*>(user_data);
grpc_oauth2_token_fetcher_credentials* c =
@ -247,10 +247,9 @@ void grpc_oauth2_token_fetcher_credentials::on_http_response(
absl::optional<grpc_core::Slice> access_token_value;
grpc_core::Duration token_lifetime;
grpc_credentials_status status =
GRPC_ERROR_IS_NONE(error)
? grpc_oauth2_token_fetcher_credentials_parse_server_response(
&r->response, &access_token_value, &token_lifetime)
: GRPC_CREDENTIALS_ERROR;
error.ok() ? grpc_oauth2_token_fetcher_credentials_parse_server_response(
&r->response, &access_token_value, &token_lifetime)
: GRPC_CREDENTIALS_ERROR;
// Update cache and grab list of pending requests.
gpr_mu_lock(&mu_);
token_fetch_pending_ = false;
@ -277,7 +276,6 @@ void grpc_oauth2_token_fetcher_credentials::on_http_response(
auto err = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
"Error occurred when fetching oauth2 token.", &error, 1);
pending_request->result = grpc_error_to_absl_status(err);
GRPC_ERROR_UNREF(err);
}
pending_request->done.store(true, std::memory_order_release);
pending_request->waker.Wakeup();
@ -533,7 +531,7 @@ void MaybeAddToBody(const char* field_name, const char* field,
grpc_error_handle LoadTokenFile(const char* path, gpr_slice* token) {
grpc_error_handle err = grpc_load_file(path, 1, token);
if (!GRPC_ERROR_IS_NONE(err)) return err;
if (!err.ok()) return err;
if (GRPC_SLICE_LENGTH(*token) == 0) {
gpr_log(GPR_ERROR, "Token file %s is empty", path);
err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Token file is empty.");
@ -571,9 +569,8 @@ class StsTokenFetcherCredentials
grpc_http_request request;
memset(&request, 0, sizeof(grpc_http_request));
grpc_error_handle err = FillBody(&request.body, &request.body_length);
if (!GRPC_ERROR_IS_NONE(err)) {
if (!err.ok()) {
response_cb(metadata_req, err);
GRPC_ERROR_UNREF(err);
return;
}
grpc_http_header header = {
@ -609,7 +606,7 @@ class StsTokenFetcherCredentials
auto cleanup = [&body, &body_length, &body_parts, &subject_token,
&actor_token, &err]() {
if (GRPC_ERROR_IS_NONE(err)) {
if (err.ok()) {
std::string body_str = absl::StrJoin(body_parts, "");
*body = gpr_strdup(body_str.c_str());
*body_length = body_str.size();
@ -620,7 +617,7 @@ class StsTokenFetcherCredentials
};
err = LoadTokenFile(subject_token_path_.get(), &subject_token);
if (!GRPC_ERROR_IS_NONE(err)) return cleanup();
if (!err.ok()) return cleanup();
body_parts.push_back(absl::StrFormat(
GRPC_STS_POST_MINIMAL_BODY_FORMAT_STRING,
reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(subject_token)),
@ -632,7 +629,7 @@ class StsTokenFetcherCredentials
&body_parts);
if ((actor_token_path_ != nullptr) && *actor_token_path_ != '\0') {
err = LoadTokenFile(actor_token_path_.get(), &actor_token);
if (!GRPC_ERROR_IS_NONE(err)) return cleanup();
if (!err.ok()) return cleanup();
MaybeAddToBody(
"actor_token",
reinterpret_cast<const char*>(GRPC_SLICE_START_PTR(actor_token)),
@ -689,7 +686,6 @@ absl::StatusOr<URI> ValidateStsCredentialsOptions(
"Invalid STS Credentials Options", &error_list);
auto retval =
absl::InvalidArgumentError(grpc_error_std_string(grpc_error_vec));
GRPC_ERROR_UNREF(grpc_error_vec);
return retval;
}

@ -122,8 +122,7 @@ void grpc_tls_certificate_distributor::SetErrorForCert(
certificate_info_map_[*watcher_it->second.identity_cert_name];
identity_cert_error_to_report = identity_cert_info.identity_cert_error;
}
watcher_ptr->OnError(GRPC_ERROR_REF(*root_cert_error),
GRPC_ERROR_REF(identity_cert_error_to_report));
watcher_ptr->OnError(*root_cert_error, identity_cert_error_to_report);
}
cert_info.SetRootError(*root_cert_error);
}
@ -145,32 +144,28 @@ void grpc_tls_certificate_distributor::SetErrorForCert(
certificate_info_map_[*watcher_it->second.root_cert_name];
root_cert_error_to_report = root_cert_info.root_cert_error;
}
watcher_ptr->OnError(GRPC_ERROR_REF(root_cert_error_to_report),
GRPC_ERROR_REF(*identity_cert_error));
watcher_ptr->OnError(root_cert_error_to_report, *identity_cert_error);
}
cert_info.SetIdentityError(*identity_cert_error);
}
};
void grpc_tls_certificate_distributor::SetError(grpc_error_handle error) {
GPR_ASSERT(!GRPC_ERROR_IS_NONE(error));
GPR_ASSERT(!error.ok());
grpc_core::MutexLock lock(&mu_);
for (const auto& watcher : watchers_) {
const auto watcher_ptr = watcher.first;
GPR_ASSERT(watcher_ptr != nullptr);
const auto& watcher_info = watcher.second;
watcher_ptr->OnError(
watcher_info.root_cert_name.has_value() ? GRPC_ERROR_REF(error)
: GRPC_ERROR_NONE,
watcher_info.identity_cert_name.has_value() ? GRPC_ERROR_REF(error)
: GRPC_ERROR_NONE);
watcher_info.root_cert_name.has_value() ? error : GRPC_ERROR_NONE,
watcher_info.identity_cert_name.has_value() ? error : GRPC_ERROR_NONE);
}
for (auto& cert_info_entry : certificate_info_map_) {
auto& cert_info = cert_info_entry.second;
cert_info.SetRootError(GRPC_ERROR_REF(error));
cert_info.SetIdentityError(GRPC_ERROR_REF(error));
cert_info.SetRootError(error);
cert_info.SetIdentityError(error);
}
GRPC_ERROR_UNREF(error);
};
void grpc_tls_certificate_distributor::WatchTlsCertificates(
@ -203,7 +198,7 @@ void grpc_tls_certificate_distributor::WatchTlsCertificates(
already_watching_identity_for_root_cert =
!cert_info.identity_cert_watchers.empty();
cert_info.root_cert_watchers.insert(watcher_ptr);
root_error = GRPC_ERROR_REF(cert_info.root_cert_error);
root_error = cert_info.root_cert_error;
// Empty credentials will be treated as no updates.
if (!cert_info.pem_root_certs.empty()) {
updated_root_certs = cert_info.pem_root_certs;
@ -215,7 +210,7 @@ void grpc_tls_certificate_distributor::WatchTlsCertificates(
already_watching_root_for_identity_cert =
!cert_info.root_cert_watchers.empty();
cert_info.identity_cert_watchers.insert(watcher_ptr);
identity_error = GRPC_ERROR_REF(cert_info.identity_cert_error);
identity_error = cert_info.identity_cert_error;
// Empty credentials will be treated as no updates.
if (!cert_info.pem_key_cert_pairs.empty()) {
updated_identity_pairs = cert_info.pem_key_cert_pairs;
@ -231,13 +226,9 @@ void grpc_tls_certificate_distributor::WatchTlsCertificates(
std::move(updated_identity_pairs));
}
// Notify this watcher if the certs it is watching already had some errors.
if (!GRPC_ERROR_IS_NONE(root_error) ||
!GRPC_ERROR_IS_NONE(identity_error)) {
watcher_ptr->OnError(GRPC_ERROR_REF(root_error),
GRPC_ERROR_REF(identity_error));
if (!root_error.ok() || !identity_error.ok()) {
watcher_ptr->OnError(root_error, identity_error);
}
GRPC_ERROR_UNREF(root_error);
GRPC_ERROR_UNREF(identity_error);
}
// Invoke watch status callback if needed.
{

@ -188,16 +188,9 @@ struct grpc_tls_certificate_distributor
// credential reloading.
std::set<TlsCertificatesWatcherInterface*> identity_cert_watchers;
~CertificateInfo() {
GRPC_ERROR_UNREF(root_cert_error);
GRPC_ERROR_UNREF(identity_cert_error);
}
void SetRootError(grpc_error_handle error) {
GRPC_ERROR_UNREF(root_cert_error);
root_cert_error = error;
}
~CertificateInfo() {}
void SetRootError(grpc_error_handle error) { root_cert_error = error; }
void SetIdentityError(grpc_error_handle error) {
GRPC_ERROR_UNREF(identity_cert_error);
identity_cert_error = error;
}
};
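
In the certificate distributor, one status is now broadcast to every watcher by copying the value, and the per-certificate error slots are plain absl::Status members that the setters overwrite and the destructor drops for free. A small sketch of that broadcast, with Watcher standing in for TlsCertificatesWatcherInterface:

#include <iostream>
#include <vector>

#include "absl/status/status.h"

struct Watcher {
  void OnError(absl::Status root_error, absl::Status identity_error) {
    std::cout << root_error.ToString() << " / " << identity_error.ToString()
              << "\n";
  }
};

void BroadcastError(std::vector<Watcher*>& watchers, absl::Status error) {
  for (Watcher* w : watchers) {
    w->OnError(error, error);  // value copies; no GRPC_ERROR_REF needed
  }
  // `error` goes out of scope here; no GRPC_ERROR_UNREF needed.
}

int main() {
  Watcher a, b;
  std::vector<Watcher*> watchers = {&a, &b};
  BroadcastError(watchers, absl::UnavailableError("certificate load failed"));
}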

@ -89,8 +89,7 @@ StaticDataCertificateProvider::StaticDataCertificateProvider(
identity_cert_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Unable to get latest identity certificates.");
}
if (!GRPC_ERROR_IS_NONE(root_cert_error) ||
!GRPC_ERROR_IS_NONE(identity_cert_error)) {
if (!root_cert_error.ok() || !identity_cert_error.ok()) {
distributor_->SetErrorForCert(cert_name, root_cert_error,
identity_cert_error);
}
@ -184,8 +183,7 @@ FileWatcherCertificateProvider::FileWatcherCertificateProvider(
identity_cert_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
"Unable to get latest identity certificates.");
}
if (!GRPC_ERROR_IS_NONE(root_cert_error) ||
!GRPC_ERROR_IS_NONE(identity_cert_error)) {
if (!root_cert_error.ok() || !identity_cert_error.ok()) {
distributor_->SetErrorForCert(cert_name, root_cert_error,
identity_cert_error);
}
@ -269,15 +267,10 @@ void FileWatcherCertificateProvider::ForceUpdate() {
info.identity_being_watched && pem_key_cert_pairs_.empty();
if (report_root_error || report_identity_error) {
distributor_->SetErrorForCert(
cert_name,
report_root_error ? GRPC_ERROR_REF(root_cert_error)
: GRPC_ERROR_NONE,
report_identity_error ? GRPC_ERROR_REF(identity_cert_error)
: GRPC_ERROR_NONE);
cert_name, report_root_error ? root_cert_error : GRPC_ERROR_NONE,
report_identity_error ? identity_cert_error : GRPC_ERROR_NONE);
}
}
GRPC_ERROR_UNREF(root_cert_error);
GRPC_ERROR_UNREF(identity_cert_error);
}
}
@ -288,11 +281,10 @@ FileWatcherCertificateProvider::ReadRootCertificatesFromFile(
grpc_slice root_slice = grpc_empty_slice();
grpc_error_handle root_error =
grpc_load_file(root_cert_full_path.c_str(), 0, &root_slice);
if (!GRPC_ERROR_IS_NONE(root_error)) {
if (!root_error.ok()) {
gpr_log(GPR_ERROR, "Reading file %s failed: %s",
root_cert_full_path.c_str(),
grpc_error_std_string(root_error).c_str());
GRPC_ERROR_UNREF(root_error);
return absl::nullopt;
}
std::string root_cert(StringViewFromSlice(root_slice));
@ -347,20 +339,18 @@ FileWatcherCertificateProvider::ReadIdentityKeyCertPairFromFiles(
SliceWrapper key_slice, cert_slice;
grpc_error_handle key_error =
grpc_load_file(private_key_path.c_str(), 0, &key_slice.slice);
if (!GRPC_ERROR_IS_NONE(key_error)) {
if (!key_error.ok()) {
gpr_log(GPR_ERROR, "Reading file %s failed: %s. Start retrying...",
private_key_path.c_str(),
grpc_error_std_string(key_error).c_str());
GRPC_ERROR_UNREF(key_error);
continue;
}
grpc_error_handle cert_error =
grpc_load_file(identity_certificate_path.c_str(), 0, &cert_slice.slice);
if (!GRPC_ERROR_IS_NONE(cert_error)) {
if (!cert_error.ok()) {
gpr_log(GPR_ERROR, "Reading file %s failed: %s. Start retrying...",
identity_certificate_path.c_str(),
grpc_error_std_string(cert_error).c_str());
GRPC_ERROR_UNREF(cert_error);
continue;
}
std::string private_key(StringViewFromSlice(key_slice.slice));

@ -120,9 +120,7 @@ class grpc_alts_channel_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other_sc) const override {
auto* other =
@ -179,9 +177,7 @@ class grpc_alts_server_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other) const override {
return server_security_connector_cmp(

@ -86,9 +86,7 @@ class grpc_fake_channel_security_connector final
grpc_closure* on_peer_checked) override;
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other_sc) const override {
auto* other =
@ -286,9 +284,7 @@ class grpc_fake_server_security_connector
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
void add_handshakers(const grpc_core::ChannelArgs& args,
grpc_pollset_set* /*interested_parties*/,

@ -72,9 +72,7 @@ class InsecureChannelSecurityConnector
grpc_closure* on_peer_checked) override;
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other_sc) const override;
};
@ -95,9 +93,7 @@ class InsecureServerSecurityConnector : public grpc_server_security_connector {
grpc_closure* on_peer_checked) override;
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other) const override;
};

@ -73,10 +73,8 @@ grpc_slice GetSystemRootCerts() {
for (size_t i = 0; i < num_cert_files_; i++) {
grpc_error_handle error =
grpc_load_file(kCertFiles[i], 1, &valid_bundle_slice);
if (GRPC_ERROR_IS_NONE(error)) {
if (error.ok()) {
return valid_bundle_slice;
} else {
GRPC_ERROR_UNREF(error);
}
}
return grpc_empty_slice();

@ -208,9 +208,7 @@ class grpc_local_channel_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
grpc_core::ArenaPromise<absl::Status> CheckCallHost(
absl::string_view host, grpc_auth_context*) override {
@ -256,9 +254,7 @@ class grpc_local_server_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other) const override {
return server_security_connector_cmp(

@ -61,7 +61,7 @@ grpc_error_handle ssl_check_peer(
const char* peer_name, const tsi_peer* peer,
grpc_core::RefCountedPtr<grpc_auth_context>* auth_context) {
grpc_error_handle error = grpc_ssl_check_alpn(peer);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
return error;
}
/* Check the peer name if specified. */
@ -159,8 +159,7 @@ class grpc_ssl_channel_security_connector final
? target_name_.c_str()
: overridden_target_name_.c_str();
grpc_error_handle error = ssl_check_peer(target_name, &peer, auth_context);
if (GRPC_ERROR_IS_NONE(error) &&
verify_options_->verify_peer_callback != nullptr) {
if (error.ok() && verify_options_->verify_peer_callback != nullptr) {
const tsi_peer_property* p =
tsi_peer_get_property_by_name(&peer, TSI_X509_PEM_CERT_PROPERTY);
if (p == nullptr) {
@ -185,9 +184,7 @@ class grpc_ssl_channel_security_connector final
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other_sc) const override {
auto* other =
@ -306,9 +303,7 @@ class grpc_ssl_server_security_connector
}
void cancel_check_peer(grpc_closure* /*on_peer_checked*/,
grpc_error_handle error) override {
GRPC_ERROR_UNREF(error);
}
grpc_error_handle /*error*/) override {}
int cmp(const grpc_security_connector* other) const override {
return server_security_connector_cmp(

@ -359,7 +359,7 @@ void TlsChannelSecurityConnector::check_peer(
? target_name_.c_str()
: overridden_target_name_.c_str();
grpc_error_handle error = grpc_ssl_check_alpn(&peer);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
ExecCtx::Run(DEBUG_LOCATION, on_peer_checked, error);
tsi_peer_destruct(&peer);
return;
@ -378,11 +378,10 @@ void TlsChannelSecurityConnector::check_peer(
void TlsChannelSecurityConnector::cancel_check_peer(
grpc_closure* on_peer_checked, grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR,
"TlsChannelSecurityConnector::cancel_check_peer error: %s",
grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return;
}
auto* verifier = options_->certificate_verifier();
@ -456,18 +455,16 @@ void TlsChannelSecurityConnector::TlsChannelCertificateWatcher::
// BlockOnInitialCredentialHandshaker is implemented.
void TlsChannelSecurityConnector::TlsChannelCertificateWatcher::OnError(
grpc_error_handle root_cert_error, grpc_error_handle identity_cert_error) {
if (!GRPC_ERROR_IS_NONE(root_cert_error)) {
if (!root_cert_error.ok()) {
gpr_log(GPR_ERROR,
"TlsChannelCertificateWatcher getting root_cert_error: %s",
grpc_error_std_string(root_cert_error).c_str());
}
if (!GRPC_ERROR_IS_NONE(identity_cert_error)) {
if (!identity_cert_error.ok()) {
gpr_log(GPR_ERROR,
"TlsChannelCertificateWatcher getting identity_cert_error: %s",
grpc_error_std_string(identity_cert_error).c_str());
}
GRPC_ERROR_UNREF(root_cert_error);
GRPC_ERROR_UNREF(identity_cert_error);
}
TlsChannelSecurityConnector::ChannelPendingVerifierRequest::
@ -643,7 +640,7 @@ void TlsServerSecurityConnector::check_peer(
RefCountedPtr<grpc_auth_context>* auth_context,
grpc_closure* on_peer_checked) {
grpc_error_handle error = grpc_ssl_check_alpn(&peer);
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
ExecCtx::Run(DEBUG_LOCATION, on_peer_checked, error);
tsi_peer_destruct(&peer);
return;
@ -666,11 +663,10 @@ void TlsServerSecurityConnector::check_peer(
void TlsServerSecurityConnector::cancel_check_peer(
grpc_closure* on_peer_checked, grpc_error_handle error) {
if (!GRPC_ERROR_IS_NONE(error)) {
if (!error.ok()) {
gpr_log(GPR_ERROR,
"TlsServerSecurityConnector::cancel_check_peer error: %s",
grpc_error_std_string(error).c_str());
GRPC_ERROR_UNREF(error);
return;
}
auto* verifier = options_->certificate_verifier();
@ -734,18 +730,16 @@ void TlsServerSecurityConnector::TlsServerCertificateWatcher::
// BlockOnInitialCredentialHandshaker is implemented.
void TlsServerSecurityConnector::TlsServerCertificateWatcher::OnError(
grpc_error_handle root_cert_error, grpc_error_handle identity_cert_error) {
if (!GRPC_ERROR_IS_NONE(root_cert_error)) {
if (!root_cert_error.ok()) {
gpr_log(GPR_ERROR,
"TlsServerCertificateWatcher getting root_cert_error: %s",
grpc_error_std_string(root_cert_error).c_str());
}
if (!GRPC_ERROR_IS_NONE(identity_cert_error)) {
if (!identity_cert_error.ok()) {
gpr_log(GPR_ERROR,
"TlsServerCertificateWatcher getting identity_cert_error: %s",
grpc_error_std_string(identity_cert_error).c_str());
}
GRPC_ERROR_UNREF(root_cert_error);
GRPC_ERROR_UNREF(identity_cert_error);
}
TlsServerSecurityConnector::ServerPendingVerifierRequest::
