commit
f438d7c727
390 changed files with 23179 additions and 6580 deletions
@ -0,0 +1,3 @@ |
||||
# load bazelrc from the legacy location |
||||
# as recommended in https://github.com/bazelbuild/bazel/issues/6319 |
||||
import %workspace%/tools/bazel.rc |
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 52 KiB |
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 44 KiB |
@ -0,0 +1,44 @@ |
||||
# Copyright 2018 gRPC authors. |
||||
# |
||||
# Licensed under the Apache License, Version 2.0 (the "License"); |
||||
# you may not use this file except in compliance with the License. |
||||
# You may obtain a copy of the License at |
||||
# |
||||
# http://www.apache.org/licenses/LICENSE-2.0 |
||||
# |
||||
# Unless required by applicable law or agreed to in writing, software |
||||
# distributed under the License is distributed on an "AS IS" BASIS, |
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
# See the License for the specific language governing permissions and |
||||
# limitations under the License. |
||||
"""The Python implementation of the GRPC helloworld.Greeter client with channel options and call timeout parameters.""" |
||||
|
||||
from __future__ import print_function |
||||
|
||||
import grpc |
||||
|
||||
import helloworld_pb2 |
||||
import helloworld_pb2_grpc |
||||
|
||||
|
||||
def run():
    """Issue a single SayHello RPC over a channel configured with options."""
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    #
    # Channel arguments; the full list lives at
    # https://grpc.io/grpc/core/group__grpc__arg__keys.html
    channel_options = [
        ('grpc.lb_policy_name', 'pick_first'),
        ('grpc.enable_retries', 0),
        ('grpc.keepalive_timeout_ms', 10000),
    ]
    with grpc.insecure_channel(
            target='localhost:50051', options=channel_options) as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        # Deadline for the call, in seconds. See the gRPC Python docs for
        # details: https://grpc.io/grpc/python/grpc.html
        response = stub.SayHello(
            helloworld_pb2.HelloRequest(name='you'), timeout=10)
        print("Greeter client received: " + response.message)
||||
|
||||
|
||||
# Script entry point: run the example client when executed directly.
if __name__ == '__main__':
    run()
@ -0,0 +1,920 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H |
||||
#define GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H |
||||
|
||||
#include <assert.h> |
||||
#include <array> |
||||
#include <cstring> |
||||
#include <functional> |
||||
#include <map> |
||||
#include <memory> |
||||
#include <vector> |
||||
|
||||
#include <grpcpp/impl/codegen/byte_buffer.h> |
||||
#include <grpcpp/impl/codegen/call.h> |
||||
#include <grpcpp/impl/codegen/call_hook.h> |
||||
#include <grpcpp/impl/codegen/call_op_set_interface.h> |
||||
#include <grpcpp/impl/codegen/client_context.h> |
||||
#include <grpcpp/impl/codegen/completion_queue_tag.h> |
||||
#include <grpcpp/impl/codegen/config.h> |
||||
#include <grpcpp/impl/codegen/core_codegen_interface.h> |
||||
#include <grpcpp/impl/codegen/intercepted_channel.h> |
||||
#include <grpcpp/impl/codegen/interceptor_common.h> |
||||
#include <grpcpp/impl/codegen/serialization_traits.h> |
||||
#include <grpcpp/impl/codegen/slice.h> |
||||
#include <grpcpp/impl/codegen/string_ref.h> |
||||
|
||||
#include <grpc/impl/codegen/atm.h> |
||||
#include <grpc/impl/codegen/compression_types.h> |
||||
#include <grpc/impl/codegen/grpc_types.h> |
||||
|
||||
namespace grpc { |
||||
|
||||
class CompletionQueue; |
||||
extern CoreCodegenInterface* g_core_codegen_interface; |
||||
|
||||
namespace internal { |
||||
class Call; |
||||
class CallHook; |
||||
|
||||
// TODO(yangg) if the map is changed before we send, the pointers will be a
|
||||
// mess. Make sure it does not happen.
|
||||
inline grpc_metadata* FillMetadataArray( |
||||
const std::multimap<grpc::string, grpc::string>& metadata, |
||||
size_t* metadata_count, const grpc::string& optional_error_details) { |
||||
*metadata_count = metadata.size() + (optional_error_details.empty() ? 0 : 1); |
||||
if (*metadata_count == 0) { |
||||
return nullptr; |
||||
} |
||||
grpc_metadata* metadata_array = |
||||
(grpc_metadata*)(g_core_codegen_interface->gpr_malloc( |
||||
(*metadata_count) * sizeof(grpc_metadata))); |
||||
size_t i = 0; |
||||
for (auto iter = metadata.cbegin(); iter != metadata.cend(); ++iter, ++i) { |
||||
metadata_array[i].key = SliceReferencingString(iter->first); |
||||
metadata_array[i].value = SliceReferencingString(iter->second); |
||||
} |
||||
if (!optional_error_details.empty()) { |
||||
metadata_array[i].key = |
||||
g_core_codegen_interface->grpc_slice_from_static_buffer( |
||||
kBinaryErrorDetailsKey, sizeof(kBinaryErrorDetailsKey) - 1); |
||||
metadata_array[i].value = SliceReferencingString(optional_error_details); |
||||
} |
||||
return metadata_array; |
||||
} |
||||
} // namespace internal
|
||||
|
||||
/// Per-message write options.
|
||||
class WriteOptions { |
||||
public: |
||||
WriteOptions() : flags_(0), last_message_(false) {} |
||||
WriteOptions(const WriteOptions& other) |
||||
: flags_(other.flags_), last_message_(other.last_message_) {} |
||||
|
||||
/// Clear all flags.
|
||||
inline void Clear() { flags_ = 0; } |
||||
|
||||
/// Returns raw flags bitset.
|
||||
inline uint32_t flags() const { return flags_; } |
||||
|
||||
/// Sets flag for the disabling of compression for the next message write.
|
||||
///
|
||||
/// \sa GRPC_WRITE_NO_COMPRESS
|
||||
inline WriteOptions& set_no_compression() { |
||||
SetBit(GRPC_WRITE_NO_COMPRESS); |
||||
return *this; |
||||
} |
||||
|
||||
/// Clears flag for the disabling of compression for the next message write.
|
||||
///
|
||||
/// \sa GRPC_WRITE_NO_COMPRESS
|
||||
inline WriteOptions& clear_no_compression() { |
||||
ClearBit(GRPC_WRITE_NO_COMPRESS); |
||||
return *this; |
||||
} |
||||
|
||||
/// Get value for the flag indicating whether compression for the next
|
||||
/// message write is forcefully disabled.
|
||||
///
|
||||
/// \sa GRPC_WRITE_NO_COMPRESS
|
||||
inline bool get_no_compression() const { |
||||
return GetBit(GRPC_WRITE_NO_COMPRESS); |
||||
} |
||||
|
||||
/// Sets flag indicating that the write may be buffered and need not go out on
|
||||
/// the wire immediately.
|
||||
///
|
||||
/// \sa GRPC_WRITE_BUFFER_HINT
|
||||
inline WriteOptions& set_buffer_hint() { |
||||
SetBit(GRPC_WRITE_BUFFER_HINT); |
||||
return *this; |
||||
} |
||||
|
||||
/// Clears flag indicating that the write may be buffered and need not go out
|
||||
/// on the wire immediately.
|
||||
///
|
||||
/// \sa GRPC_WRITE_BUFFER_HINT
|
||||
inline WriteOptions& clear_buffer_hint() { |
||||
ClearBit(GRPC_WRITE_BUFFER_HINT); |
||||
return *this; |
||||
} |
||||
|
||||
/// Get value for the flag indicating that the write may be buffered and need
|
||||
/// not go out on the wire immediately.
|
||||
///
|
||||
/// \sa GRPC_WRITE_BUFFER_HINT
|
||||
inline bool get_buffer_hint() const { return GetBit(GRPC_WRITE_BUFFER_HINT); } |
||||
|
||||
/// corked bit: aliases set_buffer_hint currently, with the intent that
|
||||
/// set_buffer_hint will be removed in the future
|
||||
inline WriteOptions& set_corked() { |
||||
SetBit(GRPC_WRITE_BUFFER_HINT); |
||||
return *this; |
||||
} |
||||
|
||||
inline WriteOptions& clear_corked() { |
||||
ClearBit(GRPC_WRITE_BUFFER_HINT); |
||||
return *this; |
||||
} |
||||
|
||||
inline bool is_corked() const { return GetBit(GRPC_WRITE_BUFFER_HINT); } |
||||
|
||||
/// last-message bit: indicates this is the last message in a stream
|
||||
/// client-side: makes Write the equivalent of performing Write, WritesDone
|
||||
/// in a single step
|
||||
/// server-side: hold the Write until the service handler returns (sync api)
|
||||
/// or until Finish is called (async api)
|
||||
inline WriteOptions& set_last_message() { |
||||
last_message_ = true; |
||||
return *this; |
||||
} |
||||
|
||||
/// Clears flag indicating that this is the last message in a stream,
|
||||
/// disabling coalescing.
|
||||
inline WriteOptions& clear_last_message() { |
||||
last_message_ = false; |
||||
return *this; |
||||
} |
||||
|
||||
/// Guarantee that all bytes have been written to the socket before completing
|
||||
/// this write (usually writes are completed when they pass flow control).
|
||||
inline WriteOptions& set_write_through() { |
||||
SetBit(GRPC_WRITE_THROUGH); |
||||
return *this; |
||||
} |
||||
|
||||
inline bool is_write_through() const { return GetBit(GRPC_WRITE_THROUGH); } |
||||
|
||||
/// Get value for the flag indicating that this is the last message, and
|
||||
/// should be coalesced with trailing metadata.
|
||||
///
|
||||
/// \sa GRPC_WRITE_LAST_MESSAGE
|
||||
bool is_last_message() const { return last_message_; } |
||||
|
||||
WriteOptions& operator=(const WriteOptions& rhs) { |
||||
flags_ = rhs.flags_; |
||||
return *this; |
||||
} |
||||
|
||||
private: |
||||
void SetBit(const uint32_t mask) { flags_ |= mask; } |
||||
|
||||
void ClearBit(const uint32_t mask) { flags_ &= ~mask; } |
||||
|
||||
bool GetBit(const uint32_t mask) const { return (flags_ & mask) != 0; } |
||||
|
||||
uint32_t flags_; |
||||
bool last_message_; |
||||
}; |
||||
|
||||
namespace internal { |
||||
|
||||
/// Default argument for CallOpSet. I is unused by the class, but can be
/// used for generating multiple names for the same thing.
template <int I>
class CallNoOp {
 protected:
  // No-op stand-ins for the CallOpSet op protocol: an unused slot adds no
  // grpc_op entries, leaves the status untouched, and registers no
  // interceptor hook points.
  void AddOp(grpc_op* ops, size_t* nops) {}
  void FinishOp(bool* status) {}
  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}
  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
  }
};
||||
|
||||
// Op that sends the call's initial metadata (and optionally a compression
// level) as a GRPC_OP_SEND_INITIAL_METADATA batch entry.
class CallOpSendInitialMetadata {
 public:
  CallOpSendInitialMetadata() : send_(false) {
    maybe_compression_level_.is_set = false;
  }

  /// Arm this op to send \a metadata with \a flags as the call's initial
  /// metadata. The map is only referenced, not copied, so it must outlive the
  /// batch. Resets any previously requested compression level.
  void SendInitialMetadata(std::multimap<grpc::string, grpc::string>* metadata,
                           uint32_t flags) {
    maybe_compression_level_.is_set = false;
    send_ = true;
    flags_ = flags;
    metadata_map_ = metadata;
  }

  /// Request that \a level be attached to the initial metadata.
  void set_compression_level(grpc_compression_level level) {
    maybe_compression_level_.is_set = true;
    maybe_compression_level_.level = level;
  }

 protected:
  // Appends a GRPC_OP_SEND_INITIAL_METADATA entry to ops unless this op is
  // idle or the RPC was hijacked by an interceptor.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_ || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_INITIAL_METADATA;
    op->flags = flags_;
    op->reserved = NULL;
    // FillMetadataArray gpr_malloc's the array; it is released in FinishOp.
    initial_metadata_ =
        FillMetadataArray(*metadata_map_, &initial_metadata_count_, "");
    op->data.send_initial_metadata.count = initial_metadata_count_;
    op->data.send_initial_metadata.metadata = initial_metadata_;
    op->data.send_initial_metadata.maybe_compression_level.is_set =
        maybe_compression_level_.is_set;
    if (maybe_compression_level_.is_set) {
      op->data.send_initial_metadata.maybe_compression_level.level =
          maybe_compression_level_.level;
    }
  }
  // Frees the metadata array allocated in AddOp and disarms the op.
  void FinishOp(bool* status) {
    if (!send_ || hijacked_) return;
    g_core_codegen_interface->gpr_free(initial_metadata_);
    send_ = false;
  }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (!send_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_INITIAL_METADATA);
    interceptor_methods->SetSendInitialMetadata(metadata_map_);
  }

  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}

  // Marks the RPC as hijacked so AddOp/FinishOp become no-ops.
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
  }

  bool hijacked_ = false;
  bool send_;
  uint32_t flags_;
  size_t initial_metadata_count_;
  std::multimap<grpc::string, grpc::string>* metadata_map_;
  grpc_metadata* initial_metadata_;  // owned between AddOp and FinishOp
  struct {
    bool is_set;
    grpc_compression_level level;
  } maybe_compression_level_;
};
||||
|
||||
// Op that serializes and sends one outgoing message as a
// GRPC_OP_SEND_MESSAGE batch entry.
class CallOpSendMessage {
 public:
  CallOpSendMessage() : send_buf_() {}

  /// Send \a message using \a options for the write. The \a options are cleared
  /// after use.
  template <class M>
  Status SendMessage(const M& message,
                     WriteOptions options) GRPC_MUST_USE_RESULT;

  template <class M>
  Status SendMessage(const M& message) GRPC_MUST_USE_RESULT;

 protected:
  // Appends a GRPC_OP_SEND_MESSAGE entry unless there is no serialized
  // message pending or the RPC was hijacked by an interceptor.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_buf_.Valid() || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_MESSAGE;
    op->flags = write_options_.flags();
    op->reserved = NULL;
    op->data.send_message.send_message = send_buf_.c_buffer();
    // Flags are per-message: clear them after use.
    write_options_.Clear();
  }
  // Releases the serialized buffer once the batch completes.
  void FinishOp(bool* status) { send_buf_.Clear(); }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (!send_buf_.Valid()) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_MESSAGE);
    interceptor_methods->SetSendMessage(&send_buf_);
  }

  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}

  // Marks the RPC as hijacked so AddOp becomes a no-op.
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
  }

 private:
  bool hijacked_ = false;
  ByteBuffer send_buf_;          // serialized message, set by SendMessage
  WriteOptions write_options_;   // per-message flags, cleared in AddOp
};
||||
|
||||
// Serializes \a message into send_buf_ and stores \a options for the write.
// Returns the status of serialization; on failure the op stays un-armed.
template <class M>
Status CallOpSendMessage::SendMessage(const M& message, WriteOptions options) {
  write_options_ = options;
  bool own_buf;
  // TODO(vjpai): Remove the void below when possible
  // The void in the template parameter below should not be needed
  // (since it should be implicit) but is needed due to an observed
  // difference in behavior between clang and gcc for certain internal users
  Status result = SerializationTraits<M, void>::Serialize(
      message, send_buf_.bbuf_ptr(), &own_buf);
  if (!own_buf) {
    // The serializer did not hand us ownership of the buffer; Duplicate()
    // presumably takes a reference so send_buf_ stays valid — see ByteBuffer.
    send_buf_.Duplicate();
  }
  return result;
}
||||
|
||||
// Convenience overload: send \a message with default WriteOptions.
template <class M>
Status CallOpSendMessage::SendMessage(const M& message) {
  return SendMessage(message, WriteOptions());
}
||||
|
||||
// Op that receives one incoming message of statically-known type R via a
// GRPC_OP_RECV_MESSAGE batch entry, deserializing it in FinishOp.
template <class R>
class CallOpRecvMessage {
 public:
  CallOpRecvMessage()
      : got_message(false),
        message_(nullptr),
        allow_not_getting_message_(false) {}

  /// Arm this op to deserialize the next incoming message into \a message.
  void RecvMessage(R* message) { message_ = message; }

  // Do not change status if no message is received.
  void AllowNoMessage() { allow_not_getting_message_ = true; }

  // True after FinishOp iff a message was received and deserialized.
  bool got_message;

 protected:
  // Appends a GRPC_OP_RECV_MESSAGE entry unless idle or hijacked.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (message_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_MESSAGE;
    op->flags = 0;
    op->reserved = NULL;
    op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr();
  }

  // Deserializes the received buffer into message_ (if any). A missing
  // message fails the batch status unless AllowNoMessage() was called.
  void FinishOp(bool* status) {
    if (message_ == nullptr || hijacked_) return;
    if (recv_buf_.Valid()) {
      if (*status) {
        // Deserialization failure downgrades the batch status as well.
        got_message = *status =
            SerializationTraits<R>::Deserialize(recv_buf_.bbuf_ptr(), message_)
                .ok();
        recv_buf_.Release();
      } else {
        got_message = false;
        recv_buf_.Clear();
      }
    } else {
      got_message = false;
      if (!allow_not_getting_message_) {
        *status = false;
      }
    }
    message_ = nullptr;  // disarm for the next batch
  }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    interceptor_methods->SetRecvMessage(message_);
  }

  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (!got_message) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_MESSAGE);
  }
  // Hijacked RPCs report the message as available so the interceptor can
  // supply it directly.
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
    if (message_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_MESSAGE);
    got_message = true;
  }

 private:
  R* message_;           // destination for the deserialized message
  ByteBuffer recv_buf_;  // raw buffer filled by the core
  bool allow_not_getting_message_;
  bool hijacked_ = false;
};
||||
|
||||
// Type-erased deserialization hook: lets CallOpGenericRecvMessage deserialize
// into a message whose concrete type is only known at RecvMessage() time.
class DeserializeFunc {
 public:
  virtual Status Deserialize(ByteBuffer* buf) = 0;
  virtual ~DeserializeFunc() {}
};
||||
|
||||
// Concrete DeserializeFunc that deserializes into the R* captured at
// construction via SerializationTraits<R>.
template <class R>
class DeserializeFuncType final : public DeserializeFunc {
 public:
  DeserializeFuncType(R* message) : message_(message) {}
  Status Deserialize(ByteBuffer* buf) override {
    return SerializationTraits<R>::Deserialize(buf->bbuf_ptr(), message_);
  }

  ~DeserializeFuncType() override {}

 private:
  R* message_;  // Not a managed pointer because management is external to this
};
||||
|
||||
class CallOpGenericRecvMessage { |
||||
public: |
||||
CallOpGenericRecvMessage() |
||||
: got_message(false), allow_not_getting_message_(false) {} |
||||
|
||||
template <class R> |
||||
void RecvMessage(R* message) { |
||||
// Use an explicit base class pointer to avoid resolution error in the
|
||||
// following unique_ptr::reset for some old implementations.
|
||||
DeserializeFunc* func = new DeserializeFuncType<R>(message); |
||||
deserialize_.reset(func); |
||||
message_ = message; |
||||
} |
||||
|
||||
// Do not change status if no message is received.
|
||||
void AllowNoMessage() { allow_not_getting_message_ = true; } |
||||
|
||||
bool got_message; |
||||
|
||||
protected: |
||||
void AddOp(grpc_op* ops, size_t* nops) { |
||||
if (!deserialize_ || hijacked_) return; |
||||
grpc_op* op = &ops[(*nops)++]; |
||||
op->op = GRPC_OP_RECV_MESSAGE; |
||||
op->flags = 0; |
||||
op->reserved = NULL; |
||||
op->data.recv_message.recv_message = recv_buf_.c_buffer_ptr(); |
||||
} |
||||
|
||||
void FinishOp(bool* status) { |
||||
if (!deserialize_ || hijacked_) return; |
||||
if (recv_buf_.Valid()) { |
||||
if (*status) { |
||||
got_message = true; |
||||
*status = deserialize_->Deserialize(&recv_buf_).ok(); |
||||
recv_buf_.Release(); |
||||
} else { |
||||
got_message = false; |
||||
recv_buf_.Clear(); |
||||
} |
||||
} else { |
||||
got_message = false; |
||||
if (!allow_not_getting_message_) { |
||||
*status = false; |
||||
} |
||||
} |
||||
deserialize_.reset(); |
||||
} |
||||
|
||||
void SetInterceptionHookPoint( |
||||
InternalInterceptorBatchMethods* interceptor_methods) { |
||||
interceptor_methods->SetRecvMessage(message_); |
||||
} |
||||
|
||||
void SetFinishInterceptionHookPoint( |
||||
InternalInterceptorBatchMethods* interceptor_methods) { |
||||
if (!got_message) return; |
||||
interceptor_methods->AddInterceptionHookPoint( |
||||
experimental::InterceptionHookPoints::POST_RECV_MESSAGE); |
||||
} |
||||
void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) { |
||||
hijacked_ = true; |
||||
if (!deserialize_) return; |
||||
interceptor_methods->AddInterceptionHookPoint( |
||||
experimental::InterceptionHookPoints::PRE_RECV_MESSAGE); |
||||
} |
||||
|
||||
private: |
||||
void* message_; |
||||
bool hijacked_ = false; |
||||
std::unique_ptr<DeserializeFunc> deserialize_; |
||||
ByteBuffer recv_buf_; |
||||
bool allow_not_getting_message_; |
||||
}; |
||||
|
||||
// Op that half-closes the call from the client side via a
// GRPC_OP_SEND_CLOSE_FROM_CLIENT batch entry.
class CallOpClientSendClose {
 public:
  CallOpClientSendClose() : send_(false) {}

  /// Arm this op to send the client half-close on the next batch.
  void ClientSendClose() { send_ = true; }

 protected:
  // Appends a GRPC_OP_SEND_CLOSE_FROM_CLIENT entry unless idle or hijacked.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_ || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    op->flags = 0;
    op->reserved = NULL;
  }
  // Disarms the op once the batch completes.
  void FinishOp(bool* status) { send_ = false; }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (!send_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_CLOSE);
  }

  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}

  // Marks the RPC as hijacked so AddOp becomes a no-op.
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
  }

 private:
  bool hijacked_ = false;
  bool send_;
};
||||
|
||||
// Op that sends the server's final status and trailing metadata via a
// GRPC_OP_SEND_STATUS_FROM_SERVER batch entry.
class CallOpServerSendStatus {
 public:
  CallOpServerSendStatus() : send_status_available_(false) {}

  /// Arm this op to send \a status and \a trailing_metadata. The status
  /// code/message/details are copied out of \a status here; the metadata map
  /// is only referenced and must outlive the batch.
  void ServerSendStatus(
      std::multimap<grpc::string, grpc::string>* trailing_metadata,
      const Status& status) {
    send_error_details_ = status.error_details();
    metadata_map_ = trailing_metadata;
    send_status_available_ = true;
    send_status_code_ = static_cast<grpc_status_code>(status.error_code());
    send_error_message_ = status.error_message();
  }

 protected:
  // Appends a GRPC_OP_SEND_STATUS_FROM_SERVER entry unless idle or hijacked.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (!send_status_available_ || hijacked_) return;
    // FillMetadataArray gpr_malloc's the array (error details are appended as
    // an extra entry); it is released in FinishOp.
    trailing_metadata_ = FillMetadataArray(
        *metadata_map_, &trailing_metadata_count_, send_error_details_);
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
    op->data.send_status_from_server.trailing_metadata_count =
        trailing_metadata_count_;
    op->data.send_status_from_server.trailing_metadata = trailing_metadata_;
    op->data.send_status_from_server.status = send_status_code_;
    error_message_slice_ = SliceReferencingString(send_error_message_);
    op->data.send_status_from_server.status_details =
        send_error_message_.empty() ? nullptr : &error_message_slice_;
    op->flags = 0;
    op->reserved = NULL;
  }

  // Frees the metadata array allocated in AddOp and disarms the op.
  void FinishOp(bool* status) {
    if (!send_status_available_ || hijacked_) return;
    g_core_codegen_interface->gpr_free(trailing_metadata_);
    send_status_available_ = false;
  }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (!send_status_available_) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_SEND_STATUS);
    interceptor_methods->SetSendTrailingMetadata(metadata_map_);
    interceptor_methods->SetSendStatus(&send_status_code_, &send_error_details_,
                                       &send_error_message_);
  }

  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {}

  // Marks the RPC as hijacked so AddOp/FinishOp become no-ops.
  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
  }

 private:
  bool hijacked_ = false;
  bool send_status_available_;
  grpc_status_code send_status_code_;
  grpc::string send_error_details_;
  grpc::string send_error_message_;
  size_t trailing_metadata_count_;
  std::multimap<grpc::string, grpc::string>* metadata_map_;
  grpc_metadata* trailing_metadata_;  // owned between AddOp and FinishOp
  grpc_slice error_message_slice_;    // references send_error_message_
};
||||
|
||||
// Op that receives the server's initial metadata into the ClientContext via
// a GRPC_OP_RECV_INITIAL_METADATA batch entry.
class CallOpRecvInitialMetadata {
 public:
  CallOpRecvInitialMetadata() : metadata_map_(nullptr) {}

  /// Arm this op to receive initial metadata into \a context's map and mark
  /// the context as having received it.
  void RecvInitialMetadata(ClientContext* context) {
    context->initial_metadata_received_ = true;
    metadata_map_ = &context->recv_initial_metadata_;
  }

 protected:
  // Appends a GRPC_OP_RECV_INITIAL_METADATA entry unless idle or hijacked.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (metadata_map_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_INITIAL_METADATA;
    op->data.recv_initial_metadata.recv_initial_metadata = metadata_map_->arr();
    op->flags = 0;
    op->reserved = NULL;
  }

  // Nothing to clean up: the core writes directly into the context-owned
  // map. The guard mirrors the idle/hijacked check of the other ops.
  void FinishOp(bool* status) {
    if (metadata_map_ == nullptr || hijacked_) return;
  }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    interceptor_methods->SetRecvInitialMetadata(metadata_map_);
  }

  // Registers the post-recv hook and disarms the op for the next batch.
  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (metadata_map_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_INITIAL_METADATA);
    metadata_map_ = nullptr;
  }

  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
    if (metadata_map_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_INITIAL_METADATA);
  }

 private:
  bool hijacked_ = false;
  MetadataMap* metadata_map_;  // owned by the ClientContext
};
||||
|
||||
// Op that receives the call's final status on the client via a
// GRPC_OP_RECV_STATUS_ON_CLIENT batch entry, assembling a grpc::Status in
// FinishOp.
class CallOpClientRecvStatus {
 public:
  CallOpClientRecvStatus()
      : recv_status_(nullptr), debug_error_string_(nullptr) {}

  /// Arm this op to write the final status into \a status, with trailing
  /// metadata landing in \a context's map.
  void ClientRecvStatus(ClientContext* context, Status* status) {
    client_context_ = context;
    metadata_map_ = &client_context_->trailing_metadata_;
    recv_status_ = status;
    error_message_ = g_core_codegen_interface->grpc_empty_slice();
  }

 protected:
  // Appends a GRPC_OP_RECV_STATUS_ON_CLIENT entry unless idle or hijacked.
  void AddOp(grpc_op* ops, size_t* nops) {
    if (recv_status_ == nullptr || hijacked_) return;
    grpc_op* op = &ops[(*nops)++];
    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
    op->data.recv_status_on_client.trailing_metadata = metadata_map_->arr();
    op->data.recv_status_on_client.status = &status_code_;
    op->data.recv_status_on_client.status_details = &error_message_;
    op->data.recv_status_on_client.error_string = &debug_error_string_;
    op->flags = 0;
    op->reserved = NULL;
  }

  // Builds the grpc::Status from the code/message/details received by the
  // core and releases the core-allocated slice and error string.
  void FinishOp(bool* status) {
    if (recv_status_ == nullptr || hijacked_) return;
    grpc::string binary_error_details = metadata_map_->GetBinaryErrorDetails();
    *recv_status_ =
        Status(static_cast<StatusCode>(status_code_),
               GRPC_SLICE_IS_EMPTY(error_message_)
                   ? grpc::string()
                   : grpc::string(GRPC_SLICE_START_PTR(error_message_),
                                  GRPC_SLICE_END_PTR(error_message_)),
               binary_error_details);
    client_context_->set_debug_error_string(
        debug_error_string_ != nullptr ? debug_error_string_ : "");
    g_core_codegen_interface->grpc_slice_unref(error_message_);
    // The error string was gpr_malloc'd by the core; free it here.
    if (debug_error_string_ != nullptr) {
      g_core_codegen_interface->gpr_free((void*)debug_error_string_);
    }
  }

  void SetInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    interceptor_methods->SetRecvStatus(recv_status_);
    interceptor_methods->SetRecvTrailingMetadata(metadata_map_);
  }

  // Registers the post-recv hook and disarms the op for the next batch.
  void SetFinishInterceptionHookPoint(
      InternalInterceptorBatchMethods* interceptor_methods) {
    if (recv_status_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::POST_RECV_STATUS);
    recv_status_ = nullptr;
  }

  void SetHijackingState(InternalInterceptorBatchMethods* interceptor_methods) {
    hijacked_ = true;
    if (recv_status_ == nullptr) return;
    interceptor_methods->AddInterceptionHookPoint(
        experimental::InterceptionHookPoints::PRE_RECV_STATUS);
  }

 private:
  bool hijacked_ = false;
  ClientContext* client_context_;
  MetadataMap* metadata_map_;       // trailing metadata, owned by the context
  Status* recv_status_;             // destination for the assembled status
  const char* debug_error_string_;  // core-allocated, freed in FinishOp
  grpc_status_code status_code_;
  grpc_slice error_message_;
};
||||
|
||||
// Forward declaration of CallOpSet. Each unused slot defaults to a distinct
// CallNoOp instantiation so the six base classes never collide.
template <class Op1 = CallNoOp<1>, class Op2 = CallNoOp<2>,
          class Op3 = CallNoOp<3>, class Op4 = CallNoOp<4>,
          class Op5 = CallNoOp<5>, class Op6 = CallNoOp<6>>
class CallOpSet;
||||
|
||||
/// Primary implementation of CallOpSetInterface.
|
||||
/// Since we cannot use variadic templates, we declare slots up to
|
||||
/// the maximum count of ops we'll need in a set. We leverage the
|
||||
/// empty base class optimization to slim this class (especially
|
||||
/// when there are many unused slots used). To avoid duplicate base classes,
|
||||
/// the template parmeter for CallNoOp is varied by argument position.
|
||||
template <class Op1, class Op2, class Op3, class Op4, class Op5, class Op6> |
||||
class CallOpSet : public CallOpSetInterface, |
||||
public Op1, |
||||
public Op2, |
||||
public Op3, |
||||
public Op4, |
||||
public Op5, |
||||
public Op6 { |
||||
public: |
||||
CallOpSet() : core_cq_tag_(this), return_tag_(this) {} |
||||
// The copy constructor and assignment operator reset the value of
|
||||
// core_cq_tag_, return_tag_, done_intercepting_ and interceptor_methods_
|
||||
// since those are only meaningful on a specific object, not across objects.
|
||||
CallOpSet(const CallOpSet& other) |
||||
: core_cq_tag_(this), |
||||
return_tag_(this), |
||||
call_(other.call_), |
||||
done_intercepting_(false), |
||||
interceptor_methods_(InterceptorBatchMethodsImpl()) {} |
||||
|
||||
CallOpSet& operator=(const CallOpSet& other) { |
||||
core_cq_tag_ = this; |
||||
return_tag_ = this; |
||||
call_ = other.call_; |
||||
done_intercepting_ = false; |
||||
interceptor_methods_ = InterceptorBatchMethodsImpl(); |
||||
return *this; |
||||
} |
||||
|
||||
void FillOps(Call* call) override { |
||||
done_intercepting_ = false; |
||||
g_core_codegen_interface->grpc_call_ref(call->call()); |
||||
call_ = |
||||
*call; // It's fine to create a copy of call since it's just pointers
|
||||
|
||||
if (RunInterceptors()) { |
||||
ContinueFillOpsAfterInterception(); |
||||
} else { |
||||
// After the interceptors are run, ContinueFillOpsAfterInterception will
|
||||
// be run
|
||||
} |
||||
} |
||||
|
||||
// Completes the batch on the C++ side. Returns true when the tag is ready to
// be surfaced to the application (i.e. interception is finished); returns
// false when post-recv interceptors still have to run, in which case
// ContinueFinalizeResultAfterInterception triggers a second pass through
// here with done_intercepting_ set.
bool FinalizeResult(void** tag, bool* status) override {
  if (done_intercepting_) {
    // We have already finished intercepting and filling in the results. This
    // round trip from the core needed to be made because interceptors were
    // run
    *tag = return_tag_;
    *status = saved_status_;  // restore the status captured on the first pass
    g_core_codegen_interface->grpc_call_unref(call_.call());
    return true;
  }

  this->Op1::FinishOp(status);
  this->Op2::FinishOp(status);
  this->Op3::FinishOp(status);
  this->Op4::FinishOp(status);
  this->Op5::FinishOp(status);
  this->Op6::FinishOp(status);
  saved_status_ = *status;  // preserved across the interceptor round trip
  if (RunInterceptorsPostRecv()) {
    *tag = return_tag_;
    g_core_codegen_interface->grpc_call_unref(call_.call());
    return true;
  }
  // Interceptors are going to be run, so we can't return the tag just yet.
  // After the interceptors are run, ContinueFinalizeResultAfterInterception
  return false;
}
||||
|
||||
/// Sets the application-visible tag that FinalizeResult hands back to the
/// C++ completion queue.
void set_output_tag(void* return_tag) { return_tag_ = return_tag; }
||||
|
||||
/// Returns the tag used at the core completion queue (defaults to "this").
void* core_cq_tag() override { return core_cq_tag_; }
||||
|
||||
/// set_core_cq_tag is used to provide a different core CQ tag than "this".
/// This is used for callback-based tags, where the core tag is the core
/// callback function. It does not change the use or behavior of any other
/// function (such as FinalizeResult)
void set_core_cq_tag(void* core_cq_tag) { core_cq_tag_ = core_cq_tag; }
||||
|
||||
// This will be called while interceptors are run if the RPC is a hijacked
// RPC. This should set hijacking state for each of the ops.
void SetHijackingState() override {
  this->Op1::SetHijackingState(&interceptor_methods_);
  this->Op2::SetHijackingState(&interceptor_methods_);
  this->Op3::SetHijackingState(&interceptor_methods_);
  this->Op4::SetHijackingState(&interceptor_methods_);
  this->Op5::SetHijackingState(&interceptor_methods_);
  this->Op6::SetHijackingState(&interceptor_methods_);
}
||||
|
||||
// Should be called after interceptors are done running
|
||||
void ContinueFillOpsAfterInterception() override { |
||||
static const size_t MAX_OPS = 6; |
||||
grpc_op ops[MAX_OPS]; |
||||
size_t nops = 0; |
||||
this->Op1::AddOp(ops, &nops); |
||||
this->Op2::AddOp(ops, &nops); |
||||
this->Op3::AddOp(ops, &nops); |
||||
this->Op4::AddOp(ops, &nops); |
||||
this->Op5::AddOp(ops, &nops); |
||||
this->Op6::AddOp(ops, &nops); |
||||
GPR_CODEGEN_ASSERT(GRPC_CALL_OK == |
||||
g_core_codegen_interface->grpc_call_start_batch( |
||||
call_.call(), ops, nops, core_cq_tag(), nullptr)); |
||||
} |
||||
|
||||
// Should be called after interceptors are done running on the finalize result
// path. Starts an empty (zero-op) batch so the core CQ delivers this tag once
// more; the resulting FinalizeResult pass sees done_intercepting_ == true and
// surfaces the saved tag/status to the application.
void ContinueFinalizeResultAfterInterception() override {
  done_intercepting_ = true;
  GPR_CODEGEN_ASSERT(GRPC_CALL_OK ==
                     g_core_codegen_interface->grpc_call_start_batch(
                         call_.call(), nullptr, 0, core_cq_tag(), nullptr));
}
||||
|
||||
private:
// Returns true if no interceptors need to be run. Resets the interception
// state, registers this op set and its call, lets every Op base declare the
// hook points it participates in, then kicks off the interceptor chain.
bool RunInterceptors() {
  interceptor_methods_.ClearState();
  interceptor_methods_.SetCallOpSetInterface(this);
  interceptor_methods_.SetCall(&call_);
  this->Op1::SetInterceptionHookPoint(&interceptor_methods_);
  this->Op2::SetInterceptionHookPoint(&interceptor_methods_);
  this->Op3::SetInterceptionHookPoint(&interceptor_methods_);
  this->Op4::SetInterceptionHookPoint(&interceptor_methods_);
  this->Op5::SetInterceptionHookPoint(&interceptor_methods_);
  this->Op6::SetInterceptionHookPoint(&interceptor_methods_);
  return interceptor_methods_.RunInterceptors();
}
||||
// Returns true if no interceptors need to be run. Runs the interceptor chain
// in the reverse (post-receive) direction; call and op set registration from
// RunInterceptors() is reused.
bool RunInterceptorsPostRecv() {
  // Call and OpSet had already been set on the set state.
  // SetReverse also clears previously set hook points
  interceptor_methods_.SetReverse();
  this->Op1::SetFinishInterceptionHookPoint(&interceptor_methods_);
  this->Op2::SetFinishInterceptionHookPoint(&interceptor_methods_);
  this->Op3::SetFinishInterceptionHookPoint(&interceptor_methods_);
  this->Op4::SetFinishInterceptionHookPoint(&interceptor_methods_);
  this->Op5::SetFinishInterceptionHookPoint(&interceptor_methods_);
  this->Op6::SetFinishInterceptionHookPoint(&interceptor_methods_);
  return interceptor_methods_.RunInterceptors();
}
||||
|
||||
void* core_cq_tag_;    // tag handed to the core CQ; defaults to "this"
void* return_tag_;     // tag surfaced to the application by FinalizeResult
Call call_;            // copy of the call this op set operates on
bool done_intercepting_ = false;  // true once the post-recv interceptor
                                  // round trip has completed
InterceptorBatchMethodsImpl interceptor_methods_;
// Status captured in the first FinalizeResult pass; replayed after the
// interceptor round trip. Only read when done_intercepting_ is true.
bool saved_status_;
};
||||
|
||||
} // namespace internal
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_CALL_OP_SET_H
|
@ -0,0 +1,59 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H |
||||
#define GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H |
||||
|
||||
#include <grpcpp/impl/codegen/completion_queue_tag.h> |
||||
|
||||
namespace grpc { |
||||
namespace internal { |
||||
|
||||
class Call; |
||||
|
||||
/// An abstract collection of call ops, used to generate the
/// grpc_call_op structure to pass down to the lower layers,
/// and as it is-a CompletionQueueTag, also massages the final
/// completion into the correct form for consumption in the C++
/// API.
class CallOpSetInterface : public CompletionQueueTag {
 public:
  /// Fills in grpc_op, starting from ops[*nops] and moving
  /// upwards.
  virtual void FillOps(internal::Call* call) = 0;

  /// Get the tag to be used at the core completion queue. Generally, the
  /// value of core_cq_tag will be "this". However, it can be overridden if we
  /// want core to process the tag differently (e.g., as a core callback)
  virtual void* core_cq_tag() = 0;

  // This will be called while interceptors are run if the RPC is a hijacked
  // RPC. This should set hijacking state for each of the ops.
  virtual void SetHijackingState() = 0;

  // Should be called after interceptors are done running
  virtual void ContinueFillOpsAfterInterception() = 0;

  // Should be called after interceptors are done running on the finalize
  // result path
  virtual void ContinueFinalizeResultAfterInterception() = 0;
};
||||
} // namespace internal
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_CALL_OP_SET_INTERFACE_H
|
@ -0,0 +1,80 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H |
||||
#define GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H |
||||
|
||||
#include <grpcpp/impl/codegen/channel_interface.h> |
||||
|
||||
namespace grpc { |
||||
|
||||
namespace internal { |
||||
|
||||
class InterceptorBatchMethodsImpl; |
||||
|
||||
/// An InterceptedChannel is available to client Interceptors. An
/// InterceptedChannel is unique to an interceptor, and when an RPC is started
/// on this channel, only those interceptors that come after this interceptor
/// see the RPC.
class InterceptedChannel : public ChannelInterface {
 public:
  // Non-owning: the underlying channel is only detached, never deleted.
  virtual ~InterceptedChannel() { channel_ = nullptr; }

  /// Get the current channel state. If the channel is in IDLE and
  /// \a try_to_connect is set to true, try to connect.
  grpc_connectivity_state GetState(bool try_to_connect) override {
    return channel_->GetState(try_to_connect);
  }

 private:
  // Only InterceptorBatchMethodsImpl (a friend) constructs these; \a pos is
  // the interceptor index at which call creation should resume.
  InterceptedChannel(ChannelInterface* channel, int pos)
      : channel_(channel), interceptor_pos_(pos) {}

  // Starts the call on the underlying channel, but with interception
  // beginning at interceptor_pos_ instead of 0.
  Call CreateCall(const RpcMethod& method, ClientContext* context,
                  CompletionQueue* cq) override {
    return channel_->CreateCallInternal(method, context, cq, interceptor_pos_);
  }

  // The remaining ChannelInterface methods forward verbatim to the
  // underlying channel.
  void PerformOpsOnCall(CallOpSetInterface* ops, Call* call) override {
    return channel_->PerformOpsOnCall(ops, call);
  }
  void* RegisterMethod(const char* method) override {
    return channel_->RegisterMethod(method);
  }

  void NotifyOnStateChangeImpl(grpc_connectivity_state last_observed,
                               gpr_timespec deadline, CompletionQueue* cq,
                               void* tag) override {
    return channel_->NotifyOnStateChangeImpl(last_observed, deadline, cq, tag);
  }
  bool WaitForStateChangeImpl(grpc_connectivity_state last_observed,
                              gpr_timespec deadline) override {
    return channel_->WaitForStateChangeImpl(last_observed, deadline);
  }

  CompletionQueue* CallbackCQ() override { return channel_->CallbackCQ(); }

  ChannelInterface* channel_;  // not owned
  int interceptor_pos_;        // first interceptor that sees RPCs started here

  friend class InterceptorBatchMethodsImpl;
};
||||
} // namespace internal
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_INTERCEPTED_CHANNEL_H
|
@ -0,0 +1,383 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H |
||||
#define GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H |
||||
|
||||
#include <grpcpp/impl/codegen/client_interceptor.h> |
||||
#include <grpcpp/impl/codegen/server_interceptor.h> |
||||
|
||||
#include <grpc/impl/codegen/grpc_types.h> |
||||
|
||||
namespace grpc { |
||||
namespace internal { |
||||
|
||||
/// Internal methods for setting the state. Extends the public
/// experimental::InterceptorBatchMethods interface with the setters the
/// library uses to register hook points and wire up pointers to the
/// send/recv data of the current batch before interceptors run.
class InternalInterceptorBatchMethods
    : public experimental::InterceptorBatchMethods {
 public:
  virtual ~InternalInterceptorBatchMethods() {}

  // Marks \a type as a hook point present in the current batch.
  virtual void AddInterceptionHookPoint(
      experimental::InterceptionHookPoints type) = 0;

  // Setters for outbound (send) data of the batch.
  virtual void SetSendMessage(ByteBuffer* buf) = 0;

  virtual void SetSendInitialMetadata(
      std::multimap<grpc::string, grpc::string>* metadata) = 0;

  virtual void SetSendStatus(grpc_status_code* code,
                             grpc::string* error_details,
                             grpc::string* error_message) = 0;

  virtual void SetSendTrailingMetadata(
      std::multimap<grpc::string, grpc::string>* metadata) = 0;

  // Setters for inbound (recv) data of the batch.
  virtual void SetRecvMessage(void* message) = 0;

  virtual void SetRecvInitialMetadata(MetadataMap* map) = 0;

  virtual void SetRecvStatus(Status* status) = 0;

  virtual void SetRecvTrailingMetadata(MetadataMap* map) = 0;
};
||||
|
||||
class InterceptorBatchMethodsImpl : public InternalInterceptorBatchMethods { |
||||
public: |
||||
InterceptorBatchMethodsImpl() { |
||||
for (auto i = static_cast<experimental::InterceptionHookPoints>(0); |
||||
i < experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS; |
||||
i = static_cast<experimental::InterceptionHookPoints>( |
||||
static_cast<size_t>(i) + 1)) { |
||||
hooks_[static_cast<size_t>(i)] = false; |
||||
} |
||||
} |
||||
|
||||
~InterceptorBatchMethodsImpl() {} |
||||
|
||||
bool QueryInterceptionHookPoint( |
||||
experimental::InterceptionHookPoints type) override { |
||||
return hooks_[static_cast<size_t>(type)]; |
||||
} |
||||
|
||||
void Proceed() override { /* fill this */ |
||||
if (call_->client_rpc_info() != nullptr) { |
||||
return ProceedClient(); |
||||
} |
||||
GPR_CODEGEN_ASSERT(call_->server_rpc_info() != nullptr); |
||||
ProceedServer(); |
||||
} |
||||
|
||||
void Hijack() override { |
||||
// Only the client can hijack when sending down initial metadata
|
||||
GPR_CODEGEN_ASSERT(!reverse_ && ops_ != nullptr && |
||||
call_->client_rpc_info() != nullptr); |
||||
// It is illegal to call Hijack twice
|
||||
GPR_CODEGEN_ASSERT(!ran_hijacking_interceptor_); |
||||
auto* rpc_info = call_->client_rpc_info(); |
||||
rpc_info->hijacked_ = true; |
||||
rpc_info->hijacked_interceptor_ = current_interceptor_index_; |
||||
ClearHookPoints(); |
||||
ops_->SetHijackingState(); |
||||
ran_hijacking_interceptor_ = true; |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} |
||||
|
||||
void AddInterceptionHookPoint( |
||||
experimental::InterceptionHookPoints type) override { |
||||
hooks_[static_cast<size_t>(type)] = true; |
||||
} |
||||
|
||||
ByteBuffer* GetSendMessage() override { return send_message_; } |
||||
|
||||
std::multimap<grpc::string, grpc::string>* GetSendInitialMetadata() override { |
||||
return send_initial_metadata_; |
||||
} |
||||
|
||||
Status GetSendStatus() override { |
||||
return Status(static_cast<StatusCode>(*code_), *error_message_, |
||||
*error_details_); |
||||
} |
||||
|
||||
void ModifySendStatus(const Status& status) override { |
||||
*code_ = static_cast<grpc_status_code>(status.error_code()); |
||||
*error_details_ = status.error_details(); |
||||
*error_message_ = status.error_message(); |
||||
} |
||||
|
||||
std::multimap<grpc::string, grpc::string>* GetSendTrailingMetadata() |
||||
override { |
||||
return send_trailing_metadata_; |
||||
} |
||||
|
||||
void* GetRecvMessage() override { return recv_message_; } |
||||
|
||||
std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvInitialMetadata() |
||||
override { |
||||
return recv_initial_metadata_->map(); |
||||
} |
||||
|
||||
Status* GetRecvStatus() override { return recv_status_; } |
||||
|
||||
std::multimap<grpc::string_ref, grpc::string_ref>* GetRecvTrailingMetadata() |
||||
override { |
||||
return recv_trailing_metadata_->map(); |
||||
} |
||||
|
||||
void SetSendMessage(ByteBuffer* buf) override { send_message_ = buf; } |
||||
|
||||
void SetSendInitialMetadata( |
||||
std::multimap<grpc::string, grpc::string>* metadata) override { |
||||
send_initial_metadata_ = metadata; |
||||
} |
||||
|
||||
void SetSendStatus(grpc_status_code* code, grpc::string* error_details, |
||||
grpc::string* error_message) override { |
||||
code_ = code; |
||||
error_details_ = error_details; |
||||
error_message_ = error_message; |
||||
} |
||||
|
||||
void SetSendTrailingMetadata( |
||||
std::multimap<grpc::string, grpc::string>* metadata) override { |
||||
send_trailing_metadata_ = metadata; |
||||
} |
||||
|
||||
void SetRecvMessage(void* message) override { recv_message_ = message; } |
||||
|
||||
void SetRecvInitialMetadata(MetadataMap* map) override { |
||||
recv_initial_metadata_ = map; |
||||
} |
||||
|
||||
void SetRecvStatus(Status* status) override { recv_status_ = status; } |
||||
|
||||
void SetRecvTrailingMetadata(MetadataMap* map) override { |
||||
recv_trailing_metadata_ = map; |
||||
} |
||||
|
||||
std::unique_ptr<ChannelInterface> GetInterceptedChannel() override { |
||||
auto* info = call_->client_rpc_info(); |
||||
if (info == nullptr) { |
||||
return std::unique_ptr<ChannelInterface>(nullptr); |
||||
} |
||||
// The intercepted channel starts from the interceptor just after the
|
||||
// current interceptor
|
||||
return std::unique_ptr<ChannelInterface>(new InterceptedChannel( |
||||
info->channel(), current_interceptor_index_ + 1)); |
||||
} |
||||
|
||||
// Clears all state
|
||||
void ClearState() { |
||||
reverse_ = false; |
||||
ran_hijacking_interceptor_ = false; |
||||
ClearHookPoints(); |
||||
} |
||||
|
||||
// Prepares for Post_recv operations
|
||||
void SetReverse() { |
||||
reverse_ = true; |
||||
ran_hijacking_interceptor_ = false; |
||||
ClearHookPoints(); |
||||
} |
||||
|
||||
// This needs to be set before interceptors are run
|
||||
void SetCall(Call* call) { call_ = call; } |
||||
|
||||
// This needs to be set before interceptors are run using RunInterceptors().
|
||||
// Alternatively, RunInterceptors(std::function<void(void)> f) can be used.
|
||||
void SetCallOpSetInterface(CallOpSetInterface* ops) { ops_ = ops; } |
||||
|
||||
// Returns true if no interceptors are run. This should be used only by
|
||||
// subclasses of CallOpSetInterface. SetCall and SetCallOpSetInterface should
|
||||
// have been called before this. After all the interceptors are done running,
|
||||
// either ContinueFillOpsAfterInterception or
|
||||
// ContinueFinalizeOpsAfterInterception will be called. Note that neither of
|
||||
// them is invoked if there were no interceptors registered.
|
||||
bool RunInterceptors() { |
||||
GPR_CODEGEN_ASSERT(ops_); |
||||
auto* client_rpc_info = call_->client_rpc_info(); |
||||
if (client_rpc_info != nullptr) { |
||||
if (client_rpc_info->interceptors_.size() == 0) { |
||||
return true; |
||||
} else { |
||||
RunClientInterceptors(); |
||||
return false; |
||||
} |
||||
} |
||||
|
||||
auto* server_rpc_info = call_->server_rpc_info(); |
||||
if (server_rpc_info == nullptr || |
||||
server_rpc_info->interceptors_.size() == 0) { |
||||
return true; |
||||
} |
||||
RunServerInterceptors(); |
||||
return false; |
||||
} |
||||
|
||||
// Returns true if no interceptors are run. Returns false otherwise if there
|
||||
// are interceptors registered. After the interceptors are done running \a f
|
||||
// will be invoked. This is to be used only by BaseAsyncRequest and
|
||||
// SyncRequest.
|
||||
bool RunInterceptors(std::function<void(void)> f) { |
||||
// This is used only by the server for initial call request
|
||||
GPR_CODEGEN_ASSERT(reverse_ == true); |
||||
GPR_CODEGEN_ASSERT(call_->client_rpc_info() == nullptr); |
||||
auto* server_rpc_info = call_->server_rpc_info(); |
||||
if (server_rpc_info == nullptr || |
||||
server_rpc_info->interceptors_.size() == 0) { |
||||
return true; |
||||
} |
||||
callback_ = std::move(f); |
||||
RunServerInterceptors(); |
||||
return false; |
||||
} |
||||
|
||||
private: |
||||
void RunClientInterceptors() { |
||||
auto* rpc_info = call_->client_rpc_info(); |
||||
if (!reverse_) { |
||||
current_interceptor_index_ = 0; |
||||
} else { |
||||
if (rpc_info->hijacked_) { |
||||
current_interceptor_index_ = rpc_info->hijacked_interceptor_; |
||||
} else { |
||||
current_interceptor_index_ = rpc_info->interceptors_.size() - 1; |
||||
} |
||||
} |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} |
||||
|
||||
void RunServerInterceptors() { |
||||
auto* rpc_info = call_->server_rpc_info(); |
||||
if (!reverse_) { |
||||
current_interceptor_index_ = 0; |
||||
} else { |
||||
current_interceptor_index_ = rpc_info->interceptors_.size() - 1; |
||||
} |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} |
||||
|
||||
void ProceedClient() { |
||||
auto* rpc_info = call_->client_rpc_info(); |
||||
if (rpc_info->hijacked_ && !reverse_ && |
||||
current_interceptor_index_ == rpc_info->hijacked_interceptor_ && |
||||
!ran_hijacking_interceptor_) { |
||||
// We now need to provide hijacked recv ops to this interceptor
|
||||
ClearHookPoints(); |
||||
ops_->SetHijackingState(); |
||||
ran_hijacking_interceptor_ = true; |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
return; |
||||
} |
||||
if (!reverse_) { |
||||
current_interceptor_index_++; |
||||
// We are going down the stack of interceptors
|
||||
if (current_interceptor_index_ < rpc_info->interceptors_.size()) { |
||||
if (rpc_info->hijacked_ && |
||||
current_interceptor_index_ > rpc_info->hijacked_interceptor_) { |
||||
// This is a hijacked RPC and we are done with hijacking
|
||||
ops_->ContinueFillOpsAfterInterception(); |
||||
} else { |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} |
||||
} else { |
||||
// we are done running all the interceptors without any hijacking
|
||||
ops_->ContinueFillOpsAfterInterception(); |
||||
} |
||||
} else { |
||||
// We are going up the stack of interceptors
|
||||
if (current_interceptor_index_ > 0) { |
||||
// Continue running interceptors
|
||||
current_interceptor_index_--; |
||||
rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} else { |
||||
// we are done running all the interceptors without any hijacking
|
||||
ops_->ContinueFinalizeResultAfterInterception(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
void ProceedServer() { |
||||
auto* rpc_info = call_->server_rpc_info(); |
||||
if (!reverse_) { |
||||
current_interceptor_index_++; |
||||
if (current_interceptor_index_ < rpc_info->interceptors_.size()) { |
||||
return rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} else if (ops_) { |
||||
return ops_->ContinueFillOpsAfterInterception(); |
||||
} |
||||
} else { |
||||
// We are going up the stack of interceptors
|
||||
if (current_interceptor_index_ > 0) { |
||||
// Continue running interceptors
|
||||
current_interceptor_index_--; |
||||
return rpc_info->RunInterceptor(this, current_interceptor_index_); |
||||
} else if (ops_) { |
||||
return ops_->ContinueFinalizeResultAfterInterception(); |
||||
} |
||||
} |
||||
GPR_CODEGEN_ASSERT(callback_); |
||||
callback_(); |
||||
} |
||||
|
||||
void ClearHookPoints() { |
||||
for (auto i = static_cast<experimental::InterceptionHookPoints>(0); |
||||
i < experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS; |
||||
i = static_cast<experimental::InterceptionHookPoints>( |
||||
static_cast<size_t>(i) + 1)) { |
||||
hooks_[static_cast<size_t>(i)] = false; |
||||
} |
||||
} |
||||
|
||||
std::array<bool, |
||||
static_cast<size_t>( |
||||
experimental::InterceptionHookPoints::NUM_INTERCEPTION_HOOKS)> |
||||
hooks_; |
||||
|
||||
size_t current_interceptor_index_ = 0; // Current iterator
|
||||
bool reverse_ = false; |
||||
bool ran_hijacking_interceptor_ = false; |
||||
Call* call_ = nullptr; // The Call object is present along with CallOpSet
|
||||
// object/callback
|
||||
CallOpSetInterface* ops_ = nullptr; |
||||
std::function<void(void)> callback_; |
||||
|
||||
ByteBuffer* send_message_ = nullptr; |
||||
|
||||
std::multimap<grpc::string, grpc::string>* send_initial_metadata_; |
||||
|
||||
grpc_status_code* code_ = nullptr; |
||||
grpc::string* error_details_ = nullptr; |
||||
grpc::string* error_message_ = nullptr; |
||||
Status send_status_; |
||||
|
||||
std::multimap<grpc::string, grpc::string>* send_trailing_metadata_ = nullptr; |
||||
|
||||
void* recv_message_ = nullptr; |
||||
|
||||
MetadataMap* recv_initial_metadata_ = nullptr; |
||||
|
||||
Status* recv_status_ = nullptr; |
||||
|
||||
MetadataMap* recv_trailing_metadata_ = nullptr; |
||||
}; |
||||
|
||||
} // namespace internal
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_INTERCEPTOR_COMMON_H
|
@ -0,0 +1,200 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H |
||||
#define GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H |
||||
|
||||
#include <functional> |
||||
|
||||
#include <grpcpp/impl/codegen/call.h> |
||||
#include <grpcpp/impl/codegen/callback_common.h> |
||||
#include <grpcpp/impl/codegen/config.h> |
||||
#include <grpcpp/impl/codegen/core_codegen_interface.h> |
||||
#include <grpcpp/impl/codegen/server_context.h> |
||||
#include <grpcpp/impl/codegen/server_interface.h> |
||||
#include <grpcpp/impl/codegen/status.h> |
||||
|
||||
namespace grpc { |
||||
|
||||
// forward declarations
|
||||
namespace internal { |
||||
template <class ServiceType, class RequestType, class ResponseType> |
||||
class CallbackUnaryHandler; |
||||
} // namespace internal
|
||||
|
||||
namespace experimental { |
||||
|
||||
// For unary RPCs, the exposed controller class is only an interface
// and the actual implementation is an internal class.
class ServerCallbackRpcController {
 public:
  virtual ~ServerCallbackRpcController() {}

  // The method handler must call this function when it is done so that
  // the library knows to free its resources
  virtual void Finish(Status s) = 0;

  // Allow the method handler to push out the initial metadata before
  // the response and status are ready. The provided callback is invoked
  // when the metadata send completes (bool argument is the success flag
  // from the tag — NOTE(review): confirm against CallbackWithSuccessTag).
  virtual void SendInitialMetadata(std::function<void(bool)>) = 0;
};
||||
|
||||
} // namespace experimental
|
||||
|
||||
namespace internal { |
||||
|
||||
// Method handler for callback-based unary RPCs: deserializes the request
// into the call arena, arena-allocates a controller, and invokes the
// user-provided function, which must eventually call Finish() on the
// controller to complete the RPC and release resources.
template <class ServiceType, class RequestType, class ResponseType>
class CallbackUnaryHandler : public MethodHandler {
 public:
  // Note: the service pointer is accepted but not stored/used here.
  CallbackUnaryHandler(
      std::function<void(ServerContext*, const RequestType*, ResponseType*,
                         experimental::ServerCallbackRpcController*)>
          func,
      ServiceType* service)
      : func_(func) {}
  void RunHandler(const HandlerParameter& param) final {
    // Arena allocate a controller structure (that includes request/response)
    // The call ref taken here is released by the controller's Finish tag.
    g_core_codegen_interface->grpc_call_ref(param.call->call());
    auto* controller = new (g_core_codegen_interface->grpc_call_arena_alloc(
        param.call->call(), sizeof(ServerCallbackRpcControllerImpl)))
        ServerCallbackRpcControllerImpl(
            param.server_context, param.call,
            static_cast<RequestType*>(param.request),
            std::move(param.call_requester));
    Status status = param.status;

    if (status.ok()) {
      // Call the actual function handler and expect the user to call finish
      CatchingCallback(std::move(func_), param.server_context,
                       controller->request(), controller->response(),
                       controller);
    } else {
      // if deserialization failed, we need to fail the call
      controller->Finish(status);
    }
  }

  // Deserializes \a req into an arena-allocated RequestType. On success the
  // request pointer is returned (later handed to RunHandler via
  // HandlerParameter); on failure the partially-constructed object is
  // destroyed in place (arena memory itself is reclaimed with the call).
  void* Deserialize(grpc_call* call, grpc_byte_buffer* req,
                    Status* status) final {
    ByteBuffer buf;
    buf.set_buffer(req);
    auto* request = new (g_core_codegen_interface->grpc_call_arena_alloc(
        call, sizeof(RequestType))) RequestType();
    *status = SerializationTraits<RequestType>::Deserialize(&buf, request);
    buf.Release();
    if (status->ok()) {
      return request;
    }
    request->~RequestType();
    return nullptr;
  }

 private:
  std::function<void(ServerContext*, const RequestType*, ResponseType*,
                     experimental::ServerCallbackRpcController*)>
      func_;

  // The implementation class of ServerCallbackRpcController is a private
  // member of CallbackUnaryHandler since it is never exposed anywhere, and
  // this allows it to take advantage of CallbackUnaryHandler's friendships.
  class ServerCallbackRpcControllerImpl
      : public experimental::ServerCallbackRpcController {
   public:
    // Sends initial metadata (if not yet sent), the response (only when the
    // status is OK), and the final status. The completion lambda explicitly
    // destroys this arena-allocated object, drops the call ref taken in
    // RunHandler, and then notifies the call requester.
    void Finish(Status s) override {
      finish_tag_ = CallbackWithSuccessTag(
          call_.call(),
          [this](bool) {
            grpc_call* call = call_.call();
            auto call_requester = std::move(call_requester_);
            this->~ServerCallbackRpcControllerImpl();  // explicitly call
                                                       // destructor
            g_core_codegen_interface->grpc_call_unref(call);
            call_requester();
          },
          &finish_buf_);
      if (!ctx_->sent_initial_metadata_) {
        finish_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
                                        ctx_->initial_metadata_flags());
        if (ctx_->compression_level_set()) {
          finish_buf_.set_compression_level(ctx_->compression_level());
        }
        ctx_->sent_initial_metadata_ = true;
      }
      // The response is dropped if the status is not OK.
      if (s.ok()) {
        finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_,
                                     finish_buf_.SendMessage(resp_));
      } else {
        finish_buf_.ServerSendStatus(&ctx_->trailing_metadata_, s);
      }
      finish_buf_.set_core_cq_tag(&finish_tag_);
      call_.PerformOps(&finish_buf_);
    }

    // Pushes initial metadata out ahead of Finish; may only be called once,
    // before any metadata has been sent. \a f runs when the op completes.
    void SendInitialMetadata(std::function<void(bool)> f) override {
      GPR_CODEGEN_ASSERT(!ctx_->sent_initial_metadata_);

      meta_tag_ =
          CallbackWithSuccessTag(call_.call(), std::move(f), &meta_buf_);
      meta_buf_.SendInitialMetadata(&ctx_->initial_metadata_,
                                    ctx_->initial_metadata_flags());
      if (ctx_->compression_level_set()) {
        meta_buf_.set_compression_level(ctx_->compression_level());
      }
      ctx_->sent_initial_metadata_ = true;
      meta_buf_.set_core_cq_tag(&meta_tag_);
      call_.PerformOps(&meta_buf_);
    }

   private:
    template <class SrvType, class ReqType, class RespType>
    friend class CallbackUnaryHandler;

    ServerCallbackRpcControllerImpl(ServerContext* ctx, Call* call,
                                    RequestType* req,
                                    std::function<void()> call_requester)
        : ctx_(ctx),
          call_(*call),
          req_(req),
          call_requester_(std::move(call_requester)) {}

    // req_ was placement-constructed in the call arena (see Deserialize), so
    // only its destructor is run here; the arena reclaims the memory.
    ~ServerCallbackRpcControllerImpl() { req_->~RequestType(); }

    RequestType* request() { return req_; }
    ResponseType* response() { return &resp_; }

    CallOpSet<CallOpSendInitialMetadata> meta_buf_;
    CallbackWithSuccessTag meta_tag_;
    CallOpSet<CallOpSendInitialMetadata, CallOpSendMessage,
              CallOpServerSendStatus>
        finish_buf_;
    CallbackWithSuccessTag finish_tag_;

    ServerContext* ctx_;
    Call call_;
    RequestType* req_;       // arena-allocated; destroyed in our destructor
    ResponseType resp_;
    std::function<void()> call_requester_;
  };
};
||||
|
||||
} // namespace internal
|
||||
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_SERVER_CALLBACK_H
|
@ -0,0 +1,99 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H |
||||
#define GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H |
||||
|
||||
#include <atomic> |
||||
#include <vector> |
||||
|
||||
#include <grpcpp/impl/codegen/interceptor.h> |
||||
#include <grpcpp/impl/codegen/string_ref.h> |
||||
|
||||
namespace grpc { |
||||
|
||||
class ServerContext; |
||||
|
||||
namespace internal { |
||||
class InterceptorBatchMethodsImpl; |
||||
} |
||||
|
||||
namespace experimental { |
||||
class ServerRpcInfo; |
||||
|
||||
class ServerInterceptorFactoryInterface { |
||||
public: |
||||
virtual ~ServerInterceptorFactoryInterface() {} |
||||
virtual Interceptor* CreateServerInterceptor(ServerRpcInfo* info) = 0; |
||||
}; |
||||
|
||||
class ServerRpcInfo { |
||||
public: |
||||
~ServerRpcInfo(){}; |
||||
|
||||
ServerRpcInfo(const ServerRpcInfo&) = delete; |
||||
ServerRpcInfo(ServerRpcInfo&&) = default; |
||||
ServerRpcInfo& operator=(ServerRpcInfo&&) = default; |
||||
|
||||
// Getter methods
|
||||
const char* method() { return method_; } |
||||
grpc::ServerContext* server_context() { return ctx_; } |
||||
|
||||
private: |
||||
ServerRpcInfo(grpc::ServerContext* ctx, const char* method) |
||||
: ctx_(ctx), method_(method) { |
||||
ref_.store(1); |
||||
} |
||||
|
||||
// Runs interceptor at pos \a pos.
|
||||
void RunInterceptor( |
||||
experimental::InterceptorBatchMethods* interceptor_methods, size_t pos) { |
||||
GPR_CODEGEN_ASSERT(pos < interceptors_.size()); |
||||
interceptors_[pos]->Intercept(interceptor_methods); |
||||
} |
||||
|
||||
void RegisterInterceptors( |
||||
const std::vector< |
||||
std::unique_ptr<experimental::ServerInterceptorFactoryInterface>>& |
||||
creators) { |
||||
for (const auto& creator : creators) { |
||||
interceptors_.push_back(std::unique_ptr<experimental::Interceptor>( |
||||
creator->CreateServerInterceptor(this))); |
||||
} |
||||
} |
||||
|
||||
void Ref() { ref_++; } |
||||
void Unref() { |
||||
if (--ref_ == 0) { |
||||
delete this; |
||||
} |
||||
} |
||||
|
||||
grpc::ServerContext* ctx_ = nullptr; |
||||
const char* method_ = nullptr; |
||||
std::atomic_int ref_; |
||||
std::vector<std::unique_ptr<experimental::Interceptor>> interceptors_; |
||||
|
||||
friend class internal::InterceptorBatchMethodsImpl; |
||||
friend class grpc::ServerContext; |
||||
}; |
||||
|
||||
} // namespace experimental
|
||||
} // namespace grpc
|
||||
|
||||
#endif // GRPCPP_IMPL_CODEGEN_SERVER_INTERCEPTOR_H
|
@ -0,0 +1,24 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPCPP_SUPPORT_SERVER_CALLBACK_H |
||||
#define GRPCPP_SUPPORT_SERVER_CALLBACK_H |
||||
|
||||
#include <grpcpp/impl/codegen/server_callback.h> |
||||
|
||||
#endif // GRPCPP_SUPPORT_SERVER_CALLBACK_H
|
@ -1,8 +1,7 @@ |
||||
/* Automatically generated nanopb constant definitions */ |
||||
/* Generated by nanopb-0.3.7-dev */ |
||||
|
||||
#include "src/cpp/server/health/health.pb.h" |
||||
|
||||
#include "src/core/ext/filters/client_channel/health/health.pb.h" |
||||
/* @@protoc_insertion_point(includes) */ |
||||
#if PB_PROTO_HEADER_VERSION != 30 |
||||
#error Regenerate this file with the current version of nanopb generator. |
@ -0,0 +1,653 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include <stdint.h> |
||||
#include <stdio.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/health/health_check_client.h" |
||||
|
||||
#include "pb_decode.h" |
||||
#include "pb_encode.h" |
||||
#include "src/core/ext/filters/client_channel/health/health.pb.h" |
||||
#include "src/core/lib/debug/trace.h" |
||||
#include "src/core/lib/gprpp/mutex_lock.h" |
||||
#include "src/core/lib/slice/slice_internal.h" |
||||
#include "src/core/lib/transport/error_utils.h" |
||||
#include "src/core/lib/transport/status_metadata.h" |
||||
|
||||
#define HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS 1 |
||||
#define HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER 1.6 |
||||
#define HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS 120 |
||||
#define HEALTH_CHECK_RECONNECT_JITTER 0.2 |
||||
|
||||
grpc_core::TraceFlag grpc_health_check_client_trace(false, |
||||
"health_check_client"); |
||||
|
||||
namespace grpc_core { |
||||
|
||||
//
|
||||
// HealthCheckClient
|
||||
//
|
||||
|
||||
HealthCheckClient::HealthCheckClient( |
||||
const char* service_name, |
||||
RefCountedPtr<ConnectedSubchannel> connected_subchannel, |
||||
grpc_pollset_set* interested_parties, |
||||
grpc_core::RefCountedPtr<grpc_core::channelz::SubchannelNode> channelz_node) |
||||
: InternallyRefCountedWithTracing<HealthCheckClient>( |
||||
&grpc_health_check_client_trace), |
||||
service_name_(service_name), |
||||
connected_subchannel_(std::move(connected_subchannel)), |
||||
interested_parties_(interested_parties), |
||||
channelz_node_(std::move(channelz_node)), |
||||
retry_backoff_( |
||||
BackOff::Options() |
||||
.set_initial_backoff( |
||||
HEALTH_CHECK_INITIAL_CONNECT_BACKOFF_SECONDS * 1000) |
||||
.set_multiplier(HEALTH_CHECK_RECONNECT_BACKOFF_MULTIPLIER) |
||||
.set_jitter(HEALTH_CHECK_RECONNECT_JITTER) |
||||
.set_max_backoff(HEALTH_CHECK_RECONNECT_MAX_BACKOFF_SECONDS * |
||||
1000)) { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "created HealthCheckClient %p", this); |
||||
} |
||||
GRPC_CLOSURE_INIT(&retry_timer_callback_, OnRetryTimer, this, |
||||
grpc_schedule_on_exec_ctx); |
||||
gpr_mu_init(&mu_); |
||||
StartCall(); |
||||
} |
||||
|
||||
HealthCheckClient::~HealthCheckClient() { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "destroying HealthCheckClient %p", this); |
||||
} |
||||
GRPC_ERROR_UNREF(error_); |
||||
gpr_mu_destroy(&mu_); |
||||
} |
||||
|
||||
void HealthCheckClient::NotifyOnHealthChange(grpc_connectivity_state* state, |
||||
grpc_closure* closure) { |
||||
MutexLock lock(&mu_); |
||||
GPR_ASSERT(notify_state_ == nullptr); |
||||
if (*state != state_) { |
||||
*state = state_; |
||||
GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(error_)); |
||||
return; |
||||
} |
||||
notify_state_ = state; |
||||
on_health_changed_ = closure; |
||||
} |
||||
|
||||
void HealthCheckClient::SetHealthStatus(grpc_connectivity_state state, |
||||
grpc_error* error) { |
||||
MutexLock lock(&mu_); |
||||
SetHealthStatusLocked(state, error); |
||||
} |
||||
|
||||
void HealthCheckClient::SetHealthStatusLocked(grpc_connectivity_state state, |
||||
grpc_error* error) { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: setting state=%d error=%s", this, |
||||
state, grpc_error_string(error)); |
||||
} |
||||
if (notify_state_ != nullptr && *notify_state_ != state) { |
||||
*notify_state_ = state; |
||||
notify_state_ = nullptr; |
||||
GRPC_CLOSURE_SCHED(on_health_changed_, GRPC_ERROR_REF(error)); |
||||
on_health_changed_ = nullptr; |
||||
} |
||||
state_ = state; |
||||
GRPC_ERROR_UNREF(error_); |
||||
error_ = error; |
||||
} |
||||
|
||||
void HealthCheckClient::Orphan() { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: shutting down", this); |
||||
} |
||||
{ |
||||
MutexLock lock(&mu_); |
||||
if (on_health_changed_ != nullptr) { |
||||
*notify_state_ = GRPC_CHANNEL_SHUTDOWN; |
||||
notify_state_ = nullptr; |
||||
GRPC_CLOSURE_SCHED(on_health_changed_, GRPC_ERROR_NONE); |
||||
on_health_changed_ = nullptr; |
||||
} |
||||
shutting_down_ = true; |
||||
call_state_.reset(); |
||||
if (retry_timer_callback_pending_) { |
||||
grpc_timer_cancel(&retry_timer_); |
||||
} |
||||
} |
||||
Unref(DEBUG_LOCATION, "orphan"); |
||||
} |
||||
|
||||
void HealthCheckClient::StartCall() { |
||||
MutexLock lock(&mu_); |
||||
StartCallLocked(); |
||||
} |
||||
|
||||
void HealthCheckClient::StartCallLocked() { |
||||
if (shutting_down_) return; |
||||
GPR_ASSERT(call_state_ == nullptr); |
||||
SetHealthStatusLocked(GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE); |
||||
call_state_ = MakeOrphanable<CallState>(Ref(), interested_parties_); |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: created CallState %p", this, |
||||
call_state_.get()); |
||||
} |
||||
call_state_->StartCall(); |
||||
} |
||||
|
||||
void HealthCheckClient::StartRetryTimer() { |
||||
MutexLock lock(&mu_); |
||||
SetHealthStatusLocked( |
||||
GRPC_CHANNEL_TRANSIENT_FAILURE, |
||||
GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
||||
"health check call failed; will retry after backoff")); |
||||
grpc_millis next_try = retry_backoff_.NextAttemptTime(); |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: health check call lost...", this); |
||||
grpc_millis timeout = next_try - ExecCtx::Get()->Now(); |
||||
if (timeout > 0) { |
||||
gpr_log(GPR_INFO, |
||||
"HealthCheckClient %p: ... will retry in %" PRId64 "ms.", this, |
||||
timeout); |
||||
} else { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: ... retrying immediately.", |
||||
this); |
||||
} |
||||
} |
||||
// Ref for callback, tracked manually.
|
||||
Ref(DEBUG_LOCATION, "health_retry_timer").release(); |
||||
retry_timer_callback_pending_ = true; |
||||
grpc_timer_init(&retry_timer_, next_try, &retry_timer_callback_); |
||||
} |
||||
|
||||
void HealthCheckClient::OnRetryTimer(void* arg, grpc_error* error) { |
||||
HealthCheckClient* self = static_cast<HealthCheckClient*>(arg); |
||||
{ |
||||
MutexLock lock(&self->mu_); |
||||
self->retry_timer_callback_pending_ = false; |
||||
if (!self->shutting_down_ && error == GRPC_ERROR_NONE && |
||||
self->call_state_ == nullptr) { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: restarting health check call", |
||||
self); |
||||
} |
||||
self->StartCallLocked(); |
||||
} |
||||
} |
||||
self->Unref(DEBUG_LOCATION, "health_retry_timer"); |
||||
} |
||||
|
||||
//
|
||||
// protobuf helpers
|
||||
//
|
||||
|
||||
namespace { |
||||
|
||||
void EncodeRequest(const char* service_name, |
||||
ManualConstructor<SliceBufferByteStream>* send_message) { |
||||
grpc_health_v1_HealthCheckRequest request_struct; |
||||
request_struct.has_service = true; |
||||
snprintf(request_struct.service, sizeof(request_struct.service), "%s", |
||||
service_name); |
||||
pb_ostream_t ostream; |
||||
memset(&ostream, 0, sizeof(ostream)); |
||||
pb_encode(&ostream, grpc_health_v1_HealthCheckRequest_fields, |
||||
&request_struct); |
||||
grpc_slice request_slice = GRPC_SLICE_MALLOC(ostream.bytes_written); |
||||
ostream = pb_ostream_from_buffer(GRPC_SLICE_START_PTR(request_slice), |
||||
GRPC_SLICE_LENGTH(request_slice)); |
||||
GPR_ASSERT(pb_encode(&ostream, grpc_health_v1_HealthCheckRequest_fields, |
||||
&request_struct) != 0); |
||||
grpc_slice_buffer slice_buffer; |
||||
grpc_slice_buffer_init(&slice_buffer); |
||||
grpc_slice_buffer_add(&slice_buffer, request_slice); |
||||
send_message->Init(&slice_buffer, 0); |
||||
grpc_slice_buffer_destroy_internal(&slice_buffer); |
||||
} |
||||
|
||||
// Returns true if healthy.
|
||||
// If there was an error parsing the response, sets *error and returns false.
|
||||
bool DecodeResponse(grpc_slice_buffer* slice_buffer, grpc_error** error) { |
||||
// If message is empty, assume unhealthy.
|
||||
if (slice_buffer->length == 0) { |
||||
*error = |
||||
GRPC_ERROR_CREATE_FROM_STATIC_STRING("health check response was empty"); |
||||
return false; |
||||
} |
||||
// Concatenate the slices to form a single string.
|
||||
UniquePtr<uint8_t> recv_message_deleter; |
||||
uint8_t* recv_message; |
||||
if (slice_buffer->count == 1) { |
||||
recv_message = GRPC_SLICE_START_PTR(slice_buffer->slices[0]); |
||||
} else { |
||||
recv_message = static_cast<uint8_t*>(gpr_malloc(slice_buffer->length)); |
||||
recv_message_deleter.reset(recv_message); |
||||
size_t offset = 0; |
||||
for (size_t i = 0; i < slice_buffer->count; ++i) { |
||||
memcpy(recv_message + offset, |
||||
GRPC_SLICE_START_PTR(slice_buffer->slices[i]), |
||||
GRPC_SLICE_LENGTH(slice_buffer->slices[i])); |
||||
offset += GRPC_SLICE_LENGTH(slice_buffer->slices[i]); |
||||
} |
||||
} |
||||
// Deserialize message.
|
||||
grpc_health_v1_HealthCheckResponse response_struct; |
||||
pb_istream_t istream = |
||||
pb_istream_from_buffer(recv_message, slice_buffer->length); |
||||
if (!pb_decode(&istream, grpc_health_v1_HealthCheckResponse_fields, |
||||
&response_struct)) { |
||||
// Can't parse message; assume unhealthy.
|
||||
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
||||
"cannot parse health check response"); |
||||
return false; |
||||
} |
||||
if (!response_struct.has_status) { |
||||
// Field not present; assume unhealthy.
|
||||
*error = GRPC_ERROR_CREATE_FROM_STATIC_STRING( |
||||
"status field not present in health check response"); |
||||
return false; |
||||
} |
||||
return response_struct.status == |
||||
grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING; |
||||
} |
||||
|
||||
} // namespace
|
||||
|
||||
//
|
||||
// HealthCheckClient::CallState
|
||||
//
|
||||
|
||||
HealthCheckClient::CallState::CallState( |
||||
RefCountedPtr<HealthCheckClient> health_check_client, |
||||
grpc_pollset_set* interested_parties) |
||||
: InternallyRefCountedWithTracing<CallState>( |
||||
&grpc_health_check_client_trace), |
||||
health_check_client_(std::move(health_check_client)), |
||||
pollent_(grpc_polling_entity_create_from_pollset_set(interested_parties)), |
||||
arena_(gpr_arena_create(health_check_client_->connected_subchannel_ |
||||
->GetInitialCallSizeEstimate(0))) { |
||||
memset(&call_combiner_, 0, sizeof(call_combiner_)); |
||||
grpc_call_combiner_init(&call_combiner_); |
||||
memset(context_, 0, sizeof(context_)); |
||||
gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(0)); |
||||
} |
||||
|
||||
HealthCheckClient::CallState::~CallState() { |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, "HealthCheckClient %p: destroying CallState %p", |
||||
health_check_client_.get(), this); |
||||
} |
||||
if (call_ != nullptr) GRPC_SUBCHANNEL_CALL_UNREF(call_, "call_ended"); |
||||
for (size_t i = 0; i < GRPC_CONTEXT_COUNT; i++) { |
||||
if (context_[i].destroy != nullptr) { |
||||
context_[i].destroy(context_[i].value); |
||||
} |
||||
} |
||||
// Unset the call combiner cancellation closure. This has the
|
||||
// effect of scheduling the previously set cancellation closure, if
|
||||
// any, so that it can release any internal references it may be
|
||||
// holding to the call stack. Also flush the closures on exec_ctx so that
|
||||
// filters that schedule cancel notification closures on exec_ctx do not
|
||||
// need to take a ref of the call stack to guarantee closure liveness.
|
||||
grpc_call_combiner_set_notify_on_cancel(&call_combiner_, nullptr); |
||||
grpc_core::ExecCtx::Get()->Flush(); |
||||
grpc_call_combiner_destroy(&call_combiner_); |
||||
gpr_arena_destroy(arena_); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::Orphan() { |
||||
grpc_call_combiner_cancel(&call_combiner_, GRPC_ERROR_CANCELLED); |
||||
Cancel(); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::StartCall() { |
||||
ConnectedSubchannel::CallArgs args = { |
||||
&pollent_, |
||||
GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH, |
||||
gpr_now(GPR_CLOCK_MONOTONIC), // start_time
|
||||
GRPC_MILLIS_INF_FUTURE, // deadline
|
||||
arena_, |
||||
context_, |
||||
&call_combiner_, |
||||
0, // parent_data_size
|
||||
}; |
||||
grpc_error* error = |
||||
health_check_client_->connected_subchannel_->CreateCall(args, &call_); |
||||
if (error != GRPC_ERROR_NONE) { |
||||
gpr_log(GPR_ERROR, |
||||
"HealthCheckClient %p CallState %p: error creating health " |
||||
"checking call on subchannel (%s); will retry", |
||||
health_check_client_.get(), this, grpc_error_string(error)); |
||||
GRPC_ERROR_UNREF(error); |
||||
// Schedule instead of running directly, since we must not be
|
||||
// holding health_check_client_->mu_ when CallEnded() is called.
|
||||
Ref(DEBUG_LOCATION, "call_end_closure").release(); |
||||
GRPC_CLOSURE_SCHED( |
||||
GRPC_CLOSURE_INIT(&batch_.handler_private.closure, CallEndedRetry, this, |
||||
grpc_schedule_on_exec_ctx), |
||||
GRPC_ERROR_NONE); |
||||
return; |
||||
} |
||||
// Initialize payload and batch.
|
||||
memset(&batch_, 0, sizeof(batch_)); |
||||
payload_.context = context_; |
||||
batch_.payload = &payload_; |
||||
// on_complete callback takes ref, handled manually.
|
||||
Ref(DEBUG_LOCATION, "on_complete").release(); |
||||
batch_.on_complete = GRPC_CLOSURE_INIT(&on_complete_, OnComplete, this, |
||||
grpc_schedule_on_exec_ctx); |
||||
// Add send_initial_metadata op.
|
||||
grpc_metadata_batch_init(&send_initial_metadata_); |
||||
error = grpc_metadata_batch_add_head( |
||||
&send_initial_metadata_, &path_metadata_storage_, |
||||
grpc_mdelem_from_slices( |
||||
GRPC_MDSTR_PATH, |
||||
GRPC_MDSTR_SLASH_GRPC_DOT_HEALTH_DOT_V1_DOT_HEALTH_SLASH_WATCH)); |
||||
GPR_ASSERT(error == GRPC_ERROR_NONE); |
||||
payload_.send_initial_metadata.send_initial_metadata = |
||||
&send_initial_metadata_; |
||||
payload_.send_initial_metadata.send_initial_metadata_flags = 0; |
||||
payload_.send_initial_metadata.peer_string = nullptr; |
||||
batch_.send_initial_metadata = true; |
||||
// Add send_message op.
|
||||
EncodeRequest(health_check_client_->service_name_, &send_message_); |
||||
payload_.send_message.send_message.reset(send_message_.get()); |
||||
batch_.send_message = true; |
||||
// Add send_trailing_metadata op.
|
||||
grpc_metadata_batch_init(&send_trailing_metadata_); |
||||
payload_.send_trailing_metadata.send_trailing_metadata = |
||||
&send_trailing_metadata_; |
||||
batch_.send_trailing_metadata = true; |
||||
// Add recv_initial_metadata op.
|
||||
grpc_metadata_batch_init(&recv_initial_metadata_); |
||||
payload_.recv_initial_metadata.recv_initial_metadata = |
||||
&recv_initial_metadata_; |
||||
payload_.recv_initial_metadata.recv_flags = nullptr; |
||||
payload_.recv_initial_metadata.trailing_metadata_available = nullptr; |
||||
payload_.recv_initial_metadata.peer_string = nullptr; |
||||
// recv_initial_metadata_ready callback takes ref, handled manually.
|
||||
Ref(DEBUG_LOCATION, "recv_initial_metadata_ready").release(); |
||||
payload_.recv_initial_metadata.recv_initial_metadata_ready = |
||||
GRPC_CLOSURE_INIT(&recv_initial_metadata_ready_, RecvInitialMetadataReady, |
||||
this, grpc_schedule_on_exec_ctx); |
||||
batch_.recv_initial_metadata = true; |
||||
// Add recv_message op.
|
||||
payload_.recv_message.recv_message = &recv_message_; |
||||
// recv_message callback takes ref, handled manually.
|
||||
Ref(DEBUG_LOCATION, "recv_message_ready").release(); |
||||
payload_.recv_message.recv_message_ready = GRPC_CLOSURE_INIT( |
||||
&recv_message_ready_, RecvMessageReady, this, grpc_schedule_on_exec_ctx); |
||||
batch_.recv_message = true; |
||||
// Start batch.
|
||||
StartBatch(&batch_); |
||||
// Initialize recv_trailing_metadata batch.
|
||||
memset(&recv_trailing_metadata_batch_, 0, |
||||
sizeof(recv_trailing_metadata_batch_)); |
||||
recv_trailing_metadata_batch_.payload = &payload_; |
||||
// Add recv_trailing_metadata op.
|
||||
grpc_metadata_batch_init(&recv_trailing_metadata_); |
||||
payload_.recv_trailing_metadata.recv_trailing_metadata = |
||||
&recv_trailing_metadata_; |
||||
payload_.recv_trailing_metadata.collect_stats = &collect_stats_; |
||||
// This callback signals the end of the call, so it relies on the
|
||||
// initial ref instead of taking a new ref. When it's invoked, the
|
||||
// initial ref is released.
|
||||
payload_.recv_trailing_metadata.recv_trailing_metadata_ready = |
||||
GRPC_CLOSURE_INIT(&recv_trailing_metadata_ready_, |
||||
RecvTrailingMetadataReady, this, |
||||
grpc_schedule_on_exec_ctx); |
||||
recv_trailing_metadata_batch_.recv_trailing_metadata = true; |
||||
// Start recv_trailing_metadata batch.
|
||||
StartBatch(&recv_trailing_metadata_batch_); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::StartBatchInCallCombiner(void* arg, |
||||
grpc_error* error) { |
||||
grpc_transport_stream_op_batch* batch = |
||||
static_cast<grpc_transport_stream_op_batch*>(arg); |
||||
grpc_subchannel_call* call = |
||||
static_cast<grpc_subchannel_call*>(batch->handler_private.extra_arg); |
||||
grpc_subchannel_call_process_op(call, batch); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::StartBatch( |
||||
grpc_transport_stream_op_batch* batch) { |
||||
batch->handler_private.extra_arg = call_; |
||||
GRPC_CLOSURE_INIT(&batch->handler_private.closure, StartBatchInCallCombiner, |
||||
batch, grpc_schedule_on_exec_ctx); |
||||
GRPC_CALL_COMBINER_START(&call_combiner_, &batch->handler_private.closure, |
||||
GRPC_ERROR_NONE, "start_subchannel_batch"); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::OnCancelComplete(void* arg, |
||||
grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "health_cancel"); |
||||
self->Unref(DEBUG_LOCATION, "cancel"); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::StartCancel(void* arg, grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
auto* batch = grpc_make_transport_stream_op( |
||||
GRPC_CLOSURE_CREATE(OnCancelComplete, self, grpc_schedule_on_exec_ctx)); |
||||
batch->cancel_stream = true; |
||||
batch->payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED; |
||||
grpc_subchannel_call_process_op(self->call_, batch); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::Cancel() { |
||||
if (call_ != nullptr) { |
||||
Ref(DEBUG_LOCATION, "cancel").release(); |
||||
GRPC_CALL_COMBINER_START( |
||||
&call_combiner_, |
||||
GRPC_CLOSURE_CREATE(StartCancel, this, grpc_schedule_on_exec_ctx), |
||||
GRPC_ERROR_NONE, "health_cancel"); |
||||
} |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::OnComplete(void* arg, grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "on_complete"); |
||||
grpc_metadata_batch_destroy(&self->send_initial_metadata_); |
||||
grpc_metadata_batch_destroy(&self->send_trailing_metadata_); |
||||
self->Unref(DEBUG_LOCATION, "on_complete"); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::RecvInitialMetadataReady(void* arg, |
||||
grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_initial_metadata_ready"); |
||||
grpc_metadata_batch_destroy(&self->recv_initial_metadata_); |
||||
self->Unref(DEBUG_LOCATION, "recv_initial_metadata_ready"); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::DoneReadingRecvMessage(grpc_error* error) { |
||||
recv_message_.reset(); |
||||
if (error != GRPC_ERROR_NONE) { |
||||
GRPC_ERROR_UNREF(error); |
||||
Cancel(); |
||||
grpc_slice_buffer_destroy_internal(&recv_message_buffer_); |
||||
Unref(DEBUG_LOCATION, "recv_message_ready"); |
||||
return; |
||||
} |
||||
const bool healthy = DecodeResponse(&recv_message_buffer_, &error); |
||||
const grpc_connectivity_state state = |
||||
healthy ? GRPC_CHANNEL_READY : GRPC_CHANNEL_TRANSIENT_FAILURE; |
||||
if (error == GRPC_ERROR_NONE && !healthy) { |
||||
error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("backend unhealthy"); |
||||
} |
||||
health_check_client_->SetHealthStatus(state, error); |
||||
gpr_atm_rel_store(&seen_response_, static_cast<gpr_atm>(1)); |
||||
grpc_slice_buffer_destroy_internal(&recv_message_buffer_); |
||||
// Start another recv_message batch.
|
||||
// This re-uses the ref we're holding.
|
||||
// Note: Can't just reuse batch_ here, since we don't know that all
|
||||
// callbacks from the original batch have completed yet.
|
||||
memset(&recv_message_batch_, 0, sizeof(recv_message_batch_)); |
||||
recv_message_batch_.payload = &payload_; |
||||
payload_.recv_message.recv_message = &recv_message_; |
||||
payload_.recv_message.recv_message_ready = GRPC_CLOSURE_INIT( |
||||
&recv_message_ready_, RecvMessageReady, this, grpc_schedule_on_exec_ctx); |
||||
recv_message_batch_.recv_message = true; |
||||
StartBatch(&recv_message_batch_); |
||||
} |
||||
|
||||
grpc_error* HealthCheckClient::CallState::PullSliceFromRecvMessage() { |
||||
grpc_slice slice; |
||||
grpc_error* error = recv_message_->Pull(&slice); |
||||
if (error == GRPC_ERROR_NONE) { |
||||
grpc_slice_buffer_add(&recv_message_buffer_, slice); |
||||
} |
||||
return error; |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::ContinueReadingRecvMessage() { |
||||
while (recv_message_->Next(SIZE_MAX, &recv_message_ready_)) { |
||||
grpc_error* error = PullSliceFromRecvMessage(); |
||||
if (error != GRPC_ERROR_NONE) { |
||||
DoneReadingRecvMessage(error); |
||||
return; |
||||
} |
||||
if (recv_message_buffer_.length == recv_message_->length()) { |
||||
DoneReadingRecvMessage(GRPC_ERROR_NONE); |
||||
break; |
||||
} |
||||
} |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::OnByteStreamNext(void* arg, |
||||
grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
if (error != GRPC_ERROR_NONE) { |
||||
self->DoneReadingRecvMessage(GRPC_ERROR_REF(error)); |
||||
return; |
||||
} |
||||
error = self->PullSliceFromRecvMessage(); |
||||
if (error != GRPC_ERROR_NONE) { |
||||
self->DoneReadingRecvMessage(error); |
||||
return; |
||||
} |
||||
if (self->recv_message_buffer_.length == self->recv_message_->length()) { |
||||
self->DoneReadingRecvMessage(GRPC_ERROR_NONE); |
||||
} else { |
||||
self->ContinueReadingRecvMessage(); |
||||
} |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::RecvMessageReady(void* arg, |
||||
grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
GRPC_CALL_COMBINER_STOP(&self->call_combiner_, "recv_message_ready"); |
||||
if (self->recv_message_ == nullptr) { |
||||
self->Unref(DEBUG_LOCATION, "recv_message_ready"); |
||||
return; |
||||
} |
||||
grpc_slice_buffer_init(&self->recv_message_buffer_); |
||||
GRPC_CLOSURE_INIT(&self->recv_message_ready_, OnByteStreamNext, self, |
||||
grpc_schedule_on_exec_ctx); |
||||
self->ContinueReadingRecvMessage(); |
||||
// Ref will continue to be held until we finish draining the byte stream.
|
||||
} |
||||
|
||||
void HealthCheckClient::CallState::RecvTrailingMetadataReady( |
||||
void* arg, grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
GRPC_CALL_COMBINER_STOP(&self->call_combiner_, |
||||
"recv_trailing_metadata_ready"); |
||||
// Get call status.
|
||||
grpc_status_code status = GRPC_STATUS_UNKNOWN; |
||||
if (error != GRPC_ERROR_NONE) { |
||||
grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, &status, |
||||
nullptr /* slice */, nullptr /* http_error */, |
||||
nullptr /* error_string */); |
||||
} else if (self->recv_trailing_metadata_.idx.named.grpc_status != nullptr) { |
||||
status = grpc_get_status_code_from_metadata( |
||||
self->recv_trailing_metadata_.idx.named.grpc_status->md); |
||||
} |
||||
if (grpc_health_check_client_trace.enabled()) { |
||||
gpr_log(GPR_INFO, |
||||
"HealthCheckClient %p CallState %p: health watch failed with " |
||||
"status %d", |
||||
self->health_check_client_.get(), self, status); |
||||
} |
||||
// Clean up.
|
||||
grpc_metadata_batch_destroy(&self->recv_trailing_metadata_); |
||||
// For status UNIMPLEMENTED, give up and assume always healthy.
|
||||
bool retry = true; |
||||
if (status == GRPC_STATUS_UNIMPLEMENTED) { |
||||
static const char kErrorMessage[] = |
||||
"health checking Watch method returned UNIMPLEMENTED; " |
||||
"disabling health checks but assuming server is healthy"; |
||||
gpr_log(GPR_ERROR, kErrorMessage); |
||||
if (self->health_check_client_->channelz_node_ != nullptr) { |
||||
self->health_check_client_->channelz_node_->AddTraceEvent( |
||||
channelz::ChannelTrace::Error, |
||||
grpc_slice_from_static_string(kErrorMessage)); |
||||
} |
||||
self->health_check_client_->SetHealthStatus(GRPC_CHANNEL_READY, |
||||
GRPC_ERROR_NONE); |
||||
retry = false; |
||||
} |
||||
self->CallEnded(retry); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::CallEndedRetry(void* arg, |
||||
grpc_error* error) { |
||||
HealthCheckClient::CallState* self = |
||||
static_cast<HealthCheckClient::CallState*>(arg); |
||||
self->CallEnded(true /* retry */); |
||||
self->Unref(DEBUG_LOCATION, "call_end_closure"); |
||||
} |
||||
|
||||
void HealthCheckClient::CallState::CallEnded(bool retry) { |
||||
// If this CallState is still in use, this call ended because of a failure,
|
||||
// so we need to stop using it and optionally create a new one.
|
||||
// Otherwise, we have deliberately ended this call, and no further action
|
||||
// is required.
|
||||
if (this == health_check_client_->call_state_.get()) { |
||||
health_check_client_->call_state_.reset(); |
||||
if (retry) { |
||||
GPR_ASSERT(!health_check_client_->shutting_down_); |
||||
if (static_cast<bool>(gpr_atm_acq_load(&seen_response_))) { |
||||
// If the call fails after we've gotten a successful response, reset
|
||||
// the backoff and restart the call immediately.
|
||||
health_check_client_->retry_backoff_.Reset(); |
||||
health_check_client_->StartCall(); |
||||
} else { |
||||
// If the call failed without receiving any messages, retry later.
|
||||
health_check_client_->StartRetryTimer(); |
||||
} |
||||
} |
||||
} |
||||
Unref(DEBUG_LOCATION, "call_ended"); |
||||
} |
||||
|
||||
} // namespace grpc_core
|
@ -0,0 +1,173 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HEALTH_HEALTH_CHECK_CLIENT_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HEALTH_HEALTH_CHECK_CLIENT_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include <grpc/grpc.h> |
||||
#include <grpc/support/atm.h> |
||||
#include <grpc/support/sync.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/client_channel_channelz.h" |
||||
#include "src/core/ext/filters/client_channel/subchannel.h" |
||||
#include "src/core/lib/backoff/backoff.h" |
||||
#include "src/core/lib/gpr/arena.h" |
||||
#include "src/core/lib/gprpp/orphanable.h" |
||||
#include "src/core/lib/gprpp/ref_counted_ptr.h" |
||||
#include "src/core/lib/iomgr/call_combiner.h" |
||||
#include "src/core/lib/iomgr/closure.h" |
||||
#include "src/core/lib/iomgr/polling_entity.h" |
||||
#include "src/core/lib/iomgr/timer.h" |
||||
#include "src/core/lib/transport/byte_stream.h" |
||||
#include "src/core/lib/transport/metadata_batch.h" |
||||
#include "src/core/lib/transport/transport.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
class HealthCheckClient |
||||
: public InternallyRefCountedWithTracing<HealthCheckClient> { |
||||
public: |
||||
HealthCheckClient(const char* service_name, |
||||
RefCountedPtr<ConnectedSubchannel> connected_subchannel, |
||||
grpc_pollset_set* interested_parties, |
||||
RefCountedPtr<channelz::SubchannelNode> channelz_node); |
||||
|
||||
~HealthCheckClient(); |
||||
|
||||
// When the health state changes from *state, sets *state to the new
|
||||
// value and schedules closure.
|
||||
// Only one closure can be outstanding at a time.
|
||||
void NotifyOnHealthChange(grpc_connectivity_state* state, |
||||
grpc_closure* closure); |
||||
|
||||
void Orphan() override; |
||||
|
||||
private: |
||||
// Contains a call to the backend and all the data related to the call.
|
||||
class CallState : public InternallyRefCountedWithTracing<CallState> { |
||||
public: |
||||
CallState(RefCountedPtr<HealthCheckClient> health_check_client, |
||||
grpc_pollset_set* interested_parties_); |
||||
~CallState(); |
||||
|
||||
void Orphan() override; |
||||
|
||||
void StartCall(); |
||||
|
||||
private: |
||||
void Cancel(); |
||||
|
||||
void StartBatch(grpc_transport_stream_op_batch* batch); |
||||
static void StartBatchInCallCombiner(void* arg, grpc_error* error); |
||||
|
||||
static void CallEndedRetry(void* arg, grpc_error* error); |
||||
void CallEnded(bool retry); |
||||
|
||||
static void OnComplete(void* arg, grpc_error* error); |
||||
static void RecvInitialMetadataReady(void* arg, grpc_error* error); |
||||
static void RecvMessageReady(void* arg, grpc_error* error); |
||||
static void RecvTrailingMetadataReady(void* arg, grpc_error* error); |
||||
static void StartCancel(void* arg, grpc_error* error); |
||||
static void OnCancelComplete(void* arg, grpc_error* error); |
||||
|
||||
static void OnByteStreamNext(void* arg, grpc_error* error); |
||||
void ContinueReadingRecvMessage(); |
||||
grpc_error* PullSliceFromRecvMessage(); |
||||
void DoneReadingRecvMessage(grpc_error* error); |
||||
|
||||
RefCountedPtr<HealthCheckClient> health_check_client_; |
||||
grpc_polling_entity pollent_; |
||||
|
||||
gpr_arena* arena_; |
||||
grpc_call_combiner call_combiner_; |
||||
grpc_call_context_element context_[GRPC_CONTEXT_COUNT]; |
||||
|
||||
// The streaming call to the backend. Always non-NULL.
|
||||
grpc_subchannel_call* call_; |
||||
|
||||
grpc_transport_stream_op_batch_payload payload_; |
||||
grpc_transport_stream_op_batch batch_; |
||||
grpc_transport_stream_op_batch recv_message_batch_; |
||||
grpc_transport_stream_op_batch recv_trailing_metadata_batch_; |
||||
|
||||
grpc_closure on_complete_; |
||||
|
||||
// send_initial_metadata
|
||||
grpc_metadata_batch send_initial_metadata_; |
||||
grpc_linked_mdelem path_metadata_storage_; |
||||
|
||||
// send_message
|
||||
ManualConstructor<SliceBufferByteStream> send_message_; |
||||
|
||||
// send_trailing_metadata
|
||||
grpc_metadata_batch send_trailing_metadata_; |
||||
|
||||
// recv_initial_metadata
|
||||
grpc_metadata_batch recv_initial_metadata_; |
||||
grpc_closure recv_initial_metadata_ready_; |
||||
|
||||
// recv_message
|
||||
OrphanablePtr<ByteStream> recv_message_; |
||||
grpc_closure recv_message_ready_; |
||||
grpc_slice_buffer recv_message_buffer_; |
||||
gpr_atm seen_response_; |
||||
|
||||
// recv_trailing_metadata
|
||||
grpc_metadata_batch recv_trailing_metadata_; |
||||
grpc_transport_stream_stats collect_stats_; |
||||
grpc_closure recv_trailing_metadata_ready_; |
||||
}; |
||||
|
||||
void StartCall(); |
||||
void StartCallLocked(); // Requires holding mu_.
|
||||
|
||||
void StartRetryTimer(); |
||||
static void OnRetryTimer(void* arg, grpc_error* error); |
||||
|
||||
void SetHealthStatus(grpc_connectivity_state state, grpc_error* error); |
||||
void SetHealthStatusLocked(grpc_connectivity_state state, |
||||
grpc_error* error); // Requires holding mu_.
|
||||
|
||||
const char* service_name_; // Do not own.
|
||||
RefCountedPtr<ConnectedSubchannel> connected_subchannel_; |
||||
grpc_pollset_set* interested_parties_; // Do not own.
|
||||
RefCountedPtr<channelz::SubchannelNode> channelz_node_; |
||||
|
||||
gpr_mu mu_; |
||||
grpc_connectivity_state state_ = GRPC_CHANNEL_CONNECTING; |
||||
grpc_error* error_ = GRPC_ERROR_NONE; |
||||
grpc_connectivity_state* notify_state_ = nullptr; |
||||
grpc_closure* on_health_changed_ = nullptr; |
||||
bool shutting_down_ = false; |
||||
|
||||
// The data associated with the current health check call. It holds a ref
|
||||
// to this HealthCheckClient object.
|
||||
OrphanablePtr<CallState> call_state_; |
||||
|
||||
// Call retry state.
|
||||
BackOff retry_backoff_; |
||||
grpc_timer retry_timer_; |
||||
grpc_closure retry_timer_callback_; |
||||
bool retry_timer_callback_pending_ = false; |
||||
}; |
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HEALTH_HEALTH_CHECK_CLIENT_H */ |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,36 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
/** Channel arg indicating if a target corresponding to the address is grpclb
|
||||
* loadbalancer. The type of this arg is an integer and the value is treated as |
||||
* a bool. */ |
||||
#define GRPC_ARG_ADDRESS_IS_XDS_LOAD_BALANCER \ |
||||
"grpc.address_is_xds_load_balancer" |
||||
/** Channel arg indicating if a target corresponding to the address is a backend
|
||||
* received from a balancer. The type of this arg is an integer and the value is |
||||
* treated as a bool. */ |
||||
#define GRPC_ARG_ADDRESS_IS_BACKEND_FROM_XDS_LOAD_BALANCER \ |
||||
"grpc.address_is_backend_from_xds_load_balancer" |
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_H \ |
||||
*/ |
@ -0,0 +1,26 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h" |
||||
|
||||
grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args( |
||||
grpc_channel_args* args) { |
||||
return args; |
||||
} |
@ -0,0 +1,36 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/lb_policy_factory.h" |
||||
|
||||
/// Makes any necessary modifications to \a args for use in the xds
|
||||
/// balancer channel.
|
||||
///
|
||||
/// Takes ownership of \a args.
|
||||
///
|
||||
/// Caller takes ownership of the returned args.
|
||||
grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args( |
||||
grpc_channel_args* args); |
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CHANNEL_H \ |
||||
*/ |
@ -0,0 +1,107 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_channel.h" |
||||
|
||||
#include <grpc/support/alloc.h> |
||||
#include <grpc/support/string_util.h> |
||||
#include <string.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/client_channel.h" |
||||
#include "src/core/lib/channel/channel_args.h" |
||||
#include "src/core/lib/gpr/string.h" |
||||
#include "src/core/lib/iomgr/sockaddr_utils.h" |
||||
#include "src/core/lib/security/credentials/credentials.h" |
||||
#include "src/core/lib/security/transport/target_authority_table.h" |
||||
#include "src/core/lib/slice/slice_internal.h" |
||||
|
||||
namespace grpc_core { |
||||
namespace { |
||||
|
||||
int BalancerNameCmp(const grpc_core::UniquePtr<char>& a, |
||||
const grpc_core::UniquePtr<char>& b) { |
||||
return strcmp(a.get(), b.get()); |
||||
} |
||||
|
||||
RefCountedPtr<TargetAuthorityTable> CreateTargetAuthorityTable( |
||||
grpc_lb_addresses* addresses) { |
||||
TargetAuthorityTable::Entry* target_authority_entries = |
||||
static_cast<TargetAuthorityTable::Entry*>(gpr_zalloc( |
||||
sizeof(*target_authority_entries) * addresses->num_addresses)); |
||||
for (size_t i = 0; i < addresses->num_addresses; ++i) { |
||||
char* addr_str; |
||||
GPR_ASSERT(grpc_sockaddr_to_string( |
||||
&addr_str, &addresses->addresses[i].address, true) > 0); |
||||
target_authority_entries[i].key = grpc_slice_from_copied_string(addr_str); |
||||
target_authority_entries[i].value.reset( |
||||
gpr_strdup(addresses->addresses[i].balancer_name)); |
||||
gpr_free(addr_str); |
||||
} |
||||
RefCountedPtr<TargetAuthorityTable> target_authority_table = |
||||
TargetAuthorityTable::Create(addresses->num_addresses, |
||||
target_authority_entries, BalancerNameCmp); |
||||
gpr_free(target_authority_entries); |
||||
return target_authority_table; |
||||
} |
||||
|
||||
} // namespace
|
||||
} // namespace grpc_core
|
||||
|
||||
grpc_channel_args* grpc_lb_policy_xds_modify_lb_channel_args( |
||||
grpc_channel_args* args) { |
||||
const char* args_to_remove[1]; |
||||
size_t num_args_to_remove = 0; |
||||
grpc_arg args_to_add[2]; |
||||
size_t num_args_to_add = 0; |
||||
// Add arg for targets info table.
|
||||
const grpc_arg* arg = grpc_channel_args_find(args, GRPC_ARG_LB_ADDRESSES); |
||||
GPR_ASSERT(arg != nullptr); |
||||
GPR_ASSERT(arg->type == GRPC_ARG_POINTER); |
||||
grpc_lb_addresses* addresses = |
||||
static_cast<grpc_lb_addresses*>(arg->value.pointer.p); |
||||
grpc_core::RefCountedPtr<grpc_core::TargetAuthorityTable> |
||||
target_authority_table = grpc_core::CreateTargetAuthorityTable(addresses); |
||||
args_to_add[num_args_to_add++] = |
||||
grpc_core::CreateTargetAuthorityTableChannelArg( |
||||
target_authority_table.get()); |
||||
// Substitute the channel credentials with a version without call
|
||||
// credentials: the load balancer is not necessarily trusted to handle
|
||||
// bearer token credentials.
|
||||
grpc_channel_credentials* channel_credentials = |
||||
grpc_channel_credentials_find_in_args(args); |
||||
grpc_channel_credentials* creds_sans_call_creds = nullptr; |
||||
if (channel_credentials != nullptr) { |
||||
creds_sans_call_creds = |
||||
grpc_channel_credentials_duplicate_without_call_credentials( |
||||
channel_credentials); |
||||
GPR_ASSERT(creds_sans_call_creds != nullptr); |
||||
args_to_remove[num_args_to_remove++] = GRPC_ARG_CHANNEL_CREDENTIALS; |
||||
args_to_add[num_args_to_add++] = |
||||
grpc_channel_credentials_to_arg(creds_sans_call_creds); |
||||
} |
||||
grpc_channel_args* result = grpc_channel_args_copy_and_add_and_remove( |
||||
args, args_to_remove, num_args_to_remove, args_to_add, num_args_to_add); |
||||
// Clean up.
|
||||
grpc_channel_args_destroy(args); |
||||
if (creds_sans_call_creds != nullptr) { |
||||
grpc_channel_credentials_unref(creds_sans_call_creds); |
||||
} |
||||
return result; |
||||
} |
@ -0,0 +1,85 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h" |
||||
|
||||
#include <grpc/support/atm.h> |
||||
#include <grpc/support/string_util.h> |
||||
#include <string.h> |
||||
|
||||
namespace grpc_core { |
||||
|
||||
void XdsLbClientStats::AddCallStarted() { |
||||
gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1); |
||||
} |
||||
|
||||
void XdsLbClientStats::AddCallFinished(bool finished_with_client_failed_to_send, |
||||
bool finished_known_received) { |
||||
gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1); |
||||
if (finished_with_client_failed_to_send) { |
||||
gpr_atm_full_fetch_add(&num_calls_finished_with_client_failed_to_send_, |
||||
(gpr_atm)1); |
||||
} |
||||
if (finished_known_received) { |
||||
gpr_atm_full_fetch_add(&num_calls_finished_known_received_, (gpr_atm)1); |
||||
} |
||||
} |
||||
|
||||
void XdsLbClientStats::AddCallDroppedLocked(char* token) { |
||||
// Increment num_calls_started and num_calls_finished.
|
||||
gpr_atm_full_fetch_add(&num_calls_started_, (gpr_atm)1); |
||||
gpr_atm_full_fetch_add(&num_calls_finished_, (gpr_atm)1); |
||||
// Record the drop.
|
||||
if (drop_token_counts_ == nullptr) { |
||||
drop_token_counts_.reset(New<DroppedCallCounts>()); |
||||
} |
||||
for (size_t i = 0; i < drop_token_counts_->size(); ++i) { |
||||
if (strcmp((*drop_token_counts_)[i].token.get(), token) == 0) { |
||||
++(*drop_token_counts_)[i].count; |
||||
return; |
||||
} |
||||
} |
||||
// Not found, so add a new entry.
|
||||
drop_token_counts_->emplace_back(UniquePtr<char>(gpr_strdup(token)), 1); |
||||
} |
||||
|
||||
namespace { |
||||
|
||||
void AtomicGetAndResetCounter(int64_t* value, gpr_atm* counter) { |
||||
*value = static_cast<int64_t>(gpr_atm_full_xchg(counter, (gpr_atm)0)); |
||||
} |
||||
|
||||
} // namespace
|
||||
|
||||
void XdsLbClientStats::GetLocked( |
||||
int64_t* num_calls_started, int64_t* num_calls_finished, |
||||
int64_t* num_calls_finished_with_client_failed_to_send, |
||||
int64_t* num_calls_finished_known_received, |
||||
UniquePtr<DroppedCallCounts>* drop_token_counts) { |
||||
AtomicGetAndResetCounter(num_calls_started, &num_calls_started_); |
||||
AtomicGetAndResetCounter(num_calls_finished, &num_calls_finished_); |
||||
AtomicGetAndResetCounter(num_calls_finished_with_client_failed_to_send, |
||||
&num_calls_finished_with_client_failed_to_send_); |
||||
AtomicGetAndResetCounter(num_calls_finished_known_received, |
||||
&num_calls_finished_known_received_); |
||||
*drop_token_counts = std::move(drop_token_counts_); |
||||
} |
||||
|
||||
} // namespace grpc_core
|
@ -0,0 +1,72 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H |
||||
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include <grpc/support/atm.h> |
||||
|
||||
#include "src/core/lib/gprpp/inlined_vector.h" |
||||
#include "src/core/lib/gprpp/memory.h" |
||||
#include "src/core/lib/gprpp/ref_counted.h" |
||||
|
||||
namespace grpc_core { |
||||
|
||||
class XdsLbClientStats : public RefCounted<XdsLbClientStats> { |
||||
public: |
||||
struct DropTokenCount { |
||||
UniquePtr<char> token; |
||||
int64_t count; |
||||
|
||||
DropTokenCount(UniquePtr<char> token, int64_t count) |
||||
: token(std::move(token)), count(count) {} |
||||
}; |
||||
|
||||
typedef InlinedVector<DropTokenCount, 10> DroppedCallCounts; |
||||
|
||||
XdsLbClientStats() {} |
||||
|
||||
void AddCallStarted(); |
||||
void AddCallFinished(bool finished_with_client_failed_to_send, |
||||
bool finished_known_received); |
||||
|
||||
// This method is not thread-safe; caller must synchronize.
|
||||
void AddCallDroppedLocked(char* token); |
||||
|
||||
// This method is not thread-safe; caller must synchronize.
|
||||
void GetLocked(int64_t* num_calls_started, int64_t* num_calls_finished, |
||||
int64_t* num_calls_finished_with_client_failed_to_send, |
||||
int64_t* num_calls_finished_known_received, |
||||
UniquePtr<DroppedCallCounts>* drop_token_counts); |
||||
|
||||
private: |
||||
// This field must only be accessed via *_locked() methods.
|
||||
UniquePtr<DroppedCallCounts> drop_token_counts_; |
||||
// These fields may be accessed from multiple threads at a time.
|
||||
gpr_atm num_calls_started_ = 0; |
||||
gpr_atm num_calls_finished_ = 0; |
||||
gpr_atm num_calls_finished_with_client_failed_to_send_ = 0; |
||||
gpr_atm num_calls_finished_known_received_ = 0; |
||||
}; |
||||
|
||||
} // namespace grpc_core
|
||||
|
||||
#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_CLIENT_STATS_H \ |
||||
*/ |
@ -0,0 +1,307 @@ |
||||
/*
|
||||
* |
||||
* Copyright 2018 gRPC authors. |
||||
* |
||||
* Licensed under the Apache License, Version 2.0 (the "License"); |
||||
* you may not use this file except in compliance with the License. |
||||
* You may obtain a copy of the License at |
||||
* |
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* |
||||
* Unless required by applicable law or agreed to in writing, software |
||||
* distributed under the License is distributed on an "AS IS" BASIS, |
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
* See the License for the specific language governing permissions and |
||||
* limitations under the License. |
||||
* |
||||
*/ |
||||
|
||||
#include <grpc/support/port_platform.h> |
||||
|
||||
#include "pb_decode.h" |
||||
#include "pb_encode.h" |
||||
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_load_balancer_api.h" |
||||
|
||||
#include <grpc/support/alloc.h> |
||||
|
||||
/* invoked once for every Server in ServerList */ |
||||
static bool count_serverlist(pb_istream_t* stream, const pb_field_t* field, |
||||
void** arg) { |
||||
xds_grpclb_serverlist* sl = static_cast<xds_grpclb_serverlist*>(*arg); |
||||
xds_grpclb_server server; |
||||
if (GPR_UNLIKELY(!pb_decode(stream, grpc_lb_v1_Server_fields, &server))) { |
||||
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream)); |
||||
return false; |
||||
} |
||||
++sl->num_servers; |
||||
return true; |
||||
} |
||||
|
||||
typedef struct decode_serverlist_arg { |
||||
/* The decoding callback is invoked once per server in serverlist. Remember
|
||||
* which index of the serverlist are we currently decoding */ |
||||
size_t decoding_idx; |
||||
/* The decoded serverlist */ |
||||
xds_grpclb_serverlist* serverlist; |
||||
} decode_serverlist_arg; |
||||
|
||||
/* invoked once for every Server in ServerList */ |
||||
static bool decode_serverlist(pb_istream_t* stream, const pb_field_t* field, |
||||
void** arg) { |
||||
decode_serverlist_arg* dec_arg = static_cast<decode_serverlist_arg*>(*arg); |
||||
GPR_ASSERT(dec_arg->serverlist->num_servers >= dec_arg->decoding_idx); |
||||
xds_grpclb_server* server = |
||||
static_cast<xds_grpclb_server*>(gpr_zalloc(sizeof(xds_grpclb_server))); |
||||
if (GPR_UNLIKELY(!pb_decode(stream, grpc_lb_v1_Server_fields, server))) { |
||||
gpr_free(server); |
||||
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(stream)); |
||||
return false; |
||||
} |
||||
dec_arg->serverlist->servers[dec_arg->decoding_idx++] = server; |
||||
return true; |
||||
} |
||||
|
||||
xds_grpclb_request* xds_grpclb_request_create(const char* lb_service_name) { |
||||
xds_grpclb_request* req = |
||||
static_cast<xds_grpclb_request*>(gpr_malloc(sizeof(xds_grpclb_request))); |
||||
req->has_client_stats = false; |
||||
req->has_initial_request = true; |
||||
req->initial_request.has_name = true; |
||||
strncpy(req->initial_request.name, lb_service_name, |
||||
XDS_SERVICE_NAME_MAX_LENGTH); |
||||
return req; |
||||
} |
||||
|
||||
static void populate_timestamp(gpr_timespec timestamp, |
||||
xds_grpclb_timestamp* timestamp_pb) { |
||||
timestamp_pb->has_seconds = true; |
||||
timestamp_pb->seconds = timestamp.tv_sec; |
||||
timestamp_pb->has_nanos = true; |
||||
timestamp_pb->nanos = timestamp.tv_nsec; |
||||
} |
||||
|
||||
static bool encode_string(pb_ostream_t* stream, const pb_field_t* field, |
||||
void* const* arg) { |
||||
char* str = static_cast<char*>(*arg); |
||||
if (!pb_encode_tag_for_field(stream, field)) return false; |
||||
return pb_encode_string(stream, reinterpret_cast<uint8_t*>(str), strlen(str)); |
||||
} |
||||
|
||||
static bool encode_drops(pb_ostream_t* stream, const pb_field_t* field, |
||||
void* const* arg) { |
||||
grpc_core::XdsLbClientStats::DroppedCallCounts* drop_entries = |
||||
static_cast<grpc_core::XdsLbClientStats::DroppedCallCounts*>(*arg); |
||||
if (drop_entries == nullptr) return true; |
||||
for (size_t i = 0; i < drop_entries->size(); ++i) { |
||||
if (!pb_encode_tag_for_field(stream, field)) return false; |
||||
grpc_lb_v1_ClientStatsPerToken drop_message; |
||||
drop_message.load_balance_token.funcs.encode = encode_string; |
||||
drop_message.load_balance_token.arg = (*drop_entries)[i].token.get(); |
||||
drop_message.has_num_calls = true; |
||||
drop_message.num_calls = (*drop_entries)[i].count; |
||||
if (!pb_encode_submessage(stream, grpc_lb_v1_ClientStatsPerToken_fields, |
||||
&drop_message)) { |
||||
return false; |
||||
} |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
xds_grpclb_request* xds_grpclb_load_report_request_create_locked( |
||||
grpc_core::XdsLbClientStats* client_stats) { |
||||
xds_grpclb_request* req = |
||||
static_cast<xds_grpclb_request*>(gpr_zalloc(sizeof(xds_grpclb_request))); |
||||
req->has_client_stats = true; |
||||
req->client_stats.has_timestamp = true; |
||||
populate_timestamp(gpr_now(GPR_CLOCK_REALTIME), &req->client_stats.timestamp); |
||||
req->client_stats.has_num_calls_started = true; |
||||
req->client_stats.has_num_calls_finished = true; |
||||
req->client_stats.has_num_calls_finished_with_client_failed_to_send = true; |
||||
req->client_stats.has_num_calls_finished_with_client_failed_to_send = true; |
||||
req->client_stats.has_num_calls_finished_known_received = true; |
||||
req->client_stats.calls_finished_with_drop.funcs.encode = encode_drops; |
||||
grpc_core::UniquePtr<grpc_core::XdsLbClientStats::DroppedCallCounts> |
||||
drop_counts; |
||||
client_stats->GetLocked( |
||||
&req->client_stats.num_calls_started, |
||||
&req->client_stats.num_calls_finished, |
||||
&req->client_stats.num_calls_finished_with_client_failed_to_send, |
||||
&req->client_stats.num_calls_finished_known_received, &drop_counts); |
||||
// Will be deleted in xds_grpclb_request_destroy().
|
||||
req->client_stats.calls_finished_with_drop.arg = drop_counts.release(); |
||||
return req; |
||||
} |
||||
|
||||
grpc_slice xds_grpclb_request_encode(const xds_grpclb_request* request) { |
||||
size_t encoded_length; |
||||
pb_ostream_t sizestream; |
||||
pb_ostream_t outputstream; |
||||
grpc_slice slice; |
||||
memset(&sizestream, 0, sizeof(pb_ostream_t)); |
||||
pb_encode(&sizestream, grpc_lb_v1_LoadBalanceRequest_fields, request); |
||||
encoded_length = sizestream.bytes_written; |
||||
|
||||
slice = GRPC_SLICE_MALLOC(encoded_length); |
||||
outputstream = |
||||
pb_ostream_from_buffer(GRPC_SLICE_START_PTR(slice), encoded_length); |
||||
GPR_ASSERT(pb_encode(&outputstream, grpc_lb_v1_LoadBalanceRequest_fields, |
||||
request) != 0); |
||||
return slice; |
||||
} |
||||
|
||||
void xds_grpclb_request_destroy(xds_grpclb_request* request) { |
||||
if (request->has_client_stats) { |
||||
grpc_core::XdsLbClientStats::DroppedCallCounts* drop_entries = |
||||
static_cast<grpc_core::XdsLbClientStats::DroppedCallCounts*>( |
||||
request->client_stats.calls_finished_with_drop.arg); |
||||
grpc_core::Delete(drop_entries); |
||||
} |
||||
gpr_free(request); |
||||
} |
||||
|
||||
typedef grpc_lb_v1_LoadBalanceResponse xds_grpclb_response; |
||||
xds_grpclb_initial_response* xds_grpclb_initial_response_parse( |
||||
grpc_slice encoded_xds_grpclb_response) { |
||||
pb_istream_t stream = |
||||
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response), |
||||
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); |
||||
xds_grpclb_response res; |
||||
memset(&res, 0, sizeof(xds_grpclb_response)); |
||||
if (GPR_UNLIKELY( |
||||
!pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res))) { |
||||
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); |
||||
return nullptr; |
||||
} |
||||
|
||||
if (!res.has_initial_response) return nullptr; |
||||
|
||||
xds_grpclb_initial_response* initial_res = |
||||
static_cast<xds_grpclb_initial_response*>( |
||||
gpr_malloc(sizeof(xds_grpclb_initial_response))); |
||||
memcpy(initial_res, &res.initial_response, |
||||
sizeof(xds_grpclb_initial_response)); |
||||
|
||||
return initial_res; |
||||
} |
||||
|
||||
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist( |
||||
grpc_slice encoded_xds_grpclb_response) { |
||||
pb_istream_t stream = |
||||
pb_istream_from_buffer(GRPC_SLICE_START_PTR(encoded_xds_grpclb_response), |
||||
GRPC_SLICE_LENGTH(encoded_xds_grpclb_response)); |
||||
pb_istream_t stream_at_start = stream; |
||||
xds_grpclb_serverlist* sl = static_cast<xds_grpclb_serverlist*>( |
||||
gpr_zalloc(sizeof(xds_grpclb_serverlist))); |
||||
xds_grpclb_response res; |
||||
memset(&res, 0, sizeof(xds_grpclb_response)); |
||||
// First pass: count number of servers.
|
||||
res.server_list.servers.funcs.decode = count_serverlist; |
||||
res.server_list.servers.arg = sl; |
||||
bool status = pb_decode(&stream, grpc_lb_v1_LoadBalanceResponse_fields, &res); |
||||
if (GPR_UNLIKELY(!status)) { |
||||
gpr_free(sl); |
||||
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); |
||||
return nullptr; |
||||
} |
||||
// Second pass: populate servers.
|
||||
if (sl->num_servers > 0) { |
||||
sl->servers = static_cast<xds_grpclb_server**>( |
||||
gpr_zalloc(sizeof(xds_grpclb_server*) * sl->num_servers)); |
||||
decode_serverlist_arg decode_arg; |
||||
memset(&decode_arg, 0, sizeof(decode_arg)); |
||||
decode_arg.serverlist = sl; |
||||
res.server_list.servers.funcs.decode = decode_serverlist; |
||||
res.server_list.servers.arg = &decode_arg; |
||||
status = pb_decode(&stream_at_start, grpc_lb_v1_LoadBalanceResponse_fields, |
||||
&res); |
||||
if (GPR_UNLIKELY(!status)) { |
||||
xds_grpclb_destroy_serverlist(sl); |
||||
gpr_log(GPR_ERROR, "nanopb error: %s", PB_GET_ERROR(&stream)); |
||||
return nullptr; |
||||
} |
||||
} |
||||
return sl; |
||||
} |
||||
|
||||
void xds_grpclb_destroy_serverlist(xds_grpclb_serverlist* serverlist) { |
||||
if (serverlist == nullptr) { |
||||
return; |
||||
} |
||||
for (size_t i = 0; i < serverlist->num_servers; i++) { |
||||
gpr_free(serverlist->servers[i]); |
||||
} |
||||
gpr_free(serverlist->servers); |
||||
gpr_free(serverlist); |
||||
} |
||||
|
||||
xds_grpclb_serverlist* xds_grpclb_serverlist_copy( |
||||
const xds_grpclb_serverlist* sl) { |
||||
xds_grpclb_serverlist* copy = static_cast<xds_grpclb_serverlist*>( |
||||
gpr_zalloc(sizeof(xds_grpclb_serverlist))); |
||||
copy->num_servers = sl->num_servers; |
||||
copy->servers = static_cast<xds_grpclb_server**>( |
||||
gpr_malloc(sizeof(xds_grpclb_server*) * sl->num_servers)); |
||||
for (size_t i = 0; i < sl->num_servers; i++) { |
||||
copy->servers[i] = |
||||
static_cast<xds_grpclb_server*>(gpr_malloc(sizeof(xds_grpclb_server))); |
||||
memcpy(copy->servers[i], sl->servers[i], sizeof(xds_grpclb_server)); |
||||
} |
||||
return copy; |
||||
} |
||||
|
||||
bool xds_grpclb_serverlist_equals(const xds_grpclb_serverlist* lhs, |
||||
const xds_grpclb_serverlist* rhs) { |
||||
if (lhs == nullptr || rhs == nullptr) { |
||||
return false; |
||||
} |
||||
if (lhs->num_servers != rhs->num_servers) { |
||||
return false; |
||||
} |
||||
for (size_t i = 0; i < lhs->num_servers; i++) { |
||||
if (!xds_grpclb_server_equals(lhs->servers[i], rhs->servers[i])) { |
||||
return false; |
||||
} |
||||
} |
||||
return true; |
||||
} |
||||
|
||||
bool xds_grpclb_server_equals(const xds_grpclb_server* lhs, |
||||
const xds_grpclb_server* rhs) { |
||||
return memcmp(lhs, rhs, sizeof(xds_grpclb_server)) == 0; |
||||
} |
||||
|
||||
/** Three-way comparison of two protobuf Durations: returns <0, 0, or >0.
 * Both operands must be non-null. A duration with a field present is
 * considered greater than one where that field is absent; otherwise the
 * field values are compared, seconds first, then nanos. */
int xds_grpclb_duration_compare(const xds_grpclb_duration* lhs,
                                const xds_grpclb_duration* rhs) {
  GPR_ASSERT(lhs && rhs);
  // Seconds: presence outranks absence; when both are present, compare values.
  if (lhs->has_seconds && rhs->has_seconds) {
    if (lhs->seconds < rhs->seconds) return -1;
    if (lhs->seconds > rhs->seconds) return 1;
  } else if (lhs->has_seconds) {
    return 1;
  } else if (rhs->has_seconds) {
    return -1;
  }

  // Reaching here means seconds did not decide the ordering; sanity-check
  // that the raw fields really are equal before moving on to nanos.
  GPR_ASSERT(lhs->seconds == rhs->seconds);
  // Nanos: same rule — presence wins, then compare values.
  if (lhs->has_nanos && rhs->has_nanos) {
    if (lhs->nanos < rhs->nanos) return -1;
    if (lhs->nanos > rhs->nanos) return 1;
  } else if (lhs->has_nanos) {
    return 1;
  } else if (rhs->has_nanos) {
    return -1;
  }

  // Neither seconds nor nanos distinguished the two durations.
  return 0;
}
||||
|
||||
/** Convert \a duration_pb to milliseconds; an absent seconds or nanos field
 * contributes zero. */
grpc_millis xds_grpclb_duration_to_millis(xds_grpclb_duration* duration_pb) {
  const auto secs = duration_pb->has_seconds ? duration_pb->seconds : 0;
  const auto nanos = duration_pb->has_nanos ? duration_pb->nanos : 0;
  // Seconds scale up to ms; nanos scale down (integer division truncates).
  return static_cast<grpc_millis>(secs * GPR_MS_PER_SEC +
                                  nanos / GPR_NS_PER_MS);
}
||||
|
||||
/** Release \a response previously obtained from
 * xds_grpclb_initial_response_parse. Safe to call with nullptr (gpr_free
 * is given the pointer as-is). */
void xds_grpclb_initial_response_destroy(
    xds_grpclb_initial_response* response) {
  gpr_free(response);
}
@ -0,0 +1,89 @@ |
||||
/*
 *
 * Copyright 2018 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H
#define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H

#include <grpc/support/port_platform.h>

#include <grpc/slice_buffer.h>

#include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
#include "src/core/ext/filters/client_channel/lb_policy/xds/xds_client_stats.h"
#include "src/core/ext/filters/client_channel/lb_policy_factory.h"

/** Maximum accepted length of an LB service name. */
#define XDS_SERVICE_NAME_MAX_LENGTH 128

/** Aliases onto the nanopb-generated grpclb v1 message types, so the xds
 * policy code can use xds-prefixed names throughout. */
typedef grpc_lb_v1_Server_ip_address_t xds_grpclb_ip_address;
typedef grpc_lb_v1_LoadBalanceRequest xds_grpclb_request;
typedef grpc_lb_v1_InitialLoadBalanceResponse xds_grpclb_initial_response;
typedef grpc_lb_v1_Server xds_grpclb_server;
typedef google_protobuf_Duration xds_grpclb_duration;
typedef google_protobuf_Timestamp xds_grpclb_timestamp;

/** An owned, flat list of pointers to server entries. */
typedef struct {
  xds_grpclb_server** servers;
  size_t num_servers;
} xds_grpclb_serverlist;

/** Create a request for a gRPC LB service under \a lb_service_name */
xds_grpclb_request* xds_grpclb_request_create(const char* lb_service_name);
xds_grpclb_request* xds_grpclb_load_report_request_create_locked(
    grpc_core::XdsLbClientStats* client_stats);

/** Protocol Buffers v3-encode \a request */
grpc_slice xds_grpclb_request_encode(const xds_grpclb_request* request);

/** Destroy \a request */
void xds_grpclb_request_destroy(xds_grpclb_request* request);

/** Parse (ie, decode) the bytes in \a encoded_xds_grpclb_response as a \a
 * xds_grpclb_initial_response */
xds_grpclb_initial_response* xds_grpclb_initial_response_parse(
    grpc_slice encoded_xds_grpclb_response);

/** Parse the list of servers from an encoded \a xds_grpclb_response */
xds_grpclb_serverlist* xds_grpclb_response_parse_serverlist(
    grpc_slice encoded_xds_grpclb_response);

/** Return a copy of \a sl. The caller is responsible for calling \a
 * xds_grpclb_destroy_serverlist on the returned copy. */
xds_grpclb_serverlist* xds_grpclb_serverlist_copy(
    const xds_grpclb_serverlist* sl);

/** Return true iff \a lhs and \a rhs hold element-wise equal servers; a null
 * operand compares unequal. */
bool xds_grpclb_serverlist_equals(const xds_grpclb_serverlist* lhs,
                                  const xds_grpclb_serverlist* rhs);

/** Return true iff \a lhs and \a rhs are byte-wise identical. */
bool xds_grpclb_server_equals(const xds_grpclb_server* lhs,
                              const xds_grpclb_server* rhs);

/** Destroy \a serverlist */
void xds_grpclb_destroy_serverlist(xds_grpclb_serverlist* serverlist);

/** Compare \a lhs against \a rhs and return 0 if \a lhs and \a rhs are equal,
 * < 0 if \a lhs represents a duration shorter than \a rhs and > 0 otherwise */
int xds_grpclb_duration_compare(const xds_grpclb_duration* lhs,
                                const xds_grpclb_duration* rhs);

/** Convert \a duration_pb to milliseconds (absent fields count as zero). */
grpc_millis xds_grpclb_duration_to_millis(xds_grpclb_duration* duration_pb);

/** Destroy \a initial_response */
void xds_grpclb_initial_response_destroy(xds_grpclb_initial_response* response);

#endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_XDS_XDS_LOAD_BALANCER_API_H \
        */
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue