Merge branch 'master' into fix-addrlen

pull/16517/head
Sree Kuchibhotla 6 years ago
commit c163baa559
  1. 329  bazel/grpc_build_system.bzl
  2. 43  include/grpc/grpc_security_constants.h
  3. 1  include/grpcpp/impl/codegen/completion_queue.h
  4. 3  src/core/ext/filters/max_age/max_age_filter.cc
  5. 7  src/core/lib/iomgr/timer_generic.cc
  6. 484  src/cpp/server/health/default_health_check_service.cc
  7. 242  src/cpp/server/health/default_health_check_service.h
  8. 1  src/cpp/server/health/health.pb.c
  9. 7  src/cpp/server/health/health.pb.h
  10. 27  src/cpp/server/server_cc.cc
  11. 20  src/proto/grpc/health/v1/health.proto
  12. 4  test/core/iomgr/timer_list_test.cc
  13. 76  test/cpp/end2end/health_service_end2end_test.cc
  14. 67  test/cpp/qps/BUILD
  15. 123  test/cpp/qps/gen_build_yaml.py
  16. 38  test/cpp/qps/json_run_localhost_scenario_gen.py
  17. 1  test/cpp/qps/json_run_localhost_scenarios.bzl
  18. 72  test/cpp/qps/qps_benchmark_script.bzl
  19. 38  test/cpp/qps/qps_json_driver_scenario_gen.py
  20. 1  test/cpp/qps/qps_json_driver_scenarios.bzl
  21. 0  tools/buildgen/generate_build_additions.sh
  22. 18  tools/distrib/check_nanopb_output.sh
  23. 33  tools/run_tests/sanity/check_qps_scenario_changes.py
  24. 1  tools/run_tests/sanity/sanity_tests.yaml

@@ -24,178 +24,203 @@
#
# The set of pollers to test against if a test exercises polling
POLLERS = ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv']
POLLERS = ["epollex", "epollsig", "epoll1", "poll", "poll-cv"]
def if_not_windows(a):
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
return select({
"//:windows": [],
"//:windows_msvc": [],
"//conditions:default": a,
})
def _get_external_deps(external_deps):
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],})
else:
ret += ["//external:" + dep]
return ret
ret = []
for dep in external_deps:
if dep == "address_sorting":
ret += ["//third_party/address_sorting"]
elif dep == "cares":
ret += select({
"//:grpc_no_ares": [],
"//conditions:default": ["//external:cares"],
})
else:
ret += ["//external:" + dep]
return ret
def _maybe_update_cc_library_hdrs(hdrs):
ret = []
hdrs_to_update = {
"third_party/objective_c/Cronet/bidirectional_stream_c.h": "//third_party:objective_c/Cronet/bidirectional_stream_c.h",
}
for h in hdrs:
if h in hdrs_to_update.keys():
ret.append(hdrs_to_update[h])
else:
ret.append(h)
return ret
def grpc_cc_library(name, srcs = [], public_hdrs = [], hdrs = [],
external_deps = [], deps = [], standalone = False,
language = "C++", testonly = False, visibility = None,
alwayslink = 0, data = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],}) +
select({"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],}) +
select({"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions":
["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],}),
hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = if_not_windows(["-pthread"]),
includes = [
"include"
],
alwayslink = alwayslink,
data = data,
)
ret = []
hdrs_to_update = {
"third_party/objective_c/Cronet/bidirectional_stream_c.h": "//third_party:objective_c/Cronet/bidirectional_stream_c.h",
}
for h in hdrs:
if h in hdrs_to_update.keys():
ret.append(hdrs_to_update[h])
else:
ret.append(h)
return ret
def grpc_cc_library(
name,
srcs = [],
public_hdrs = [],
hdrs = [],
external_deps = [],
deps = [],
standalone = False,
language = "C++",
testonly = False,
visibility = None,
alwayslink = 0,
data = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
native.cc_library(
name = name,
srcs = srcs,
defines = select({
"//:grpc_no_ares": ["GRPC_ARES=0"],
"//conditions:default": [],
}) +
select({
"//:remote_execution": ["GRPC_PORT_ISOLATED_RUNTIME=1"],
"//conditions:default": [],
}) +
select({
"//:grpc_allow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=1"],
"//:grpc_disallow_exceptions": ["GRPC_ALLOW_EXCEPTIONS=0"],
"//conditions:default": [],
}),
hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
deps = deps + _get_external_deps(external_deps),
copts = copts,
visibility = visibility,
testonly = testonly,
linkopts = if_not_windows(["-pthread"]),
includes = [
"include",
],
alwayslink = alwayslink,
data = data,
)
def grpc_proto_plugin(name, srcs = [], deps = []):
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
native.cc_binary(
name = name,
srcs = srcs,
deps = deps,
)
load("//:bazel/cc_grpc_library.bzl", "cc_grpc_library")
def grpc_proto_library(name, srcs = [], deps = [], well_known_protos = False,
has_services = True, use_external = False, generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def grpc_proto_library(
name,
srcs = [],
deps = [],
well_known_protos = False,
has_services = True,
use_external = False,
generate_mocks = False):
cc_grpc_library(
name = name,
srcs = srcs,
deps = deps,
well_known_protos = well_known_protos,
proto_only = not has_services,
use_external = use_external,
generate_mocks = generate_mocks,
)
def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++", size = "medium", timeout = "moderate", tags = []):
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
args = {
'name': name,
'srcs': srcs,
'args': args,
'data': data,
'deps': deps + _get_external_deps(external_deps),
'copts': copts,
'linkopts': if_not_windows(["-pthread"]),
'size': size,
'timeout': timeout,
}
if uses_polling:
native.cc_test(testonly=True, tags=['manual'], **args)
for poller in POLLERS:
native.sh_test(
name = name + '@poller=' + poller,
data = [name],
srcs = [
'//test/core/util:run_with_poller_sh',
],
size = size,
timeout = timeout,
args = [
poller,
'$(location %s)' % name,
] + args['args'],
tags = tags,
)
else:
native.cc_test(**args)
copts = []
if language.upper() == "C":
copts = if_not_windows(["-std=c99"])
args = {
"name": name,
"srcs": srcs,
"args": args,
"data": data,
"deps": deps + _get_external_deps(external_deps),
"copts": copts,
"linkopts": if_not_windows(["-pthread"]),
"size": size,
"timeout": timeout,
}
if uses_polling:
native.cc_test(testonly = True, tags = ["manual"], **args)
for poller in POLLERS:
native.sh_test(
name = name + "@poller=" + poller,
data = [name] + data,
srcs = [
"//test/core/util:run_with_poller_sh",
],
size = size,
timeout = timeout,
args = [
poller,
"$(location %s)" % name,
] + args["args"],
tags = tags,
)
else:
native.cc_test(**args)
def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = []):
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
)
def grpc_generate_one_off_targets(): pass
copts = []
if language.upper() == "C":
copts = ["-std=c99"]
native.cc_binary(
name = name,
srcs = srcs,
args = args,
data = data,
testonly = testonly,
linkshared = linkshared,
deps = deps + _get_external_deps(external_deps),
copts = copts,
linkopts = if_not_windows(["-pthread"]) + linkopts,
)
def grpc_generate_one_off_targets():
pass
def grpc_sh_test(name, srcs, args = [], data = []):
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data)
native.sh_test(
name = name,
srcs = srcs,
args = args,
data = data,
)
def grpc_sh_binary(name, srcs, data = []):
native.sh_binary(
name = name,
srcs = srcs,
data = data)
native.sh_binary(
name = name,
srcs = srcs,
data = data,
)
def grpc_py_binary(name, srcs, data = [], deps = [], external_deps = [], testonly = False):
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps)
)
native.py_binary(
name = name,
srcs = srcs,
testonly = testonly,
data = data,
deps = deps + _get_external_deps(external_deps),
)
def grpc_package(name, visibility = "private", features = []):
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features
)
if visibility == "tests":
visibility = ["//test:__subpackages__"]
elif visibility == "public":
visibility = ["//visibility:public"]
elif visibility == "private":
visibility = []
else:
fail("Unknown visibility " + visibility)
if len(visibility) != 0:
native.package(
default_visibility = visibility,
features = features,
)

@@ -57,46 +57,51 @@ typedef enum {
} grpc_ssl_certificate_config_reload_status;
typedef enum {
/** Server does not request client certificate. A client can present a self
signed or signed certificates if it wishes to do so and they would be
accepted. */
/** Server does not request client certificate.
The certificate presented by the client is not checked by the server at
all. (A client may present a self-signed or signed certificate or not
present a certificate at all and any of those options would be accepted) */
GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE,
/** Server requests client certificate but does not enforce that the client
presents a certificate.
If the client presents a certificate, the client authentication is left to
the application based on the metadata like certificate etc.
the application (the necessary metadata will be available to the
application via authentication context properties, see grpc_auth_context).
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY,
/** Server requests client certificate but does not enforce that the client
presents a certificate.
If the client presents a certificate, the client authentication is done by
grpc framework (The client needs to either present a signed cert or skip no
certificate for a successful connection).
the gRPC framework. (For a successful connection the client needs to either
present a certificate that can be verified against the root certificate
configured by the server or not present a certificate at all)
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY,
/** Server requests client certificate but enforces that the client presents a
/** Server requests client certificate and enforces that the client presents a
certificate.
If the client presents a certificate, the client authentication is left to
the application based on the metadata like certificate etc.
the application (the necessary metadata will be available to the
application via authentication context properties, see grpc_auth_context).
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY,
/** Server requests client certificate but enforces that the client presents a
/** Server requests client certificate and enforces that the client presents a
certificate.
The certificate presented by the client is verified by grpc framework (The
client needs to present signed certs for a successful connection).
The certificate presented by the client is verified by the gRPC framework.
(For a successful connection the client needs to present a certificate that
can be verified against the root certificate configured by the server)
The key cert pair should still be valid for the SSL connection to be
established. */
The client's key certificate pair must be valid for the SSL connection to
be established. */
GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY
} grpc_ssl_client_certificate_request_type;
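
For reference, a minimal sketch of how a server selects one of these request types when building its TLS credentials, assuming the C++ grpc::SslServerCredentialsOptions API; the helper name and the key/cert arguments are placeholders, not part of this change:

#include <grpcpp/grpcpp.h>
#include <grpcpp/security/server_credentials.h>

// Builds server credentials that request and verify a client certificate
// (mutual TLS), i.e. GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY.
std::shared_ptr<grpc::ServerCredentials> MakeMutualTlsCredentials(
    const grpc::string& root_certs, const grpc::string& server_key,
    const grpc::string& server_cert) {
  grpc::SslServerCredentialsOptions options(
      GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY);
  options.pem_root_certs = root_certs;  // roots used to verify client certs
  options.pem_key_cert_pairs.push_back({server_key, server_cert});
  return grpc::SslServerCredentials(options);
}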

@@ -384,7 +384,6 @@ class ServerCompletionQueue : public CompletionQueue {
grpc_cq_polling_type polling_type_;
friend class ServerBuilder;
friend class Server;
};
} // namespace grpc

@@ -429,8 +429,7 @@ static grpc_error* init_channel_elem(grpc_channel_element* elem,
? GRPC_MILLIS_INF_FUTURE
: DEFAULT_MAX_CONNECTION_IDLE_MS;
chand->idle_state = MAX_IDLE_STATE_INIT;
gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis,
GRPC_MILLIS_INF_PAST);
gpr_atm_no_barrier_store(&chand->last_enter_idle_time_millis, GPR_ATM_MIN);
for (size_t i = 0; i < args->channel_args->num_args; ++i) {
if (0 == strcmp(args->channel_args->args[i].key,
GRPC_ARG_MAX_CONNECTION_AGE_MS)) {

@@ -291,7 +291,7 @@ static void timer_list_init() {
static void timer_list_shutdown() {
size_t i;
run_some_expired_timers(
GPR_ATM_MAX, nullptr,
GRPC_MILLIS_INF_FUTURE, nullptr,
GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
for (i = 0; i < g_num_shards; i++) {
timer_shard* shard = &g_shards[i];
@@ -714,9 +714,10 @@ static grpc_timer_check_result timer_check(grpc_millis* next) {
#if GPR_ARCH_64
gpr_log(GPR_INFO,
"TIMER CHECK BEGIN: now=%" PRId64 " next=%s tls_min=%" PRId64
" glob_min=%" PRIdPTR,
" glob_min=%" PRId64,
now, next_str, min_timer,
gpr_atm_no_barrier_load((gpr_atm*)(&g_shared_mutables.min_timer)));
static_cast<grpc_millis>(gpr_atm_no_barrier_load(
(gpr_atm*)(&g_shared_mutables.min_timer))));
#else
gpr_log(GPR_INFO, "TIMER CHECK BEGIN: now=%" PRId64 " next=%s min=%" PRId64,
now, next_str, min_timer);

@@ -30,162 +30,29 @@
#include "src/cpp/server/health/health.pb.h"
namespace grpc {
//
// DefaultHealthCheckService
//
DefaultHealthCheckService::DefaultHealthCheckService() {
services_map_[""].SetServingStatus(SERVING);
}
void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) {
std::unique_lock<std::mutex> lock(mu_);
services_map_[service_name].SetServingStatus(serving ? SERVING : NOT_SERVING);
}
void DefaultHealthCheckService::SetServingStatus(bool serving) {
const ServingStatus status = serving ? SERVING : NOT_SERVING;
std::unique_lock<std::mutex> lock(mu_);
for (auto& p : services_map_) {
ServiceData& service_data = p.second;
service_data.SetServingStatus(status);
}
}
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const {
std::lock_guard<std::mutex> lock(mu_);
auto it = services_map_.find(service_name);
if (it == services_map_.end()) {
return NOT_FOUND;
}
const ServiceData& service_data = it->second;
return service_data.GetServingStatus();
}
void DefaultHealthCheckService::RegisterCallHandler(
const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
std::unique_lock<std::mutex> lock(mu_);
ServiceData& service_data = services_map_[service_name];
service_data.AddCallHandler(handler /* copies ref */);
handler->SendHealth(std::move(handler), service_data.GetServingStatus());
}
void DefaultHealthCheckService::UnregisterCallHandler(
const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
std::unique_lock<std::mutex> lock(mu_);
auto it = services_map_.find(service_name);
if (it == services_map_.end()) return;
ServiceData& service_data = it->second;
service_data.RemoveCallHandler(std::move(handler));
if (service_data.Unused()) {
services_map_.erase(it);
}
}
DefaultHealthCheckService::HealthCheckServiceImpl*
DefaultHealthCheckService::GetHealthCheckService(
std::unique_ptr<ServerCompletionQueue> cq) {
GPR_ASSERT(impl_ == nullptr);
impl_.reset(new HealthCheckServiceImpl(this, std::move(cq)));
return impl_.get();
}
//
// DefaultHealthCheckService::ServiceData
//
void DefaultHealthCheckService::ServiceData::SetServingStatus(
ServingStatus status) {
status_ = status;
for (auto& call_handler : call_handlers_) {
call_handler->SendHealth(call_handler /* copies ref */, status);
}
}
void DefaultHealthCheckService::ServiceData::AddCallHandler(
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
call_handlers_.insert(std::move(handler));
}
void DefaultHealthCheckService::ServiceData::RemoveCallHandler(
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler) {
call_handlers_.erase(std::move(handler));
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl
//
namespace {
const char kHealthCheckMethodName[] = "/grpc.health.v1.Health/Check";
const char kHealthWatchMethodName[] = "/grpc.health.v1.Health/Watch";
} // namespace
DefaultHealthCheckService::HealthCheckServiceImpl::HealthCheckServiceImpl(
DefaultHealthCheckService* database,
std::unique_ptr<ServerCompletionQueue> cq)
: database_(database), cq_(std::move(cq)) {
// Add Check() method.
check_method_ = new internal::RpcServiceMethod(
kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, nullptr);
AddMethod(check_method_);
// Add Watch() method.
watch_method_ = new internal::RpcServiceMethod(
kHealthWatchMethodName, internal::RpcMethod::SERVER_STREAMING, nullptr);
AddMethod(watch_method_);
// Create serving thread.
thread_ = std::unique_ptr<::grpc_core::Thread>(
new ::grpc_core::Thread("grpc_health_check_service", Serve, this));
}
DefaultHealthCheckService::HealthCheckServiceImpl::~HealthCheckServiceImpl() {
// We will reach here after the server starts shutting down.
shutdown_ = true;
{
std::unique_lock<std::mutex> lock(cq_shutdown_mu_);
cq_->Shutdown();
}
thread_->Join();
}
void DefaultHealthCheckService::HealthCheckServiceImpl::StartServingThread() {
thread_->Start();
}
void DefaultHealthCheckService::HealthCheckServiceImpl::Serve(void* arg) {
HealthCheckServiceImpl* service =
reinterpret_cast<HealthCheckServiceImpl*>(arg);
// TODO(juanlishen): This is a workaround to wait for the cq to be ready.
// Need to figure out why cq is not ready after service starts.
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_seconds(1, GPR_TIMESPAN)));
CheckCallHandler::CreateAndStart(service->cq_.get(), service->database_,
service);
WatchCallHandler::CreateAndStart(service->cq_.get(), service->database_,
service);
void* tag;
bool ok;
while (true) {
if (!service->cq_->Next(&tag, &ok)) {
// The completion queue is shutting down.
GPR_ASSERT(service->shutdown_);
break;
}
auto* next_step = static_cast<CallableTag*>(tag);
next_step->Run(ok);
}
}
bool DefaultHealthCheckService::HealthCheckServiceImpl::DecodeRequest(
const ByteBuffer& request, grpc::string* service_name) {
DefaultHealthCheckService* service)
: service_(service), method_(nullptr) {
internal::MethodHandler* handler =
new internal::RpcMethodHandler<HealthCheckServiceImpl, ByteBuffer,
ByteBuffer>(
std::mem_fn(&HealthCheckServiceImpl::Check), this);
method_ = new internal::RpcServiceMethod(
kHealthCheckMethodName, internal::RpcMethod::NORMAL_RPC, handler);
AddMethod(method_);
}
Status DefaultHealthCheckService::HealthCheckServiceImpl::Check(
ServerContext* context, const ByteBuffer* request, ByteBuffer* response) {
// Decode request.
std::vector<Slice> slices;
if (!request.Dump(&slices).ok()) return false;
if (!request->Dump(&slices).ok()) {
return Status(StatusCode::INVALID_ARGUMENT, "");
}
uint8_t* request_bytes = nullptr;
bool request_bytes_owned = false;
size_t request_size = 0;
@@ -197,13 +64,14 @@ bool DefaultHealthCheckService::HealthCheckServiceImpl::DecodeRequest(
request_size = slices[0].size();
} else {
request_bytes_owned = true;
request_bytes = static_cast<uint8_t*>(gpr_malloc(request.Length()));
request_bytes = static_cast<uint8_t*>(gpr_malloc(request->Length()));
uint8_t* copy_to = request_bytes;
for (size_t i = 0; i < slices.size(); i++) {
memcpy(copy_to, slices[i].begin(), slices[i].size());
copy_to += slices[i].size();
}
}
if (request_bytes != nullptr) {
pb_istream_t istream = pb_istream_from_buffer(request_bytes, request_size);
bool decode_status = pb_decode(
@@ -211,22 +79,26 @@ bool DefaultHealthCheckService::HealthCheckServiceImpl::DecodeRequest(
if (request_bytes_owned) {
gpr_free(request_bytes);
}
if (!decode_status) return false;
if (!decode_status) {
return Status(StatusCode::INVALID_ARGUMENT, "");
}
}
*service_name = request_struct.has_service ? request_struct.service : "";
return true;
}
bool DefaultHealthCheckService::HealthCheckServiceImpl::EncodeResponse(
ServingStatus status, ByteBuffer* response) {
// Check status from the associated default health checking service.
DefaultHealthCheckService::ServingStatus serving_status =
service_->GetServingStatus(
request_struct.has_service ? request_struct.service : "");
if (serving_status == DefaultHealthCheckService::NOT_FOUND) {
return Status(StatusCode::NOT_FOUND, "");
}
// Encode response
grpc_health_v1_HealthCheckResponse response_struct;
response_struct.has_status = true;
response_struct.status =
status == NOT_FOUND
? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN
: status == SERVING
? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING
: grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING;
serving_status == DefaultHealthCheckService::SERVING
? grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING
: grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING;
pb_ostream_t ostream;
memset(&ostream, 0, sizeof(ostream));
pb_encode(&ostream, grpc_health_v1_HealthCheckResponse_fields,
@@ -236,282 +108,48 @@ bool DefaultHealthCheckService::HealthCheckServiceImpl::EncodeResponse(
GRPC_SLICE_LENGTH(response_slice));
bool encode_status = pb_encode(
&ostream, grpc_health_v1_HealthCheckResponse_fields, &response_struct);
if (!encode_status) return false;
if (!encode_status) {
return Status(StatusCode::INTERNAL, "Failed to encode response.");
}
Slice encoded_response(response_slice, Slice::STEAL_REF);
ByteBuffer response_buffer(&encoded_response, 1);
response->Swap(&response_buffer);
return true;
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler
//
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
CreateAndStart(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service) {
std::shared_ptr<CallHandler> self =
std::make_shared<CheckCallHandler>(cq, database, service);
CheckCallHandler* handler = static_cast<CheckCallHandler*>(self.get());
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
if (service->shutdown_) return;
// Request a Check() call.
handler->next_ =
CallableTag(std::bind(&CheckCallHandler::OnCallReceived, handler,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
service->RequestAsyncUnary(0, &handler->ctx_, &handler->request_,
&handler->writer_, cq, cq, &handler->next_);
}
}
DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
CheckCallHandler(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service)
: cq_(cq), database_(database), service_(service), writer_(&ctx_) {}
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
if (!ok) {
// The value of ok being false means that the server is shutting down.
return;
}
// Spawn a new handler instance to serve the next new client. Every handler
// instance will deallocate itself when it's done.
CreateAndStart(cq_, database_, service_);
// Process request.
gpr_log(GPR_DEBUG, "[HCS %p] Health check started for handler %p", service_,
this);
grpc::string service_name;
grpc::Status status = Status::OK;
ByteBuffer response;
if (!service_->DecodeRequest(request_, &service_name)) {
status = Status(INVALID_ARGUMENT, "");
} else {
ServingStatus serving_status = database_->GetServingStatus(service_name);
if (serving_status == NOT_FOUND) {
status = Status(StatusCode::NOT_FOUND, "service name unknown");
} else if (!service_->EncodeResponse(serving_status, &response)) {
status = Status(INTERNAL, "");
}
}
// Send response.
{
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (!service_->shutdown_) {
next_ =
CallableTag(std::bind(&CheckCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
if (status.ok()) {
writer_.Finish(response, status, &next_);
} else {
writer_.FinishWithError(status, &next_);
}
}
}
}
void DefaultHealthCheckService::HealthCheckServiceImpl::CheckCallHandler::
OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
if (ok) {
gpr_log(GPR_DEBUG, "[HCS %p] Health check call finished for handler %p",
service_, this);
}
}
//
// DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler
//
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
CreateAndStart(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service) {
std::shared_ptr<CallHandler> self =
std::make_shared<WatchCallHandler>(cq, database, service);
WatchCallHandler* handler = static_cast<WatchCallHandler*>(self.get());
{
std::unique_lock<std::mutex> lock(service->cq_shutdown_mu_);
if (service->shutdown_) return;
// Request AsyncNotifyWhenDone().
handler->on_done_notified_ =
CallableTag(std::bind(&WatchCallHandler::OnDoneNotified, handler,
std::placeholders::_1, std::placeholders::_2),
self /* copies ref */);
handler->ctx_.AsyncNotifyWhenDone(&handler->on_done_notified_);
// Request a Watch() call.
handler->next_ =
CallableTag(std::bind(&WatchCallHandler::OnCallReceived, handler,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
service->RequestAsyncServerStreaming(1, &handler->ctx_, &handler->request_,
&handler->stream_, cq, cq,
&handler->next_);
}
return Status::OK;
}
DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
WatchCallHandler(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service)
: cq_(cq),
database_(database),
service_(service),
stream_(&ctx_),
call_state_(WAITING_FOR_CALL) {}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
OnCallReceived(std::shared_ptr<CallHandler> self, bool ok) {
if (ok) {
call_state_ = CALL_RECEIVED;
} else {
// AsyncNotifyWhenDone() needs to be called before the call starts, but the
// tag will not pop out if the call never starts (
// https://github.com/grpc/grpc/issues/10136). So we need to manually
// release the ownership of the handler in this case.
GPR_ASSERT(on_done_notified_.ReleaseHandler() != nullptr);
}
if (!ok || shutdown_) {
// The value of ok being false means that the server is shutting down.
Shutdown(std::move(self), "OnCallReceived");
return;
}
// Spawn a new handler instance to serve the next new client. Every handler
// instance will deallocate itself when it's done.
CreateAndStart(cq_, database_, service_);
// Parse request.
if (!service_->DecodeRequest(request_, &service_name_)) {
on_finish_done_ =
CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Finish(Status(INVALID_ARGUMENT, ""), &on_finish_done_);
call_state_ = FINISH_CALLED;
return;
}
// Register the call for updates to the service.
gpr_log(GPR_DEBUG,
"[HCS %p] Health check watch started for service \"%s\" "
"(handler: %p)",
service_, service_name_.c_str(), this);
database_->RegisterCallHandler(service_name_, std::move(self));
}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendHealth(std::shared_ptr<CallHandler> self, ServingStatus status) {
std::unique_lock<std::mutex> lock(mu_);
// If there's already a send in flight, cache the new status, and
// we'll start a new send for it when the one in flight completes.
if (send_in_flight_) {
pending_status_ = status;
return;
}
// Start a send.
SendHealthLocked(std::move(self), status);
}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
SendHealthLocked(std::shared_ptr<CallHandler> self, ServingStatus status) {
std::unique_lock<std::mutex> cq_lock(service_->cq_shutdown_mu_);
if (service_->shutdown_) {
cq_lock.release()->unlock();
Shutdown(std::move(self), "SendHealthLocked");
return;
}
send_in_flight_ = true;
call_state_ = SEND_MESSAGE_PENDING;
// Construct response.
ByteBuffer response;
if (!service_->EncodeResponse(status, &response)) {
on_finish_done_ =
CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Finish(Status(INTERNAL, ""), &on_finish_done_);
return;
}
next_ = CallableTag(std::bind(&WatchCallHandler::OnSendHealthDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
stream_.Write(response, &next_);
DefaultHealthCheckService::DefaultHealthCheckService() {
services_map_.emplace("", true);
}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
OnSendHealthDone(std::shared_ptr<CallHandler> self, bool ok) {
if (!ok || shutdown_) {
Shutdown(std::move(self), "OnSendHealthDone");
return;
}
call_state_ = CALL_RECEIVED;
{
std::unique_lock<std::mutex> lock(mu_);
send_in_flight_ = false;
// If we got a new status since we started the last send, start a
// new send for it.
if (pending_status_ != NOT_FOUND) {
auto status = pending_status_;
pending_status_ = NOT_FOUND;
SendHealthLocked(std::move(self), status);
}
}
void DefaultHealthCheckService::SetServingStatus(
const grpc::string& service_name, bool serving) {
std::lock_guard<std::mutex> lock(mu_);
services_map_[service_name] = serving;
}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
OnDoneNotified(std::shared_ptr<CallHandler> self, bool ok) {
GPR_ASSERT(ok);
done_notified_ = true;
if (ctx_.IsCancelled()) {
is_cancelled_ = true;
void DefaultHealthCheckService::SetServingStatus(bool serving) {
std::lock_guard<std::mutex> lock(mu_);
for (auto iter = services_map_.begin(); iter != services_map_.end(); ++iter) {
iter->second = serving;
}
gpr_log(GPR_DEBUG,
"[HCS %p] Healt check call is notified done (handler: %p, "
"is_cancelled: %d).",
service_, this, static_cast<int>(is_cancelled_));
Shutdown(std::move(self), "OnDoneNotified");
}
// TODO(roth): This method currently assumes that there will be only one
// thread polling the cq and invoking the corresponding callbacks. If
// that changes, we will need to add synchronization here.
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
Shutdown(std::shared_ptr<CallHandler> self, const char* reason) {
if (!shutdown_) {
gpr_log(GPR_DEBUG,
"[HCS %p] Shutting down the handler (service_name: \"%s\", "
"handler: %p, reason: %s).",
service_, service_name_.c_str(), this, reason);
shutdown_ = true;
}
// OnCallReceived() may be called after OnDoneNotified(), so we need to
// try to Finish() every time we are in Shutdown().
if (call_state_ >= CALL_RECEIVED && call_state_ < FINISH_CALLED) {
std::unique_lock<std::mutex> lock(service_->cq_shutdown_mu_);
if (!service_->shutdown_) {
on_finish_done_ =
CallableTag(std::bind(&WatchCallHandler::OnFinishDone, this,
std::placeholders::_1, std::placeholders::_2),
std::move(self));
// TODO(juanlishen): Maybe add a message proto for the client to
// explicitly cancel the stream so that we can return OK status in such
// cases.
stream_.Finish(Status::CANCELLED, &on_finish_done_);
call_state_ = FINISH_CALLED;
}
DefaultHealthCheckService::ServingStatus
DefaultHealthCheckService::GetServingStatus(
const grpc::string& service_name) const {
std::lock_guard<std::mutex> lock(mu_);
const auto& iter = services_map_.find(service_name);
if (iter == services_map_.end()) {
return NOT_FOUND;
}
return iter->second ? SERVING : NOT_SERVING;
}
void DefaultHealthCheckService::HealthCheckServiceImpl::WatchCallHandler::
OnFinishDone(std::shared_ptr<CallHandler> self, bool ok) {
if (ok) {
gpr_log(GPR_DEBUG,
"[HCS %p] Health check call finished (service_name: \"%s\", "
"handler: %p).",
service_, service_name_.c_str(), this);
}
DefaultHealthCheckService::HealthCheckServiceImpl*
DefaultHealthCheckService::GetHealthCheckService() {
GPR_ASSERT(impl_ == nullptr);
impl_.reset(new HealthCheckServiceImpl(this));
return impl_.get();
}
} // namespace grpc
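
After this change the default health check service is a plain synchronous service backed by a mutex-guarded map from service name to a serving bool. A rough sketch of how an application drives it, using the EnableDefaultHealthCheckService, GetHealthCheckService, and SetServingStatus calls that also appear in the test changes further down; the port and service name are placeholders:

#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>

void RunServerWithHealthChecking() {
  // Must be enabled before the server is built. Per the server_cc.cc change
  // below, the default service is only registered when the server also has
  // synchronous methods (a sync completion queue).
  grpc::EnableDefaultHealthCheckService(true);

  grpc::ServerBuilder builder;
  builder.AddListeningPort("0.0.0.0:50051", grpc::InsecureServerCredentials());
  // builder.RegisterService(&some_sync_service);  // app services go here
  std::unique_ptr<grpc::Server> server(builder.BuildAndStart());

  // "" tracks overall server health; other keys are fully qualified service
  // names. The service name below is illustrative.
  grpc::HealthCheckServiceInterface* health = server->GetHealthCheckService();
  if (health != nullptr) {
    health->SetServingStatus("my.package.MyService", true);
  }
  server->Wait();
}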

@@ -19,268 +19,42 @@
#ifndef GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
#define GRPC_INTERNAL_CPP_SERVER_DEFAULT_HEALTH_CHECK_SERVICE_H
#include <atomic>
#include <mutex>
#include <set>
#include <grpc/support/log.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>
#include <grpcpp/impl/codegen/async_generic_service.h>
#include <grpcpp/impl/codegen/async_unary_call.h>
#include <grpcpp/impl/codegen/service_type.h>
#include <grpcpp/support/byte_buffer.h>
#include "src/core/lib/gprpp/thd.h"
namespace grpc {
// Default implementation of HealthCheckServiceInterface. Server will create and
// own it.
class DefaultHealthCheckService final : public HealthCheckServiceInterface {
public:
enum ServingStatus { NOT_FOUND, SERVING, NOT_SERVING };
// The service impl to register with the server.
class HealthCheckServiceImpl : public Service {
public:
// Base class for call handlers.
class CallHandler {
public:
virtual ~CallHandler() = default;
virtual void SendHealth(std::shared_ptr<CallHandler> self,
ServingStatus status) = 0;
};
explicit HealthCheckServiceImpl(DefaultHealthCheckService* service);
HealthCheckServiceImpl(DefaultHealthCheckService* database,
std::unique_ptr<ServerCompletionQueue> cq);
~HealthCheckServiceImpl();
void StartServingThread();
Status Check(ServerContext* context, const ByteBuffer* request,
ByteBuffer* response);
private:
// A tag that can be called with a bool argument. It's tailored for
// CallHandler's use. Before being used, it should be constructed with a
// method of CallHandler and a shared pointer to the handler. The
// shared pointer will be moved to the invoked function and the function
// can only be invoked once. That makes ref counting of the handler easier,
// because the shared pointer is not bound to the function and can be gone
// once the invoked function returns (if not used any more).
class CallableTag {
public:
using HandlerFunction =
std::function<void(std::shared_ptr<CallHandler>, bool)>;
CallableTag() {}
CallableTag(HandlerFunction func, std::shared_ptr<CallHandler> handler)
: handler_function_(std::move(func)), handler_(std::move(handler)) {
GPR_ASSERT(handler_function_ != nullptr);
GPR_ASSERT(handler_ != nullptr);
}
// Runs the tag. This should be called only once. The handler is no
// longer owned by this tag after this method is invoked.
void Run(bool ok) {
GPR_ASSERT(handler_function_ != nullptr);
GPR_ASSERT(handler_ != nullptr);
handler_function_(std::move(handler_), ok);
}
// Releases and returns the shared pointer to the handler.
std::shared_ptr<CallHandler> ReleaseHandler() {
return std::move(handler_);
}
private:
HandlerFunction handler_function_ = nullptr;
std::shared_ptr<CallHandler> handler_;
};
// Call handler for Check method.
// Each handler takes care of one call. It contains per-call data and it
// will access the members of the parent class (i.e.,
// DefaultHealthCheckService) for per-service health data.
class CheckCallHandler : public CallHandler {
public:
// Instantiates a CheckCallHandler and requests the next health check
// call. The handler object will manage its own lifetime, so no action is
// needed from the caller any more regarding that object.
static void CreateAndStart(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service);
// This ctor is public because we want to use std::make_shared<> in
// CreateAndStart(). This ctor shouldn't be used elsewhere.
CheckCallHandler(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service);
// Not used for Check.
void SendHealth(std::shared_ptr<CallHandler> self,
ServingStatus status) override {}
private:
// Called when we receive a call.
// Spawns a new handler so that we can keep servicing future calls.
void OnCallReceived(std::shared_ptr<CallHandler> self, bool ok);
// Called when Finish() is done.
void OnFinishDone(std::shared_ptr<CallHandler> self, bool ok);
// The members passed down from HealthCheckServiceImpl.
ServerCompletionQueue* cq_;
DefaultHealthCheckService* database_;
HealthCheckServiceImpl* service_;
ByteBuffer request_;
GenericServerAsyncResponseWriter writer_;
ServerContext ctx_;
CallableTag next_;
};
// Call handler for Watch method.
// Each handler takes care of one call. It contains per-call data and it
// will access the members of the parent class (i.e.,
// DefaultHealthCheckService) for per-service health data.
class WatchCallHandler : public CallHandler {
public:
// Instantiates a WatchCallHandler and requests the next health check
// call. The handler object will manage its own lifetime, so no action is
// needed from the caller any more regarding that object.
static void CreateAndStart(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service);
// This ctor is public because we want to use std::make_shared<> in
// CreateAndStart(). This ctor shouldn't be used elsewhere.
WatchCallHandler(ServerCompletionQueue* cq,
DefaultHealthCheckService* database,
HealthCheckServiceImpl* service);
void SendHealth(std::shared_ptr<CallHandler> self,
ServingStatus status) override;
private:
// Called when we receive a call.
// Spawns a new handler so that we can keep servicing future calls.
void OnCallReceived(std::shared_ptr<CallHandler> self, bool ok);
// Requires holding mu_.
void SendHealthLocked(std::shared_ptr<CallHandler> self,
ServingStatus status);
// When sending a health result finishes.
void OnSendHealthDone(std::shared_ptr<CallHandler> self, bool ok);
// Called when Finish() is done.
void OnFinishDone(std::shared_ptr<CallHandler> self, bool ok);
// Called when AsyncNotifyWhenDone() notifies us.
void OnDoneNotified(std::shared_ptr<CallHandler> self, bool ok);
void Shutdown(std::shared_ptr<CallHandler> self, const char* reason);
// The members passed down from HealthCheckServiceImpl.
ServerCompletionQueue* cq_;
DefaultHealthCheckService* database_;
HealthCheckServiceImpl* service_;
ByteBuffer request_;
grpc::string service_name_;
GenericServerAsyncWriter stream_;
ServerContext ctx_;
std::mutex mu_;
bool send_in_flight_ = false; // Guarded by mu_.
ServingStatus pending_status_ = NOT_FOUND; // Guarded by mu_.
// The state of the RPC progress.
enum CallState {
WAITING_FOR_CALL,
CALL_RECEIVED,
SEND_MESSAGE_PENDING,
FINISH_CALLED
} call_state_;
bool shutdown_ = false;
bool done_notified_ = false;
bool is_cancelled_ = false;
CallableTag next_;
CallableTag on_done_notified_;
CallableTag on_finish_done_;
};
// Handles the incoming requests and drives the completion queue in a loop.
static void Serve(void* arg);
// Returns true on success.
static bool DecodeRequest(const ByteBuffer& request,
grpc::string* service_name);
static bool EncodeResponse(ServingStatus status, ByteBuffer* response);
// Needed to appease Windows compilers, which don't seem to allow
// nested classes to access protected members in the parent's
// superclass.
using Service::RequestAsyncServerStreaming;
using Service::RequestAsyncUnary;
DefaultHealthCheckService* database_;
std::unique_ptr<ServerCompletionQueue> cq_;
internal::RpcServiceMethod* check_method_;
internal::RpcServiceMethod* watch_method_;
// To synchronize the operations related to shutdown state of cq_, so that
// we don't enqueue new tags into cq_ after it is already shut down.
std::mutex cq_shutdown_mu_;
std::atomic_bool shutdown_{false};
std::unique_ptr<::grpc_core::Thread> thread_;
const DefaultHealthCheckService* const service_;
internal::RpcServiceMethod* method_;
};
DefaultHealthCheckService();
void SetServingStatus(const grpc::string& service_name,
bool serving) override;
void SetServingStatus(bool serving) override;
enum ServingStatus { NOT_FOUND, SERVING, NOT_SERVING };
ServingStatus GetServingStatus(const grpc::string& service_name) const;
HealthCheckServiceImpl* GetHealthCheckService(
std::unique_ptr<ServerCompletionQueue> cq);
HealthCheckServiceImpl* GetHealthCheckService();
private:
// Stores the current serving status of a service and any call
// handlers registered for updates when the service's status changes.
class ServiceData {
public:
void SetServingStatus(ServingStatus status);
ServingStatus GetServingStatus() const { return status_; }
void AddCallHandler(
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
void RemoveCallHandler(
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
bool Unused() const {
return call_handlers_.empty() && status_ == NOT_FOUND;
}
private:
ServingStatus status_ = NOT_FOUND;
std::set<std::shared_ptr<HealthCheckServiceImpl::CallHandler>>
call_handlers_;
};
void RegisterCallHandler(
const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
void UnregisterCallHandler(
const grpc::string& service_name,
std::shared_ptr<HealthCheckServiceImpl::CallHandler> handler);
mutable std::mutex mu_;
std::map<grpc::string, ServiceData> services_map_; // Guarded by mu_.
std::map<grpc::string, bool> services_map_;
std::unique_ptr<HealthCheckServiceImpl> impl_;
};

@@ -2,6 +2,7 @@
/* Generated by nanopb-0.3.7-dev */
#include "src/cpp/server/health/health.pb.h"
/* @@protoc_insertion_point(includes) */
#if PB_PROTO_HEADER_VERSION != 30
#error Regenerate this file with the current version of nanopb generator.

@@ -17,12 +17,11 @@ extern "C" {
typedef enum _grpc_health_v1_HealthCheckResponse_ServingStatus {
grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN = 0,
grpc_health_v1_HealthCheckResponse_ServingStatus_SERVING = 1,
grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING = 2,
grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN = 3
grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING = 2
} grpc_health_v1_HealthCheckResponse_ServingStatus;
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MIN grpc_health_v1_HealthCheckResponse_ServingStatus_UNKNOWN
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MAX grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_ARRAYSIZE ((grpc_health_v1_HealthCheckResponse_ServingStatus)(grpc_health_v1_HealthCheckResponse_ServingStatus_SERVICE_UNKNOWN+1))
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_MAX grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING
#define _grpc_health_v1_HealthCheckResponse_ServingStatus_ARRAYSIZE ((grpc_health_v1_HealthCheckResponse_ServingStatus)(grpc_health_v1_HealthCheckResponse_ServingStatus_NOT_SERVING+1))
/* Struct definitions */
typedef struct _grpc_health_v1_HealthCheckRequest {

@@ -559,20 +559,16 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
// Only create default health check service when user did not provide an
// explicit one.
ServerCompletionQueue* health_check_cq = nullptr;
DefaultHealthCheckService::HealthCheckServiceImpl*
default_health_check_service_impl = nullptr;
if (health_check_service_ == nullptr && !health_check_service_disabled_ &&
DefaultHealthCheckServiceEnabled()) {
auto* default_hc_service = new DefaultHealthCheckService;
health_check_service_.reset(default_hc_service);
health_check_cq = new ServerCompletionQueue(GRPC_CQ_DEFAULT_POLLING);
grpc_server_register_completion_queue(server_, health_check_cq->cq(),
nullptr);
default_health_check_service_impl =
default_hc_service->GetHealthCheckService(
std::unique_ptr<ServerCompletionQueue>(health_check_cq));
RegisterService(nullptr, default_health_check_service_impl);
if (sync_server_cqs_ == nullptr || sync_server_cqs_->empty()) {
gpr_log(GPR_INFO,
"Default health check service disabled at async-only server.");
} else {
auto* default_hc_service = new DefaultHealthCheckService;
health_check_service_.reset(default_hc_service);
RegisterService(nullptr, default_hc_service->GetHealthCheckService());
}
}
grpc_server_start(server_);
@@ -587,9 +583,6 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
new UnimplementedAsyncRequest(this, cqs[i]);
}
}
if (health_check_cq != nullptr) {
new UnimplementedAsyncRequest(this, health_check_cq);
}
}
// If this server has any support for synchronous methods (has any sync
@@ -602,10 +595,6 @@ void Server::Start(ServerCompletionQueue** cqs, size_t num_cqs) {
for (auto it = sync_req_mgrs_.begin(); it != sync_req_mgrs_.end(); it++) {
(*it)->Start();
}
if (default_health_check_service_impl != nullptr) {
default_health_check_service_impl->StartServingThread();
}
}
void Server::ShutdownInternal(gpr_timespec deadline) {

@@ -34,30 +34,10 @@ message HealthCheckResponse {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
SERVICE_UNKNOWN = 3; // Used only by the Watch method.
}
ServingStatus status = 1;
}
service Health {
// If the requested service is unknown, the call will fail with status
// NOT_FOUND.
rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
// serving status. It will then subsequently send a new message whenever
// the service's serving status changes.
//
// If the requested service is unknown when the call is received, the
// server will send a message setting the serving status to
// SERVICE_UNKNOWN but will *not* terminate the call. If at some
// future point, the serving status of the service becomes known, the
// server will send a new message with the service's serving status.
//
// If the call terminates with status UNIMPLEMENTED, then clients
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
rpc Watch(HealthCheckRequest) returns (stream HealthCheckResponse);
}
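
With the Watch method and the SERVICE_UNKNOWN status removed by this change, clients are back to polling the unary Check method. For reference, a minimal client-side probe against the generated C++ stub, assuming the generated health.grpc.pb.h header as included by the repo's tests; the target address and service name are placeholders:

#include <grpcpp/grpcpp.h>
#include "src/proto/grpc/health/v1/health.grpc.pb.h"

using grpc::health::v1::Health;
using grpc::health::v1::HealthCheckRequest;
using grpc::health::v1::HealthCheckResponse;

// Returns true when the named service reports SERVING. An unknown service
// fails with NOT_FOUND, per the comment on Check above.
bool IsServing(const grpc::string& target, const grpc::string& service) {
  auto channel =
      grpc::CreateChannel(target, grpc::InsecureChannelCredentials());
  std::unique_ptr<Health::Stub> stub = Health::NewStub(channel);
  HealthCheckRequest request;
  request.set_service(service);  // empty string checks overall server health
  HealthCheckResponse response;
  grpc::ClientContext context;
  grpc::Status status = stub->Check(&context, request, &response);
  return status.ok() && response.status() == HealthCheckResponse::SERVING;
}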

@@ -248,11 +248,7 @@ int main(int argc, char** argv) {
grpc_determine_iomgr_platform();
grpc_iomgr_platform_init();
gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
#ifndef GPR_WINDOWS
/* Skip this test on Windows until we figure out why it fails */
/* https://github.com/grpc/grpc/issues/16417 */
long_running_service_cleanup_test();
#endif // GPR_WINDOWS
add_test();
destruction_test();
grpc_iomgr_platform_shutdown();

@@ -64,29 +64,6 @@ class HealthCheckServiceImpl : public ::grpc::health::v1::Health::Service {
return Status::OK;
}
Status Watch(ServerContext* context, const HealthCheckRequest* request,
::grpc::ServerWriter<HealthCheckResponse>* writer) override {
auto last_state = HealthCheckResponse::UNKNOWN;
while (!context->IsCancelled()) {
{
std::lock_guard<std::mutex> lock(mu_);
HealthCheckResponse response;
auto iter = status_map_.find(request->service());
if (iter == status_map_.end()) {
response.set_status(response.SERVICE_UNKNOWN);
} else {
response.set_status(iter->second);
}
if (response.status() != last_state) {
writer->Write(response, ::grpc::WriteOptions());
}
}
gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(1000, GPR_TIMESPAN)));
}
return Status::OK;
}
void SetStatus(const grpc::string& service_name,
HealthCheckResponse::ServingStatus status) {
std::lock_guard<std::mutex> lock(mu_);
@@ -129,6 +106,14 @@ class CustomHealthCheckService : public HealthCheckServiceInterface {
HealthCheckServiceImpl* impl_; // not owned
};
void LoopCompletionQueue(ServerCompletionQueue* cq) {
void* tag;
bool ok;
while (cq->Next(&tag, &ok)) {
abort(); // Nothing should come out of the cq.
}
}
class HealthServiceEnd2endTest : public ::testing::Test {
protected:
HealthServiceEnd2endTest() {}
@@ -233,33 +218,6 @@ class HealthServiceEnd2endTest : public ::testing::Test {
Status(StatusCode::NOT_FOUND, ""));
}
void VerifyHealthCheckServiceStreaming() {
const grpc::string kServiceName("service_name");
HealthCheckServiceInterface* service = server_->GetHealthCheckService();
// Start Watch for service.
ClientContext context;
HealthCheckRequest request;
request.set_service(kServiceName);
std::unique_ptr<::grpc::ClientReaderInterface<HealthCheckResponse>> reader =
hc_stub_->Watch(&context, request);
// Initial response will be SERVICE_UNKNOWN.
HealthCheckResponse response;
EXPECT_TRUE(reader->Read(&response));
EXPECT_EQ(response.SERVICE_UNKNOWN, response.status());
response.Clear();
// Now set service to NOT_SERVING and make sure we get an update.
service->SetServingStatus(kServiceName, false);
EXPECT_TRUE(reader->Read(&response));
EXPECT_EQ(response.NOT_SERVING, response.status());
response.Clear();
// Now set service to SERVING and make sure we get another update.
service->SetServingStatus(kServiceName, true);
EXPECT_TRUE(reader->Read(&response));
EXPECT_EQ(response.SERVING, response.status());
// Finish call.
context.TryCancel();
}
TestServiceImpl echo_test_service_;
HealthCheckServiceImpl health_check_service_impl_;
std::unique_ptr<Health::Stub> hc_stub_;
@@ -287,7 +245,6 @@ TEST_F(HealthServiceEnd2endTest, DefaultHealthService) {
EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
SetUpServer(true, false, false, nullptr);
VerifyHealthCheckService();
VerifyHealthCheckServiceStreaming();
// The default service has a size limit of the service name.
const grpc::string kTooLongServiceName(201, 'x');
@@ -295,6 +252,22 @@
Status(StatusCode::INVALID_ARGUMENT, ""));
}
// The server has no sync service.
TEST_F(HealthServiceEnd2endTest, DefaultHealthServiceAsyncOnly) {
EnableDefaultHealthCheckService(true);
EXPECT_TRUE(DefaultHealthCheckServiceEnabled());
SetUpServer(false, true, false, nullptr);
cq_thread_ = std::thread(LoopCompletionQueue, cq_.get());
HealthCheckServiceInterface* default_service =
server_->GetHealthCheckService();
EXPECT_TRUE(default_service == nullptr);
ResetStubs();
SendHealthCheckRpc("", Status(StatusCode::UNIMPLEMENTED, ""));
}
// Provide an empty service to disable the default service.
TEST_F(HealthServiceEnd2endTest, ExplicitlyDisableViaOverride) {
EnableDefaultHealthCheckService(true);
@@ -323,7 +296,6 @@ TEST_F(HealthServiceEnd2endTest, ExplicitlyOverride) {
ResetStubs();
VerifyHealthCheckService();
VerifyHealthCheckServiceStreaming();
}
} // namespace

@@ -15,6 +15,7 @@
licenses(["notice"]) # Apache v2
load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_cc_library", "grpc_cc_binary", "grpc_package")
load("//test/cpp/qps:qps_benchmark_script.bzl", "qps_json_driver_batch", "json_run_localhost_batch")
grpc_package(name = "test/cpp/qps")
@@ -22,8 +23,8 @@ grpc_cc_library(
name = "parse_json",
srcs = ["parse_json.cc"],
hdrs = ["parse_json.h"],
deps = ["//:grpc++"],
external_deps = ["protobuf"],
deps = ["//:grpc++"],
)
grpc_cc_library(
@@ -31,16 +32,19 @@ grpc_cc_library(
srcs = [
"client_async.cc",
"client_sync.cc",
"qps_server_builder.cc",
"qps_worker.cc",
"server_async.cc",
"server_sync.cc",
"qps_server_builder.cc",
],
hdrs = [
"client.h",
"qps_server_builder.h",
"qps_worker.h",
"server.h",
"qps_server_builder.h",
],
external_deps = [
"gflags",
],
deps = [
":histogram",
@@ -56,11 +60,8 @@ grpc_cc_library(
"//test/core/end2end:ssl_test_data",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_util",
"//test/cpp/util:test_config",
],
external_deps = [
"gflags",
"//test/cpp/util:test_util",
],
)
@@ -97,15 +98,15 @@ grpc_cc_library(
hdrs = [
"benchmark_config.h",
],
external_deps = [
"gflags",
],
deps = [
":driver_impl",
":histogram",
"//:grpc++",
"//src/proto/grpc/testing:control_proto",
],
external_deps = [
"gflags",
],
)
grpc_cc_library(
@@ -117,6 +118,21 @@ grpc_cc_library(
deps = ["//test/core/util:grpc_test_util"],
)
grpc_cc_binary(
name = "qps_json_driver",
srcs = ["qps_json_driver.cc"],
external_deps = [
"gflags",
],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
grpc_cc_test(
name = "inproc_sync_unary_ping_pong_test",
srcs = ["inproc_sync_unary_ping_pong_test.cc"],
@@ -135,17 +151,9 @@ grpc_cc_library(
deps = ["//:grpc++"],
)
grpc_cc_binary(
name = "json_run_localhost",
srcs = ["json_run_localhost.cc"],
deps = [
"//:gpr",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
qps_json_driver_batch()
json_run_localhost_batch()
grpc_cc_test(
name = "qps_interarrival_test",
@@ -157,24 +165,10 @@ grpc_cc_test(
],
)
grpc_cc_binary(
name = "qps_json_driver",
srcs = ["qps_json_driver.cc"],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
external_deps = [
"gflags",
],
)
grpc_cc_test(
name = "qps_openloop_test",
srcs = ["qps_openloop_test.cc"],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
deps = [
":benchmark_config",
":driver_impl",
@@ -182,7 +176,6 @@ grpc_cc_test(
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
data = ["//third_party/toolchains:RBE_USE_MACHINE_TYPE_LARGE"],
)
grpc_cc_test(

@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import pipes
import shutil
@@ -68,62 +69,66 @@ def maybe_exclude_gcov(scenario_json):
return ['gcov']
return []
print yaml.dump({
'tests': [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] + maybe_exclude_gcov(scenario_json),
'timeout_seconds': 2*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'qps_json_driver',
'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds': 6*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, True),
'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
'timeout_seconds': 10*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
]
})
def generate_yaml():
return {
'tests': [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'] + maybe_exclude_gcov(scenario_json),
'timeout_seconds': 2*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'qps_json_driver',
'shortname': 'qps_json_driver:inproc_%s' % scenario_json['name'],
'args': ['--run_inproc', '--scenarios_json', _scenario_json_string(scenario_json, False)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, False),
'exclude_configs': ['tsan', 'asan'],
'timeout_seconds': 6*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', [])
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'inproc' in scenario_json.get('CATEGORIES', [])
] + [
{
'name': 'json_run_localhost',
'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
'ci_platforms': ['linux'],
'platforms': ['linux'],
'flaky': False,
'language': 'c++',
'boringssl': True,
'defaults': 'boringssl',
'cpu_cost': guess_cpu(scenario_json, True),
'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
'timeout_seconds': 10*60,
'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
'auto_timeout_scaling': False
}
for scenario_json in scenario_config.CXXLanguage().scenarios()
if 'scalable' in scenario_json.get('CATEGORIES', [])
]
}
print(yaml.dump(generate_yaml()))
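Wrapping the table in generate_yaml() is what lets the generator scripts further down import this module instead of parsing its printed YAML. A minimal sketch of such a consumer, assuming only the module name and dict keys used above:

import gen_build_yaml as gen

# The table is a plain dict; 'tests' holds one entry per generated test.
tests = gen.generate_yaml()['tests']
# Same filter the scenario generators below apply.
driver_tests = [t for t in tests if t['name'] == 'qps_json_driver']
print('%d qps_json_driver entries' % len(driver_tests))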

@ -0,0 +1,38 @@
#!/usr/bin/env python2.7
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
json_run_localhost_scenarios = \
[item for item in all_scenario_set if item['name'] == 'qps_json_driver']
json_run_localhost_arg_set = \
[item['args'][2] for item in json_run_localhost_scenarios \
if 'args' in item and len(item['args']) > 2]
deserialized_scenarios = [json.loads(item)['scenarios'][0] \
for item in json_run_localhost_arg_set]
all_scenarios = {scenario['name'].encode('ascii', 'ignore'): \
'\'{\'scenarios\' : [' + json.dumps(scenario) + ']}\'' \
for scenario in deserialized_scenarios}
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('json_run_localhost_scenarios.bzl', 'wb') as f:
f.write('JSON_RUN_LOCALHOST_SCENARIOS = ' + serialized_scenarios_str + '\n')
generate_args()
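The diff of the generated json_run_localhost_scenarios.bzl that follows is suppressed because its lines are very long; for orientation, the file this script writes is a single dict literal of roughly this shape (hypothetical key, scenario JSON elided down to a name field -- a sketch, not actual generated output):

JSON_RUN_LOCALHOST_SCENARIOS = {
    # Hypothetical key; real keys come from the scenarios in gen_build_yaml.
    'cpp_example_scenario':
        "'{'scenarios' : [{\"name\": \"cpp_example_scenario\"}]}'",
}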

File diff suppressed because one or more lines are too long

@ -0,0 +1,72 @@
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is for the gRPC build system. This isn't intended to be used outside of
# the BUILD file for gRPC. It contains the mapping for the template system we
# use to generate other platforms' build system files.
#
# Please consider that there should be a high bar for additions and changes to
# this file.
# Each rule listed must be re-written for Google's internal build system, and
# each change must be ported from one to the other.
#
load("//bazel:grpc_build_system.bzl", "grpc_cc_binary", "grpc_cc_test")
load("//test/cpp/qps:qps_json_driver_scenarios.bzl", "QPS_JSON_DRIVER_SCENARIOS")
load("//test/cpp/qps:json_run_localhost_scenarios.bzl", "JSON_RUN_LOCALHOST_SCENARIOS")
def qps_json_driver_batch():
for scenario in QPS_JSON_DRIVER_SCENARIOS:
grpc_cc_test(
name = "qps_json_driver_test_%s" % scenario,
srcs = ["qps_json_driver.cc"],
args = [
"--run_inproc",
"--scenarios_json",
QPS_JSON_DRIVER_SCENARIOS[scenario],
],
external_deps = [
"gflags",
],
deps = [
":benchmark_config",
":driver_impl",
"//:grpc++",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
def json_run_localhost_batch():
for scenario in JSON_RUN_LOCALHOST_SCENARIOS:
grpc_cc_test(
name = "json_run_localhost_%s" % scenario,
srcs = ["json_run_localhost.cc"],
args = [
"--scenarios_json",
JSON_RUN_LOCALHOST_SCENARIOS[scenario],
],
data = [
"//test/cpp/qps:qps_json_driver",
"//test/cpp/qps:qps_worker",
],
deps = [
"//:gpr",
"//test/core/util:gpr_test_util",
"//test/core/util:grpc_test_util",
"//test/cpp/util:test_config",
"//test/cpp/util:test_util",
],
)
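The two batch macros above are meant to be called from the qps BUILD file, emitting one grpc_cc_test per entry in the loaded scenario dicts. The call site presumably looks roughly like this (label path assumed from this file living under test/cpp/qps):

load(
    "//test/cpp/qps:qps_benchmark_script.bzl",
    "json_run_localhost_batch",
    "qps_json_driver_batch",
)

# One test target per generated scenario.
qps_json_driver_batch()
json_run_localhost_batch()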

@ -0,0 +1,38 @@
#!/usr/bin/env python2.7
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gen_build_yaml as gen
import json
def generate_args():
all_scenario_set = gen.generate_yaml()
all_scenario_set = all_scenario_set['tests']
qps_json_driver_scenario_set = \
[item for item in all_scenario_set if item['name'] == 'qps_json_driver']
qps_json_driver_arg_set = \
[item['args'][2] for item in qps_json_driver_scenario_set \
if 'args' in item and len(item['args']) > 2]
deserialized_scenarios = [json.loads(item)['scenarios'][0] \
for item in qps_json_driver_arg_set]
all_scenarios = {scenario['name'].encode('ascii', 'ignore'): \
'\'{\'scenarios\' : [' + json.dumps(scenario) + ']}\'' \
for scenario in deserialized_scenarios}
serialized_scenarios_str = str(all_scenarios).encode('ascii', 'ignore')
with open('qps_json_driver_scenarios.bzl', 'w') as f:
f.write('QPS_JSON_DRIVER_SCENARIOS = ' + serialized_scenarios_str + '\n')
generate_args()

File diff suppressed because one or more lines are too long

@ -16,7 +16,6 @@
set -ex
readonly NANOPB_ALTS_TMP_OUTPUT="$(mktemp -d)"
readonly NANOPB_HEALTH_TMP_OUTPUT="$(mktemp -d)"
readonly NANOPB_TMP_OUTPUT="$(mktemp -d)"
readonly PROTOBUF_INSTALL_PREFIX="$(mktemp -d)"
@ -68,23 +67,6 @@ if ! diff -r "$NANOPB_TMP_OUTPUT" src/core/ext/filters/client_channel/lb_policy/
exit 2
fi
#
# checks for health.proto
#
readonly HEALTH_GRPC_OUTPUT_PATH='src/cpp/server/health'
# nanopb-compile the proto to a temp location
./tools/codegen/core/gen_nano_proto.sh \
src/proto/grpc/health/v1/health.proto \
"$NANOPB_HEALTH_TMP_OUTPUT" \
"$HEALTH_GRPC_OUTPUT_PATH"
# compare outputs to checked compiled code
for NANOPB_OUTPUT_FILE in $NANOPB_HEALTH_TMP_OUTPUT/*.pb.*; do
if ! diff "$NANOPB_OUTPUT_FILE" "src/cpp/server/health/$(basename $NANOPB_OUTPUT_FILE)"; then
echo "Outputs differ: $NANOPB_HEALTH_TMP_OUTPUT vs $HEALTH_GRPC_OUTPUT_PATH"
exit 2
fi
done
#
# Checks for handshaker.proto and transport_security_common.proto
#

@ -0,0 +1,33 @@
#!/usr/bin/env python
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../../../test/cpp/qps'))
subprocess.call(['./json_run_localhost_scenario_gen.py'])
subprocess.call(['./qps_json_driver_scenario_gen.py'])
output = subprocess.check_output(['git', 'status', '--porcelain'])
qps_json_driver_bzl = 'test/cpp/qps/qps_json_driver_scenarios.bzl'
json_run_localhost_bzl = 'test/cpp/qps/json_run_localhost_scenarios.bzl'
if qps_json_driver_bzl in output or json_run_localhost_bzl in output:
print('qps benchmark scenarios have been updated, please commit '
'test/cpp/qps/qps_json_driver_scenarios.bzl and/or '
'test/cpp/qps/json_run_localhost_scenarios.bzl')
sys.exit(1)
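When this check trips, the remedy it prints amounts to regenerating the two .bzl files and committing them. A minimal sketch of that regeneration step, assuming it is run the same way the check runs it (from test/cpp/qps, with both generator scripts executable):

import os
import subprocess

# Re-run the generators so the checked-in .bzl files match gen_build_yaml.py,
# then show what changed so it can be committed.
os.chdir('test/cpp/qps')
subprocess.call(['./json_run_localhost_scenario_gen.py'])
subprocess.call(['./qps_json_driver_scenario_gen.py'])
subprocess.call(['git', 'status', '--porcelain'])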

@ -3,6 +3,7 @@
- script: tools/run_tests/sanity/check_bazel_workspace.py
- script: tools/run_tests/sanity/check_cache_mk.sh
- script: tools/run_tests/sanity/check_owners.sh
- script: tools/run_tests/sanity/check_qps_scenario_changes.py
- script: tools/run_tests/sanity/check_shellcheck.sh
- script: tools/run_tests/sanity/check_submodules.sh
- script: tools/run_tests/sanity/check_test_filtering.py
