/*
*
* Copyright 2015-2016, Google Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Google Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <condition_variable>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

#include <grpc++/channel.h>
#include <grpc++/client_context.h>
#include <grpc++/create_channel.h>
#include <grpc++/server.h>
#include <grpc++/server_builder.h>
#include <grpc++/server_context.h>
#include <grpc/grpc.h>
#include <grpc/support/thd.h>
#include <grpc/support/time.h>
#include <gtest/gtest.h>
#include "src/proto/grpc/testing/duplicate/echo_duplicate.grpc.pb.h"
#include "src/proto/grpc/testing/echo.grpc.pb.h"
#include "test/core/util/port.h"
#include "test/core/util/test_config.h"
using grpc::testing::EchoRequest;
using grpc::testing::EchoResponse;
using std::chrono::system_clock;

namespace grpc {
namespace testing {
namespace {

// When echo_deadline is requested, the deadline seen in the ServerContext is
// set in the response, in seconds.
void MaybeEchoDeadline(ServerContext* context, const EchoRequest* request,
                       EchoResponse* response) {
  if (request->has_param() && request->param().echo_deadline()) {
    gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
    if (context->deadline() != system_clock::time_point::max()) {
      Timepoint2Timespec(context->deadline(), &deadline);
    }
    response->mutable_param()->set_request_deadline(deadline.tv_sec);
  }
}

}  // namespace
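
// Synchronous implementation of the Echo service used as the test backend.
// Besides echoing the message, it can simulate client- or server-initiated
// cancellation when requested via the request's param().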
class TestServiceImpl : public ::grpc::testing::EchoTestService::Service {
 public:
  TestServiceImpl() : signal_client_(false) {}

  Status Echo(ServerContext* context, const EchoRequest* request,
              EchoResponse* response) GRPC_OVERRIDE {
    response->set_message(request->message());
    MaybeEchoDeadline(context, request, response);
    if (request->has_param() && request->param().client_cancel_after_us()) {
      // Signal the client that it may cancel, then spin until the
      // cancellation is observed on the ServerContext.
      {
        std::unique_lock<std::mutex> lock(mu_);
        signal_client_ = true;
      }
      while (!context->IsCancelled()) {
        gpr_sleep_until(gpr_time_add(
            gpr_now(GPR_CLOCK_REALTIME),
            gpr_time_from_micros(request->param().client_cancel_after_us(),
                                 GPR_TIMESPAN)));
      }
      return Status::CANCELLED;
    } else if (request->has_param() &&
               request->param().server_cancel_after_us()) {
      // Sleep for the requested interval, then cancel from the server side.
      gpr_sleep_until(gpr_time_add(
          gpr_now(GPR_CLOCK_REALTIME),
          gpr_time_from_micros(request->param().server_cancel_after_us(),
                               GPR_TIMESPAN)));
      return Status::CANCELLED;
    } else {
      EXPECT_FALSE(context->IsCancelled());
    }
    return Status::OK;
  }

  bool signal_client() {
    std::unique_lock<std::mutex> lock(mu_);
    return signal_client_;
  }

 private:
  bool signal_client_;
  std::mutex mu_;
};
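
// Fixture that starts an in-process echo server on an unused port and drives
// it through the asynchronous client API. Outstanding RPCs are counted in
// rpcs_outstanding_ so Wait() knows when the completion queue can be shut down.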
class AsyncClientEnd2endTest : public ::testing::Test {
 protected:
  AsyncClientEnd2endTest() : kMaxMessageSize_(8192), rpcs_outstanding_(0) {}

  void SetUp() GRPC_OVERRIDE {
    int port = grpc_pick_unused_port_or_die();
    server_address_ << "localhost:" << port;
    // Setup server
    ServerBuilder builder;
    builder.AddListeningPort(server_address_.str(),
                             InsecureServerCredentials());
    builder.RegisterService(&service_);
    builder.SetMaxMessageSize(
        kMaxMessageSize_);  // For testing max message size.
    server_ = builder.BuildAndStart();
  }

  void TearDown() GRPC_OVERRIDE { server_->Shutdown(); }

  void ResetStub() {
    std::shared_ptr<Channel> channel =
        CreateChannel(server_address_.str(), InsecureChannelCredentials());
    stub_ = grpc::testing::EchoTestService::NewStub(channel);
  }

  // Blocks until every outstanding RPC has completed, then shuts down the
  // completion queue so the AsyncCompleteRpc threads can drain and exit.
  void Wait() {
    std::unique_lock<std::mutex> l(mu_);
    while (rpcs_outstanding_ != 0) {
      cv_.wait(l);
    }
    cq_.Shutdown();
  }

  struct AsyncClientCall {
    EchoResponse response;
    ClientContext context;
    Status status;
    std::unique_ptr<ClientAsyncResponseReader<EchoResponse>> response_reader;
  };

  // Issues num_rpcs asynchronous Echo calls, tagging each one with its
  // heap-allocated AsyncClientCall so the completion thread can clean it up.
  void AsyncSendRpc(int num_rpcs) {
    for (int i = 0; i < num_rpcs; ++i) {
      AsyncClientCall* call = new AsyncClientCall;
      EchoRequest request;
      request.set_message(std::to_string(i));
      call->response_reader = stub_->AsyncEcho(&call->context, request, &cq_);
      call->response_reader->Finish(&call->response, &call->status,
                                    (void*)call);
      std::unique_lock<std::mutex> l(mu_);
      rpcs_outstanding_++;
    }
  }

  // Drains the completion queue, deleting each finished call and notifying
  // Wait() once the last outstanding RPC has completed.
  void AsyncCompleteRpc() {
    while (true) {
      void* got_tag;
      bool ok = false;
      if (!cq_.Next(&got_tag, &ok)) break;
      AsyncClientCall* call = static_cast<AsyncClientCall*>(got_tag);
      GPR_ASSERT(ok);
      delete call;
      bool notify;
      {
        std::unique_lock<std::mutex> l(mu_);
        rpcs_outstanding_--;
        notify = (rpcs_outstanding_ == 0);
      }
      if (notify) {
        cv_.notify_all();
      }
    }
  }

  std::unique_ptr<grpc::testing::EchoTestService::Stub> stub_;
  std::unique_ptr<Server> server_;
  std::ostringstream server_address_;
  const int kMaxMessageSize_;
  TestServiceImpl service_;
  CompletionQueue cq_;
  std::mutex mu_;
  std::condition_variable cv_;
  int rpcs_outstanding_;
};
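
// Stress test for the asynchronous client: 100 sender threads each issue 1000
// Echo RPCs, then 100 completion threads drain the shared completion queue
// until every call has finished.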
TEST_F(AsyncClientEnd2endTest, ThreadStress) {
  ResetStub();
  std::vector<std::thread*> threads;
  for (int i = 0; i < 100; ++i) {
    threads.push_back(new std::thread(
        &AsyncClientEnd2endTest_ThreadStress_Test::AsyncSendRpc, this, 1000));
  }
  for (int i = 0; i < 100; ++i) {
    threads[i]->join();
    delete threads[i];
  }
  threads.clear();
  for (int i = 0; i < 100; ++i) {
    threads.push_back(new std::thread(
        &AsyncClientEnd2endTest_ThreadStress_Test::AsyncCompleteRpc, this));
  }
  Wait();
  for (int i = 0; i < 100; ++i) {
    threads[i]->join();
    delete threads[i];
  }
}
}  // namespace testing
}  // namespace grpc

int main(int argc, char** argv) {
  grpc_test_init(argc, argv);
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}