Merge github.com:grpc/grpc into bm_fullstack

pull/8503/head
Craig Tiller 8 years ago
commit 5f4ebe305f
  1. src/core/ext/lb_policy/grpclb/grpclb.c (8 changed lines)
  2. src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h (4 changed lines)
  3. src/proto/grpc/lb/v1/load_balancer.options (3 changed lines)
  4. src/proto/grpc/lb/v1/load_balancer.proto (8 changed lines)
  5. test/core/end2end/tests/resource_quota_server.c (39 changed lines)
  6. test/cpp/grpclb/grpclb_test.cc (2 changed lines)
  7. tools/run_tests/performance/bq_upload_result.py (2 changed lines)

src/core/ext/lb_policy/grpclb/grpclb.c
@@ -407,10 +407,12 @@ static grpc_lb_addresses *process_serverlist(
     /* lb token processing */
     void *user_data;
     if (server->has_load_balance_token) {
-      const size_t lb_token_size =
-          GPR_ARRAY_SIZE(server->load_balance_token) - 1;
+      const size_t lb_token_max_length =
+          GPR_ARRAY_SIZE(server->load_balance_token);
+      const size_t lb_token_length =
+          strnlen(server->load_balance_token, lb_token_max_length);
       grpc_mdstr *lb_token_mdstr = grpc_mdstr_from_buffer(
-          (uint8_t *)server->load_balance_token, lb_token_size);
+          (uint8_t *)server->load_balance_token, lb_token_length);
       user_data = grpc_mdelem_from_metadata_strings(GRPC_MDSTR_LB_TOKEN,
                                                     lb_token_mdstr);
     } else {

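The grpclb.c hunk above replaces the fixed GPR_ARRAY_SIZE-based length with strnlen, so a token shorter than the buffer no longer drags trailing bytes into the metadata string. A minimal standalone sketch of the same idea, using plain C and a hypothetical token buffer rather than the real nanopb struct:

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void) {
      /* Fixed-size, NUL-terminated buffer, as nanopb generates for a string
         field declared with max_size:50. */
      char load_balance_token[50] = "token12345";

      /* Old approach: always report the full buffer minus the terminator,
         which includes whatever happens to sit after the actual token. */
      size_t fixed_len = ARRAY_SIZE(load_balance_token) - 1;

      /* New approach: measure the token itself, bounded by the buffer size
         (strnlen never reads past the array even if the NUL were missing). */
      size_t actual_len =
          strnlen(load_balance_token, ARRAY_SIZE(load_balance_token));

      printf("fixed=%zu actual=%zu\n", fixed_len, actual_len); /* 49 vs 10 */
      assert(actual_len <= fixed_len);
      return 0;
    }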
src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -77,7 +77,7 @@ typedef struct _grpc_lb_v1_Server {
   bool has_port;
   int32_t port;
   bool has_load_balance_token;
-  char load_balance_token[65];
+  char load_balance_token[50];
   bool has_drop_request;
   bool drop_request;
 /* @@protoc_insertion_point(struct:grpc_lb_v1_Server) */
@@ -172,7 +172,7 @@ extern const pb_field_t grpc_lb_v1_Server_fields[5];
 #define grpc_lb_v1_LoadBalanceResponse_size (98 + grpc_lb_v1_ServerList_size)
 #define grpc_lb_v1_InitialLoadBalanceResponse_size 90
 /* grpc_lb_v1_ServerList_size depends on runtime parameters */
-#define grpc_lb_v1_Server_size 98
+#define grpc_lb_v1_Server_size 83

 /* Message IDs (where set with "msgid" option) */
 #ifdef PB_MSGID

src/proto/grpc/lb/v1/load_balancer.options
@@ -1,6 +1,5 @@
 grpc.lb.v1.InitialLoadBalanceRequest.name max_size:128
-grpc.lb.v1.InitialLoadBalanceResponse.client_config max_size:64
 grpc.lb.v1.InitialLoadBalanceResponse.load_balancer_delegate max_size:64
 grpc.lb.v1.Server.ip_address max_size:16
-grpc.lb.v1.Server.load_balance_token max_size:65
+grpc.lb.v1.Server.load_balance_token max_size:50
 load_balancer.proto no_unions:true

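The .options and .pb.h hunks are two views of the same limit: for a nanopb string field, max_size is the size of the generated char array, so max_size:50 becomes char load_balance_token[50], leaving room for at most 49 token characters plus the terminating NUL. A rough compile-time check of that relationship, with a trimmed-down, hypothetical stand-in for the generated struct (not the real header):

    #include <stdbool.h>

    /* Hypothetical, trimmed-down stand-in for the nanopb-generated struct in
       load_balancer.pb.h; only the token field is kept. */
    typedef struct {
      bool has_load_balance_token;
      char load_balance_token[50]; /* max_size:50 => 49 chars + '\0' */
    } grpc_lb_v1_Server_stub;

    /* C89-style static assertion: compilation fails if the buffer size ever
       drifts away from the 50 bytes promised by load_balancer.options. */
    typedef char assert_token_buffer_is_50[
        (sizeof(((grpc_lb_v1_Server_stub *)0)->load_balance_token) == 50) ? 1 : -1];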
src/proto/grpc/lb/v1/load_balancer.proto
@@ -63,7 +63,8 @@ message LoadBalanceRequest {
 }

 message InitialLoadBalanceRequest {
-  // Name of load balanced service (IE, service.grpc.gslb.google.com)
+  // Name of load balanced service (IE, service.grpc.gslb.google.com). Its
+  // length should be less than 256 bytes.
   string name = 1;
 }

@@ -95,7 +96,8 @@ message InitialLoadBalanceResponse {
   // This is an application layer redirect that indicates the client should use
   // the specified server for load balancing. When this field is non-empty in
   // the response, the client should open a separate connection to the
-  // load_balancer_delegate and call the BalanceLoad method.
+  // load_balancer_delegate and call the BalanceLoad method. Its length should
+  // be less than 64 bytes.
   string load_balancer_delegate = 1;

   // This interval defines how often the client should send the client stats
@@ -130,6 +132,8 @@ message Server {
   // frontend requests for that pick must include the token in its initial
   // metadata. The token is used by the backend to verify the request and to
   // allow the backend to report load to the gRPC LB system.
+  //
+  // Its length is variable but less than 50 bytes.
   string load_balance_token = 3;

   // Indicates whether this particular request should be dropped by the client

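The Server.load_balance_token comment above describes the contract: the balancer hands out a token per pick, the client echoes it in the initial metadata of requests routed to that backend, and the backend verifies it. A self-contained sketch of the backend-side check, in the spirit of the contains_metadata helper used in the grpclb_test.cc hunk further down (the metadata types here are simplified stand-ins, not the real grpc_metadata definitions):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Simplified stand-ins for the core metadata types. */
    typedef struct {
      const char *key;
      const char *value;
      size_t value_length;
    } md_entry;

    typedef struct {
      size_t count;
      md_entry *metadata;
    } md_array;

    /* Returns true if the request's initial metadata carries the expected
       lb-token for this backend. */
    static bool has_lb_token(const md_array *md, const char *expected_token) {
      const size_t expected_len = strlen(expected_token);
      for (size_t i = 0; i < md->count; i++) {
        if (strcmp(md->metadata[i].key, "lb-token") == 0 &&
            md->metadata[i].value_length == expected_len &&
            memcmp(md->metadata[i].value, expected_token, expected_len) == 0) {
          return true;
        }
      }
      return false;
    }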
test/core/end2end/tests/resource_quota_server.c
@@ -137,17 +137,22 @@ void resource_quota_server(grpc_end2end_test_config config) {
    * will be verified on completion. */
   gpr_slice request_payload_slice = generate_random_slice();

-  grpc_call *client_calls[NUM_CALLS];
-  grpc_call *server_calls[NUM_CALLS];
-  grpc_metadata_array initial_metadata_recv[NUM_CALLS];
-  grpc_metadata_array trailing_metadata_recv[NUM_CALLS];
-  grpc_metadata_array request_metadata_recv[NUM_CALLS];
-  grpc_call_details call_details[NUM_CALLS];
-  grpc_status_code status[NUM_CALLS];
-  char *details[NUM_CALLS];
-  size_t details_capacity[NUM_CALLS];
-  grpc_byte_buffer *request_payload_recv[NUM_CALLS];
-  int was_cancelled[NUM_CALLS];
+  grpc_call **client_calls = malloc(sizeof(grpc_call *) * NUM_CALLS);
+  grpc_call **server_calls = malloc(sizeof(grpc_call *) * NUM_CALLS);
+  grpc_metadata_array *initial_metadata_recv =
+      malloc(sizeof(grpc_metadata_array) * NUM_CALLS);
+  grpc_metadata_array *trailing_metadata_recv =
+      malloc(sizeof(grpc_metadata_array) * NUM_CALLS);
+  grpc_metadata_array *request_metadata_recv =
+      malloc(sizeof(grpc_metadata_array) * NUM_CALLS);
+  grpc_call_details *call_details =
+      malloc(sizeof(grpc_call_details) * NUM_CALLS);
+  grpc_status_code *status = malloc(sizeof(grpc_status_code) * NUM_CALLS);
+  char **details = malloc(sizeof(char *) * NUM_CALLS);
+  size_t *details_capacity = malloc(sizeof(size_t) * NUM_CALLS);
+  grpc_byte_buffer **request_payload_recv =
+      malloc(sizeof(grpc_byte_buffer *) * NUM_CALLS);
+  int *was_cancelled = malloc(sizeof(int) * NUM_CALLS);
   grpc_call_error error;
   int pending_client_calls = 0;
   int pending_server_start_calls = 0;
@@ -356,6 +361,18 @@ void resource_quota_server(grpc_end2end_test_config config) {
   gpr_slice_unref(request_payload_slice);
   grpc_resource_quota_unref(resource_quota);

+  free(client_calls);
+  free(server_calls);
+  free(initial_metadata_recv);
+  free(trailing_metadata_recv);
+  free(request_metadata_recv);
+  free(call_details);
+  free(status);
+  free(details);
+  free(details_capacity);
+  free(request_payload_recv);
+  free(was_cancelled);
+
   end_test(&f);
   config.tear_down_data(&f);
 }

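The resource_quota_server.c hunks move the per-call bookkeeping from the stack to the heap: NUM_CALLS copies of grpc_call_details, grpc_metadata_array and friends add up, and large automatic arrays can overflow a thread's stack, while malloc'd arrays only cost heap and are freed at the end of the test. A minimal sketch of the pattern with a hypothetical per-call struct (and the malloc check the test itself omits):

    #include <stdlib.h>
    #include <string.h>

    #define NUM_CALLS 10000 /* illustrative; the test defines its own value */

    typedef struct {
      int status;
      char *details;
    } per_call_state; /* hypothetical stand-in for the per-call bookkeeping */

    int main(void) {
      /* Heap allocation instead of `per_call_state state[NUM_CALLS];` so a
         large NUM_CALLS cannot blow the stack. */
      per_call_state *state = malloc(sizeof(per_call_state) * NUM_CALLS);
      if (state == NULL) return 1;
      memset(state, 0, sizeof(per_call_state) * NUM_CALLS);

      /* ... issue the calls, filling state[i] as they complete ... */

      free(state);
      return 0;
    }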
test/cpp/grpclb/grpclb_test.cc
@@ -144,7 +144,6 @@ static gpr_slice build_response_payload_slice(
       // disfunctional implementation of std::to_string in gcc 4.4, which doesn't
       // have a version for int but does have one for long long int.
       string token_data = "token" + std::to_string((long long int)ports[i]);
-      token_data.resize(64, '-');
       server->set_load_balance_token(token_data);
     }
     const grpc::string &enc_resp = response.SerializeAsString();
@@ -333,7 +332,6 @@ static void start_backend_server(server_fixture *sf) {
       // disfunctional implementation of std::to_string in gcc 4.4, which doesn't
       // have a version for int but does have one for long long int.
       string expected_token = "token" + std::to_string((long long int)sf->port);
-      expected_token.resize(64, '-');
       GPR_ASSERT(contains_metadata(&request_metadata_recv, "lb-token",
                                    expected_token.c_str()));

tools/run_tests/performance/bq_upload_result.py
@@ -122,6 +122,8 @@ def _flatten_result_inplace(scenario_result):
   scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
   scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
   scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
+  scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+  scenario_result['summary'].pop('failedRequestsPerSecond', None)


 def _populate_metadata_inplace(scenario_result):
