Merge branch 'master' into cq_lockfree

pull/10662/head · Sree Kuchibhotla · commit 7aaab5b9db (8 years ago)
7 changed files (lines changed per file):
  1. src/core/ext/transport/chttp2/transport/chttp2_transport.c (38)
  2. src/core/ext/transport/cronet/transport/cronet_transport.c (3)
  3. src/objective-c/tests/InteropTests.m (9)
  4. src/ruby/spec/generic/rpc_server_pool_spec.rb (44)
  5. test/core/security/credentials_test.c (2)
  6. tools/profiling/microbenchmarks/bm_diff.py (2)
  7. tools/profiling/microbenchmarks/bm_json.py (1)

@@ -1890,6 +1890,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
 grpc_slice hdr;
 grpc_slice status_hdr;
 grpc_slice http_status_hdr;
+grpc_slice content_type_hdr;
 grpc_slice message_pfx;
 uint8_t *p;
 uint32_t len = 0;
@@ -1923,6 +1924,42 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
 *p++ = '0';
 GPR_ASSERT(p == GRPC_SLICE_END_PTR(http_status_hdr));
 len += (uint32_t)GRPC_SLICE_LENGTH(http_status_hdr);
+content_type_hdr = grpc_slice_malloc(31);
+p = GRPC_SLICE_START_PTR(content_type_hdr);
+*p++ = 0x00;
+*p++ = 12;
+*p++ = 'c';
+*p++ = 'o';
+*p++ = 'n';
+*p++ = 't';
+*p++ = 'e';
+*p++ = 'n';
+*p++ = 't';
+*p++ = '-';
+*p++ = 't';
+*p++ = 'y';
+*p++ = 'p';
+*p++ = 'e';
+*p++ = 16;
+*p++ = 'a';
+*p++ = 'p';
+*p++ = 'p';
+*p++ = 'l';
+*p++ = 'i';
+*p++ = 'c';
+*p++ = 'a';
+*p++ = 't';
+*p++ = 'i';
+*p++ = 'o';
+*p++ = 'n';
+*p++ = '/';
+*p++ = 'g';
+*p++ = 'r';
+*p++ = 'p';
+*p++ = 'c';
+GPR_ASSERT(p == GRPC_SLICE_END_PTR(content_type_hdr));
+len += (uint32_t)GRPC_SLICE_LENGTH(content_type_hdr);
 }
 status_hdr = grpc_slice_malloc(15 + (grpc_status >= 10));
@@ -1992,6 +2029,7 @@ static void close_from_api(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
 grpc_slice_buffer_add(&t->qbuf, hdr);
 if (!s->sent_initial_metadata) {
 grpc_slice_buffer_add(&t->qbuf, http_status_hdr);
+grpc_slice_buffer_add(&t->qbuf, content_type_hdr);
 }
 grpc_slice_buffer_add(&t->qbuf, status_hdr);
 grpc_slice_buffer_add(&t->qbuf, message_pfx);
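
The new chttp2 code above emits a content-type: application/grpc header alongside the :status header when a stream is closed before initial metadata has been sent. The byte layout is a literal HTTP/2 header field with an unindexed new name: 0x00, a length byte of 12 followed by the 12 characters of "content-type", then a length byte of 16 followed by the 16 characters of "application/grpc", for 1 + 1 + 12 + 1 + 16 = 31 bytes, matching the grpc_slice_malloc(31) allocation. A minimal sketch of the same layout using memcpy instead of per-character stores (make_content_type_hdr is a hypothetical helper used only for illustration):

    /* Sketch: builds the same 31-byte header block as the hunk above,
     * with memcpy instead of per-byte stores. Not part of the gRPC tree. */
    #include <stdint.h>
    #include <string.h>
    #include <grpc/slice.h>
    #include <grpc/support/log.h>

    static grpc_slice make_content_type_hdr(void) {
      static const char name[] = "content-type";      /* 12 chars */
      static const char value[] = "application/grpc"; /* 16 chars */
      grpc_slice hdr = grpc_slice_malloc(1 + 1 + 12 + 1 + 16); /* = 31 */
      uint8_t *p = GRPC_SLICE_START_PTR(hdr);
      *p++ = 0x00; /* literal header field, new name, not indexed */
      *p++ = 12;   /* name length, not Huffman-coded */
      memcpy(p, name, 12);
      p += 12;
      *p++ = 16;   /* value length, not Huffman-coded */
      memcpy(p, value, 16);
      p += 16;
      GPR_ASSERT(p == GRPC_SLICE_END_PTR(hdr));
      return hdr;
    }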

@@ -1124,7 +1124,8 @@ static enum e_op_result execute_stream_op(grpc_exec_ctx *exec_ctx,
 if (stream_state->rs.compressed) {
 stream_state->rs.sbs.base.flags |= GRPC_WRITE_INTERNAL_COMPRESS;
 }
-*((grpc_byte_buffer **)stream_op->recv_message) =
+*((grpc_byte_buffer **)
+stream_op->payload->recv_message.recv_message) =
 (grpc_byte_buffer *)&stream_state->rs.sbs;
 grpc_closure_sched(
 exec_ctx, stream_op->payload->recv_message.recv_message_ready,
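
The cronet change tracks the core transport-op refactoring: the destination byte buffer is no longer a field on the op itself but lives in the op's payload, next to the closure that is scheduled once the message has been filled in. A rough sketch of the shape the two call sites above imply (names and layout are simplified for illustration; the real definitions are gRPC-internal and carry many more op groups):

    /* Sketch only: the member names mirror what the hunk dereferences. */
    #include <stdbool.h>

    typedef struct grpc_byte_buffer grpc_byte_buffer; /* treated as opaque here */
    typedef struct grpc_closure grpc_closure;         /* treated as opaque here */

    typedef struct {
      struct {
        grpc_byte_buffer **recv_message;  /* transport stores the message here   */
        grpc_closure *recv_message_ready; /* scheduled once *recv_message is set */
      } recv_message;
      /* ...other per-op argument groups elided... */
    } stream_op_payload_sketch;

    typedef struct {
      bool recv_message;                 /* does this batch include the op? */
      stream_op_payload_sketch *payload; /* per-op arguments now live here  */
    } stream_op_batch_sketch;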

@@ -100,6 +100,15 @@
 return 0;
 }
++ (void)setUp {
+#ifdef GRPC_COMPILE_WITH_CRONET
+// Cronet setup
+[Cronet setHttp2Enabled:YES];
+[Cronet start];
+[GRPCCall useCronetWithEngine:[Cronet getGlobalEngine]];
+#endif
+}
 - (void)setUp {
 self.continueAfterFailure = NO;

@@ -52,28 +52,31 @@ describe GRPC::Pool do
 expect(p.ready_for_work?).to be(false)
 end
-it 'it stops being ready after all workers jobs waiting or running' do
+it 'it stops being ready after all workers are busy' do
 p = Pool.new(5)
 p.start
-job = proc { sleep(3) } # sleep so workers busy when done scheduling
-5.times do
-expect(p.ready_for_work?).to be(true)
-p.schedule(&job)
+wait_mu = Mutex.new
+wait_cv = ConditionVariable.new
+wait = true
+job = proc do
+wait_mu.synchronize do
+wait_cv.wait(wait_mu) while wait
+end
 end
-expect(p.ready_for_work?).to be(false)
-end
-it 'it becomes ready again after jobs complete' do
-p = Pool.new(5)
-p.start
-job = proc {}
 5.times do
 expect(p.ready_for_work?).to be(true)
 p.schedule(&job)
 end
 expect(p.ready_for_work?).to be(false)
-sleep 5 # give the pool time do get at least one task done
-expect(p.ready_for_work?).to be(true)
+wait_mu.synchronize do
+wait = false
+wait_cv.broadcast
+end
 end
 end
@@ -105,13 +108,20 @@ describe GRPC::Pool do
 it 'stops jobs when there are long running jobs' do
 p = Pool.new(1)
 p.start
-o, q = Object.new, Queue.new
+wait_forever_mu = Mutex.new
+wait_forever_cv = ConditionVariable.new
+wait_forever = true
+job_running = Queue.new
 job = proc do
-sleep(5) # long running
-q.push(o)
+job_running.push(Object.new)
+wait_forever_mu.synchronize do
+wait_forever_cv.wait while wait_forever
+end
 end
 p.schedule(&job)
-sleep(1) # should ensure the long job gets scheduled
+job_running.pop
 expect { p.stop }.not_to raise_error
 end
 end
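
The Ruby spec changes replace timing-based synchronization (sleep(3), sleep 5, sleep(1)) with explicit handshakes: workers block on a condition variable until the test broadcasts, and the long-running-job test signals that the job has actually started by pushing to a queue that the test then pops. The same worker-parking pattern, shown with pthreads purely as an illustration (the specs themselves use Ruby's Mutex and ConditionVariable):

    /* Illustration of the parked-worker pattern with pthreads. */
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t wait_mu = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
    static bool keep_waiting = true;

    /* Worker job: park until released (analogue of the Ruby job proc). */
    static void *job(void *arg) {
      pthread_mutex_lock(&wait_mu);
      while (keep_waiting) pthread_cond_wait(&wait_cv, &wait_mu);
      pthread_mutex_unlock(&wait_mu);
      return arg;
    }

    /* Test side: release every parked worker (analogue of wait_cv.broadcast). */
    static void release_workers(void) {
      pthread_mutex_lock(&wait_mu);
      keep_waiting = false;
      pthread_cond_broadcast(&wait_cv);
      pthread_mutex_unlock(&wait_mu);
    }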

@@ -582,7 +582,7 @@ static void on_oauth2_creds_get_metadata_failure(
 static void validate_compute_engine_http_request(
 const grpc_httpcli_request *request) {
 GPR_ASSERT(request->handshaker != &grpc_httpcli_ssl);
-GPR_ASSERT(strcmp(request->host, "metadata") == 0);
+GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
 GPR_ASSERT(
 strcmp(request->http.path,
 "/computeMetadata/v1/instance/service-accounts/default/token") ==

@@ -226,7 +226,7 @@ really_interesting = set()
 for name, bm in benchmarks.items():
 print name
 really_interesting.update(bm.process())
-fields = [f for f in args.track if f in args.track]
+fields = [f for f in args.track if f in really_interesting]
 headers = ['Benchmark'] + fields
 rows = []

@@ -203,4 +203,5 @@ def expand_json(js, js2 = None):
 row['real_time'] = bm2['real_time']
 row['iterations'] = bm2['iterations']
 bm2['already_used'] = True
+break
 yield row
