Merge branch 'master' into nocopyinterception

Branch: pull/17609/head
Author: Yash Tibrewal, 6 years ago
Commit: 1cfdf304f8
Files changed (12), with changed-line counts:

   1. CMakeLists.txt (2)
   2. gRPC-Core.podspec (2)
   3. setup.py (2)
   4. src/core/lib/iomgr/resource_quota.cc (1)
   5. src/core/lib/iomgr/tcp_windows.cc (94)
   6. src/core/lib/surface/server.cc (72)
   7. src/core/lib/transport/metadata.cc (1)
   8. templates/CMakeLists.txt.template (2)
   9. templates/gRPC-Core.podspec.template (2)
  10. test/core/end2end/fixtures/h2_full+trace.cc (9)
  11. test/core/end2end/fixtures/h2_sockpair+trace.cc (9)
  12. test/core/memory_usage/BUILD (6)

--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -94,7 +94,7 @@ endif()
 set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
-add_definitions(-DPB_FIELD_16BIT)
+add_definitions(-DPB_FIELD_32BIT)
 if (MSVC)
   include(cmake/msvc_static_runtime.cmake)

--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -93,7 +93,7 @@ Pod::Spec.new do |s|
   }
   s.default_subspecs = 'Interface', 'Implementation'
-  s.compiler_flags = '-DGRPC_ARES=0', '-DPB_FIELD_16BIT'
+  s.compiler_flags = '-DGRPC_ARES=0', '-DPB_FIELD_32BIT'
   s.libraries = 'c++'
   # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its

--- a/setup.py
+++ b/setup.py
@@ -160,7 +160,7 @@ if EXTRA_ENV_COMPILE_ARGS is None:
     EXTRA_ENV_COMPILE_ARGS += ' -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
   elif "darwin" in sys.platform:
     EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv -fno-exceptions'
-EXTRA_ENV_COMPILE_ARGS += ' -DPB_FIELD_16BIT'
+EXTRA_ENV_COMPILE_ARGS += ' -DPB_FIELD_32BIT'
 if EXTRA_ENV_LINK_ARGS is None:
   EXTRA_ENV_LINK_ARGS = ''
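
A note on the flag flipped in these build files (and again in the templates below): PB_FIELD_16BIT and PB_FIELD_32BIT tell nanopb how wide to make its generated field descriptors, which caps the field tags and sizes a message may use. Roughly, as a simplified sketch of the idea rather than the verbatim nanopb header:

    #include <stdint.h>

    /* Sketch: nanopb picks a descriptor width from these macros. */
    #if defined(PB_FIELD_32BIT)
    typedef uint32_t pb_size_t; /* tags/sizes up to 2^32 - 1 */
    #elif defined(PB_FIELD_16BIT)
    typedef uint16_t pb_size_t; /* tags/sizes up to 65535 */
    #else
    typedef uint8_t pb_size_t;  /* default: tags/sizes up to 255 */
    #endif

Moving from 16BIT to 32BIT costs a little memory per field descriptor but removes the 65535 cap.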

--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -665,6 +665,7 @@ void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
     GPR_ASSERT(resource_quota->num_threads_allocated == 0);
     GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
     gpr_free(resource_quota->name);
+    gpr_mu_destroy(&resource_quota->thread_count_mu);
     gpr_free(resource_quota);
   }
 }
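
This one-liner (like the matching gpr_mu_destroy added in metadata.cc below) fixes a leaked mutex: thread_count_mu was initialized when the quota was created but never destroyed. The general gpr_mu lifecycle pairing, as a minimal sketch with a hypothetical widget type:

    #include <grpc/support/alloc.h>
    #include <grpc/support/sync.h>

    typedef struct widget { /* hypothetical type for illustration */
      gpr_mu mu;
    } widget;

    widget* widget_create(void) {
      widget* w = (widget*)gpr_malloc(sizeof(*w));
      gpr_mu_init(&w->mu); /* acquires the underlying OS mutex resource */
      return w;
    }

    void widget_destroy(widget* w) {
      gpr_mu_destroy(&w->mu); /* must run before the memory is freed */
      gpr_free(w);
    }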

--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -42,6 +42,7 @@
 #include "src/core/lib/iomgr/tcp_windows.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/slice/slice_string_helpers.h"
 
 #if defined(__MSYS__) && defined(GPR_ARCH_64)
 /* Nasty workaround for nasty bug when using the 64 bits msys compiler
@@ -112,7 +113,10 @@ typedef struct grpc_tcp {
   grpc_closure* read_cb;
   grpc_closure* write_cb;
-  grpc_slice read_slice;
+
+  /* garbage after the last read */
+  grpc_slice_buffer last_read_buffer;
+
   grpc_slice_buffer* write_slices;
   grpc_slice_buffer* read_slices;
@@ -131,6 +135,7 @@ static void tcp_free(grpc_tcp* tcp) {
   grpc_winsocket_destroy(tcp->socket);
   gpr_mu_destroy(&tcp->mu);
   gpr_free(tcp->peer_string);
+  grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
   grpc_resource_user_unref(tcp->resource_user);
   if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
   gpr_free(tcp);
@@ -179,9 +184,12 @@ static void on_read(void* tcpp, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   grpc_closure* cb = tcp->read_cb;
   grpc_winsocket* socket = tcp->socket;
-  grpc_slice sub;
   grpc_winsocket_callback_info* info = &socket->read_info;
 
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p on_read", tcp);
+  }
+
   GRPC_ERROR_REF(error);
 
   if (error == GRPC_ERROR_NONE) {
@@ -189,13 +197,35 @@ static void on_read(void* tcpp, grpc_error* error) {
       char* utf8_message = gpr_format_message(info->wsa_error);
       error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
       gpr_free(utf8_message);
-      grpc_slice_unref_internal(tcp->read_slice);
+      grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
     } else {
       if (info->bytes_transfered != 0 && !tcp->shutting_down) {
-        sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
-        grpc_slice_buffer_add(tcp->read_slices, sub);
+        GPR_ASSERT((size_t)info->bytes_transfered <= tcp->read_slices->length);
+        if (static_cast<size_t>(info->bytes_transfered) !=
+            tcp->read_slices->length) {
+          grpc_slice_buffer_trim_end(
+              tcp->read_slices,
+              tcp->read_slices->length -
+                  static_cast<size_t>(info->bytes_transfered),
+              &tcp->last_read_buffer);
+        }
+        GPR_ASSERT((size_t)info->bytes_transfered == tcp->read_slices->length);
+
+        if (grpc_tcp_trace.enabled()) {
+          size_t i;
+          for (i = 0; i < tcp->read_slices->count; i++) {
+            char* dump = grpc_dump_slice(tcp->read_slices->slices[i],
+                                         GPR_DUMP_HEX | GPR_DUMP_ASCII);
+            gpr_log(GPR_INFO, "READ %p (peer=%s): %s", tcp, tcp->peer_string,
+                    dump);
+            gpr_free(dump);
+          }
+        }
       } else {
-        grpc_slice_unref_internal(tcp->read_slice);
+        if (grpc_tcp_trace.enabled()) {
+          gpr_log(GPR_INFO, "TCP:%p unref read_slice", tcp);
+        }
+        grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
         error = tcp->shutting_down
                     ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -209,6 +239,8 @@ static void on_read(void* tcpp, grpc_error* error) {
   GRPC_CLOSURE_SCHED(cb, error);
 }
 
+#define DEFAULT_TARGET_READ_SIZE 8192
+#define MAX_WSABUF_COUNT 16
+
 static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
                      grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -217,7 +249,12 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
   int status;
   DWORD bytes_read = 0;
   DWORD flags = 0;
-  WSABUF buffer;
+  WSABUF buffers[MAX_WSABUF_COUNT];
+  size_t i;
+
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p win_read", tcp);
+  }
 
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
@@ -229,18 +266,27 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
   tcp->read_cb = cb;
   tcp->read_slices = read_slices;
   grpc_slice_buffer_reset_and_unref_internal(read_slices);
+  grpc_slice_buffer_swap(read_slices, &tcp->last_read_buffer);
 
-  tcp->read_slice = GRPC_SLICE_MALLOC(8192);
+  if (tcp->read_slices->length < DEFAULT_TARGET_READ_SIZE / 2 &&
+      tcp->read_slices->count < MAX_WSABUF_COUNT) {
+    // TODO(jtattermusch): slice should be allocated using resource quota
+    grpc_slice_buffer_add(tcp->read_slices,
+                          GRPC_SLICE_MALLOC(DEFAULT_TARGET_READ_SIZE));
+  }
 
-  buffer.len = (ULONG)GRPC_SLICE_LENGTH(
-      tcp->read_slice);  // we know slice size fits in 32bit.
-  buffer.buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slice);
+  GPR_ASSERT(tcp->read_slices->count <= MAX_WSABUF_COUNT);
+  for (i = 0; i < tcp->read_slices->count; i++) {
+    buffers[i].len = (ULONG)GRPC_SLICE_LENGTH(
+        tcp->read_slices->slices[i]);  // we know slice size fits in 32bit.
+    buffers[i].buf = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[i]);
+  }
 
   TCP_REF(tcp, "read");
 
   /* First let's try a synchronous, non-blocking read. */
-  status =
-      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
+  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+                   &bytes_read, &flags, NULL, NULL);
   info->wsa_error = status == 0 ? 0 : WSAGetLastError();
 
   /* Did we get data immediately ? Yay. */
@@ -252,8 +298,8 @@ static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
 
   /* Otherwise, let's retry, by queuing a read. */
   memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
-  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
-                   &info->overlapped, NULL);
+  status = WSARecv(tcp->socket->socket, buffers, (DWORD)tcp->read_slices->count,
+                   &bytes_read, &flags, &info->overlapped, NULL);
 
   if (status != 0) {
     int wsa_error = WSAGetLastError();
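
Both WSARecv call sites now pass an array of WSABUFs, which is what makes the read scatter-gather: Winsock fills buffers[0] completely before spilling into buffers[1], and so on. For readers unfamiliar with the API, a minimal standalone sketch of a synchronous vectored read (socket setup and full error handling assumed elsewhere):

    #include <winsock2.h>
    #include <stdio.h>

    /* `s` is assumed to be a connected, blocking SOCKET; WSAStartup was
       called elsewhere. Link with ws2_32.lib. */
    int vectored_read(SOCKET s) {
      char header[16];
      char body[4096];
      WSABUF bufs[2];
      bufs[0].len = sizeof(header);
      bufs[0].buf = header;
      bufs[1].len = sizeof(body);
      bufs[1].buf = body;
      DWORD bytes_read = 0;
      DWORD flags = 0;
      /* NULL overlapped + NULL completion routine => synchronous call. */
      if (WSARecv(s, bufs, 2, &bytes_read, &flags, NULL, NULL) != 0) {
        fprintf(stderr, "WSARecv failed: %d\n", WSAGetLastError());
        return -1;
      }
      printf("received %lu bytes across both buffers\n",
             (unsigned long)bytes_read);
      return 0;
    }
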
@@ -275,6 +321,10 @@ static void on_write(void* tcpp, grpc_error* error) {
   grpc_winsocket_callback_info* info = &handle->write_info;
   grpc_closure* cb;
 
+  if (grpc_tcp_trace.enabled()) {
+    gpr_log(GPR_INFO, "TCP:%p on_write", tcp);
+  }
+
   GRPC_ERROR_REF(error);
 
   gpr_mu_lock(&tcp->mu);
@@ -303,11 +353,21 @@ static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
   unsigned i;
   DWORD bytes_sent;
   int status;
-  WSABUF local_buffers[16];
+  WSABUF local_buffers[MAX_WSABUF_COUNT];
   WSABUF* allocated = NULL;
   WSABUF* buffers = local_buffers;
   size_t len;
 
+  if (grpc_tcp_trace.enabled()) {
+    size_t i;
+    for (i = 0; i < slices->count; i++) {
+      char* data =
+          grpc_dump_slice(slices->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
+      gpr_log(GPR_INFO, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
+      gpr_free(data);
+    }
+  }
+
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
         cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -412,6 +472,7 @@ static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
 static void win_destroy(grpc_endpoint* ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
+  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
   TCP_UNREF(tcp, "destroy");
 }
@@ -463,6 +524,7 @@ grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
   GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
   tcp->peer_string = gpr_strdup(peer_string);
+  grpc_slice_buffer_init(&tcp->last_read_buffer);
   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   /* Tell network status tracking code about the new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);

--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -194,13 +194,10 @@ struct call_data {
 };
 
 struct request_matcher {
-  request_matcher(grpc_server* server);
-  ~request_matcher();
-
   grpc_server* server;
-  std::atomic<call_data*> pending_head{nullptr};
-  call_data* pending_tail = nullptr;
-  gpr_locked_mpscq* requests_per_cq = nullptr;
+  call_data* pending_head;
+  call_data* pending_tail;
+  gpr_locked_mpscq* requests_per_cq;
 };
 
 struct registered_method {
@@ -349,30 +346,22 @@ static void channel_broadcaster_shutdown(channel_broadcaster* cb,
  * request_matcher
  */
 
-namespace {
-request_matcher::request_matcher(grpc_server* server) : server(server) {
-  requests_per_cq = static_cast<gpr_locked_mpscq*>(
-      gpr_malloc(sizeof(*requests_per_cq) * server->cq_count));
-  for (size_t i = 0; i < server->cq_count; i++) {
-    gpr_locked_mpscq_init(&requests_per_cq[i]);
-  }
-}
-
-request_matcher::~request_matcher() {
-  for (size_t i = 0; i < server->cq_count; i++) {
-    GPR_ASSERT(gpr_locked_mpscq_pop(&requests_per_cq[i]) == nullptr);
-    gpr_locked_mpscq_destroy(&requests_per_cq[i]);
-  }
-  gpr_free(requests_per_cq);
-}
-}  // namespace
-
 static void request_matcher_init(request_matcher* rm, grpc_server* server) {
-  new (rm) request_matcher(server);
+  memset(rm, 0, sizeof(*rm));
+  rm->server = server;
+  rm->requests_per_cq = static_cast<gpr_locked_mpscq*>(
+      gpr_malloc(sizeof(*rm->requests_per_cq) * server->cq_count));
+  for (size_t i = 0; i < server->cq_count; i++) {
+    gpr_locked_mpscq_init(&rm->requests_per_cq[i]);
+  }
 }
 
 static void request_matcher_destroy(request_matcher* rm) {
-  rm->~request_matcher();
+  for (size_t i = 0; i < rm->server->cq_count; i++) {
+    GPR_ASSERT(gpr_locked_mpscq_pop(&rm->requests_per_cq[i]) == nullptr);
+    gpr_locked_mpscq_destroy(&rm->requests_per_cq[i]);
+  }
+  gpr_free(rm->requests_per_cq);
 }
 
 static void kill_zombie(void* elem, grpc_error* error) {
@@ -381,10 +370,9 @@ static void kill_zombie(void* elem, grpc_error* error) {
 }
 
 static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
-  call_data* calld;
-  while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
-         nullptr) {
-    rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
+  while (rm->pending_head) {
+    call_data* calld = rm->pending_head;
+    rm->pending_head = calld->pending_next;
     gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
     GRPC_CLOSURE_INIT(
         &calld->kill_zombie_closure, kill_zombie,
@@ -582,9 +570,8 @@ static void publish_new_rpc(void* arg, grpc_error* error) {
   }
 
   gpr_atm_no_barrier_store(&calld->state, PENDING);
-  if (rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
-    rm->pending_head.store(calld, std::memory_order_relaxed);
-    rm->pending_tail = calld;
+  if (rm->pending_head == nullptr) {
+    rm->pending_tail = rm->pending_head = calld;
   } else {
     rm->pending_tail->pending_next = calld;
     rm->pending_tail = calld;
@@ -1448,25 +1435,15 @@ static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
       rm = &rc->data.registered.method->matcher;
       break;
   }
-
-  // Fast path: if there is no pending request to be processed, immediately
-  // return.
-  if (!gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link) ||
-      // Note: We are reading the pending_head without holding the server's call
-      // mutex. Even if we read a non-null value here due to reordering,
-      // we will check it below again after grabbing the lock.
-      rm->pending_head.load(std::memory_order_relaxed) == nullptr) {
-    return GRPC_CALL_OK;
-  }
-  // Slow path: This was the first queued request and there are pendings:
-  // We need to lock and start matching calls.
-  gpr_mu_lock(&server->mu_call);
-  while ((calld = rm->pending_head.load(std::memory_order_relaxed)) !=
-         nullptr) {
-    rc = reinterpret_cast<requested_call*>(
-        gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
-    if (rc == nullptr) break;
-    rm->pending_head.store(calld->pending_next, std::memory_order_relaxed);
-    gpr_mu_unlock(&server->mu_call);
+  if (gpr_locked_mpscq_push(&rm->requests_per_cq[cq_idx], &rc->request_link)) {
+    /* this was the first queued request: we need to lock and start
+       matching calls */
+    gpr_mu_lock(&server->mu_call);
+    while ((calld = rm->pending_head) != nullptr) {
+      rc = reinterpret_cast<requested_call*>(
+          gpr_locked_mpscq_pop(&rm->requests_per_cq[cq_idx]));
+      if (rc == nullptr) break;
+      rm->pending_head = calld->pending_next;
+      gpr_mu_unlock(&server->mu_call);
       if (!gpr_atm_full_cas(&calld->state, PENDING, ACTIVATED)) {
         // Zombied Call
@@ -1481,6 +1458,7 @@ static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
       gpr_mu_lock(&server->mu_call);
     }
     gpr_mu_unlock(&server->mu_call);
+  }
 
   return GRPC_CALL_OK;
 }
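
The restructured control flow relies on a contract of gpr_locked_mpscq: push reports whether it inserted into an empty queue, so exactly one enqueuer wins responsibility for taking mu_call and draining pending calls. A toy illustration of that contract built on std primitives (the real gRPC queue is lock-free on the push side; this sketch only mirrors the return-value semantics):

    #include <mutex>
    #include <queue>
    #include <utility>

    // Toy multi-producer queue whose push reports "was empty" -- the
    // invariant queue_call_request uses to pick the one matching thread.
    template <typename T>
    class LockedMpscq {
     public:
      // Returns true iff the queue was empty before this push.
      bool push(T item) {
        std::lock_guard<std::mutex> g(mu_);
        bool was_empty = q_.empty();
        q_.push(std::move(item));
        return was_empty;
      }
      bool pop(T* out) {
        std::lock_guard<std::mutex> g(mu_);
        if (q_.empty()) return false;
        *out = std::move(q_.front());
        q_.pop();
        return true;
      }
     private:
      std::mutex mu_;
      std::queue<T> q_;
    };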

--- a/src/core/lib/transport/metadata.cc
+++ b/src/core/lib/transport/metadata.cc
@@ -187,6 +187,7 @@ static void gc_mdtab(mdtab_shard* shard) {
         ((destroy_user_data_func)gpr_atm_no_barrier_load(
             &md->destroy_user_data))(user_data);
       }
+      gpr_mu_destroy(&md->mu_user_data);
       gpr_free(md);
       *prev_next = next;
       num_freed++;

--- a/templates/CMakeLists.txt.template
+++ b/templates/CMakeLists.txt.template
@@ -143,7 +143,7 @@
   ## Some libraries are shared even with BUILD_SHARED_LIBRARIES=OFF
   set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
-  add_definitions(-DPB_FIELD_16BIT)
+  add_definitions(-DPB_FIELD_32BIT)
   if (MSVC)
     include(cmake/msvc_static_runtime.cmake)

--- a/templates/gRPC-Core.podspec.template
+++ b/templates/gRPC-Core.podspec.template
@@ -152,7 +152,7 @@
     }
     s.default_subspecs = 'Interface', 'Implementation'
-    s.compiler_flags = '-DGRPC_ARES=0', '-DPB_FIELD_16BIT'
+    s.compiler_flags = '-DGRPC_ARES=0', '-DPB_FIELD_32BIT'
     s.libraries = 'c++'
     # Like many other C libraries, gRPC-Core has its public headers under `include/<libname>/` and its

--- a/test/core/end2end/fixtures/h2_full+trace.cc
+++ b/test/core/end2end/fixtures/h2_full+trace.cc
@@ -113,6 +113,15 @@ int main(int argc, char** argv) {
   g_fixture_slowdown_factor = 10;
 #endif
 
+#ifdef GPR_WINDOWS
+  /* on Windows, writing logs to stderr is very slow
+     when stderr is redirected to a disk file.
+     The "trace" tests fixtures generates large amount
+     of logs, so setting a buffer for stderr prevents certain
+     test cases from timing out. */
+  setvbuf(stderr, NULL, _IOLBF, 1024);
+#endif
+
   grpc::testing::TestEnvironment env(argc, argv);
   grpc_end2end_tests_pre_init();
   grpc_init();
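
For reference on the setvbuf call added to this fixture and the next: stderr is typically unbuffered, so every log write hits the redirected file individually; _IOLBF installs a line buffer (1024 bytes here) so output is flushed once per newline instead. Per the C standard, the call must precede any other operation on the stream. A standalone sketch:

    #include <stdio.h>

    int main(void) {
      /* Must run before any I/O on the stream. NULL buf lets the library
         allocate the 1024-byte buffer; _IOLBF flushes on each '\n'. */
      if (setvbuf(stderr, NULL, _IOLBF, 1024) != 0) {
        /* Buffering is only an optimization; ignore failure. */
      }
      fprintf(stderr, "buffered line one\n");
      fprintf(stderr, "buffered line two\n");
      return 0;
    }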

--- a/test/core/end2end/fixtures/h2_sockpair+trace.cc
+++ b/test/core/end2end/fixtures/h2_sockpair+trace.cc
@@ -140,6 +140,15 @@ int main(int argc, char** argv) {
   g_fixture_slowdown_factor = 10;
 #endif
 
+#ifdef GPR_WINDOWS
+  /* on Windows, writing logs to stderr is very slow
+     when stderr is redirected to a disk file.
+     The "trace" tests fixtures generates large amount
+     of logs, so setting a buffer for stderr prevents certain
+     test cases from timing out. */
+  setvbuf(stderr, NULL, _IOLBF, 1024);
+#endif
+
   grpc::testing::TestEnvironment env(argc, argv);
   grpc_end2end_tests_pre_init();
   grpc_init();

--- a/test/core/memory_usage/BUILD
+++ b/test/core/memory_usage/BUILD
@@ -12,13 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-load("//bazel:grpc_build_system.bzl", "grpc_cc_binary", "grpc_cc_test", "grpc_package")
+load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package")
 
 grpc_package(name = "test/core/memory_usage")
 
 licenses(["notice"])  # Apache v2
 
-grpc_cc_binary(
+grpc_cc_library(
     name = "memory_usage_client",
     testonly = 1,
     srcs = ["client.cc"],
@@ -29,7 +29,7 @@ grpc_cc_binary(
     ],
 )
 
-grpc_cc_binary(
+grpc_cc_library(
     name = "memory_usage_server",
     testonly = 1,
     srcs = ["server.cc"],
