Merge pull request #6498 from jtattermusch/csharp_server_improvements

C# server performance improvements
pull/6513/head
Jan Tattermusch 9 years ago
commit ade3a79553
Files changed:
  1. src/csharp/Grpc.Core/GrpcEnvironment.cs (34)
  2. src/csharp/Grpc.Core/Server.cs (24)
  3. src/csharp/Grpc.IntegrationTesting/ClientRunners.cs (23)
  4. tools/run_tests/performance/scenario_config.py (20)

src/csharp/Grpc.Core/GrpcEnvironment.cs

@@ -45,11 +45,12 @@ namespace Grpc.Core
     /// </summary>
     public class GrpcEnvironment
     {
-        const int THREAD_POOL_SIZE = 4;
+        const int MinDefaultThreadPoolSize = 4;

         static object staticLock = new object();
         static GrpcEnvironment instance;
         static int refCount;
+        static int? customThreadPoolSize;

         static ILogger logger = new ConsoleLogger();
@@ -122,6 +123,23 @@ namespace Grpc.Core
             logger = customLogger;
         }

+        /// <summary>
+        /// Sets the number of threads in the gRPC thread pool that polls for internal RPC events.
+        /// Can only be invoked before the <c>GrpcEnvironment</c> is started and cannot be changed afterwards.
+        /// Setting the thread pool size is an advanced setting and you should only use it if you know what you are doing.
+        /// Most users should rely on the default value provided by the gRPC library.
+        /// Note: this method is part of an experimental API that can change or be removed without any prior notice.
+        /// </summary>
+        public static void SetThreadPoolSize(int threadCount)
+        {
+            lock (staticLock)
+            {
+                GrpcPreconditions.CheckState(instance == null, "Can only be set before GrpcEnvironment is initialized");
+                GrpcPreconditions.CheckArgument(threadCount > 0, "threadCount needs to be a positive number");
+                customThreadPoolSize = threadCount;
+            }
+        }
+
         /// <summary>
         /// Creates gRPC environment.
         /// </summary>
@@ -129,7 +147,7 @@ namespace Grpc.Core
         {
             GrpcNativeInit();
             completionRegistry = new CompletionRegistry(this);
-            threadPool = new GrpcThreadPool(this, THREAD_POOL_SIZE);
+            threadPool = new GrpcThreadPool(this, GetThreadPoolSizeOrDefault());
             threadPool.Start();
         }
@@ -200,5 +218,17 @@ namespace Grpc.Core
             debugStats.CheckOK();
         }

+        private int GetThreadPoolSizeOrDefault()
+        {
+            if (customThreadPoolSize.HasValue)
+            {
+                return customThreadPoolSize.Value;
+            }
+            // In systems with many cores, use half of the cores for GrpcThreadPool
+            // and the other half for the .NET thread pool. This heuristic definitely
+            // needs more work, but seems to work reasonably well for a start.
+            return Math.Max(MinDefaultThreadPoolSize, Environment.ProcessorCount / 2);
+        }
     }
 }
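
Note: with the new default, a 16-core machine gets Math.Max(4, 16 / 2) = 8 poller threads instead of the previous fixed 4. For anyone who needs a specific size, here is a minimal sketch of how the new experimental knob would be called from application code (the server setup and port are illustrative, not part of this change):

    using Grpc.Core;

    class Program
    {
        static void Main()
        {
            // Must run before GrpcEnvironment is initialized; once a Channel
            // or Server has touched the environment, CheckState throws.
            GrpcEnvironment.SetThreadPoolSize(8);

            var server = new Server
            {
                Ports = { new ServerPort("localhost", 50051, ServerCredentials.Insecure) }
            };
            server.Start();  // the environment starts here, picking up the custom size
            server.ShutdownAsync().Wait();
        }
    }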

src/csharp/Grpc.Core/Server.cs

@@ -48,6 +48,7 @@ namespace Grpc.Core
     /// </summary>
     public class Server
     {
+        const int InitialAllowRpcTokenCount = 10;
         static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<Server>();

         readonly AtomicCounter activeCallCounter = new AtomicCounter();
@@ -65,7 +66,7 @@ namespace Grpc.Core
         readonly TaskCompletionSource<object> shutdownTcs = new TaskCompletionSource<object>();

         bool startRequested;
-        bool shutdownRequested;
+        volatile bool shutdownRequested;

         /// <summary>
         /// Create a new server.
@@ -129,7 +130,13 @@ namespace Grpc.Core
                 startRequested = true;

                 handle.Start();
-                AllowOneRpc();
+
+                // Starting with more than one AllowOneRpc token can
+                // significantly increase unary RPC throughput.
+                for (int i = 0; i < InitialAllowRpcTokenCount; i++)
+                {
+                    AllowOneRpc();
+                }
             }
         }
@@ -239,12 +246,9 @@ namespace Grpc.Core
         /// </summary>
         private void AllowOneRpc()
         {
-            lock (myLock)
+            if (!shutdownRequested)
             {
-                if (!shutdownRequested)
-                {
-                    handle.RequestCall(HandleNewServerRpc, environment);
-                }
+                handle.RequestCall(HandleNewServerRpc, environment);
             }
         }
@@ -283,6 +287,8 @@ namespace Grpc.Core
         /// </summary>
         private void HandleNewServerRpc(bool success, BatchContextSafeHandle ctx)
         {
+            Task.Run(() => AllowOneRpc());
+
             if (success)
             {
                 ServerRpcNew newRpc = ctx.GetServerRpcNew(this);
@@ -290,11 +296,9 @@ namespace Grpc.Core
                 // after server shutdown, the callback returns with null call
                 if (!newRpc.Call.IsInvalid)
                 {
-                    Task.Run(async () => await HandleCallAsync(newRpc)).ConfigureAwait(false);
+                    HandleCallAsync(newRpc);  // we don't need to await.
                 }
             }
-
-            AllowOneRpc();
         }

         /// <summary>
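
Note on the token change: each AllowOneRpc registers one pending RequestCall with the native layer, and previously the next one was only registered at the end of HandleNewServerRpc, so accepting new RPCs was serialized behind the completion callback. A toy model of why several outstanding tokens let accept and handler work overlap (an illustration only, not Grpc.Core internals; AcceptLoopAsync and the counters are made up):

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    class AcceptTokenDemo
    {
        const int InitialTokenCount = 10;   // mirrors InitialAllowRpcTokenCount
        const int TotalRequests = 1000;     // simulated incoming RPCs
        static int claimed;

        // Each "token" is an accept loop; with several running concurrently,
        // a slow handler in one loop does not stall acceptance in the others.
        static async Task AcceptLoopAsync()
        {
            while (Interlocked.Increment(ref claimed) <= TotalRequests)
            {
                await Task.Delay(1);        // stand-in for per-RPC handler work
            }
        }

        static void Main()
        {
            var tokens = new Task[InitialTokenCount];
            for (int i = 0; i < tokens.Length; i++)
            {
                tokens[i] = Task.Run(AcceptLoopAsync);
            }
            Task.WaitAll(tokens);
            Console.WriteLine("processed " + TotalRequests + " simulated RPCs");
        }
    }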

src/csharp/Grpc.IntegrationTesting/ClientRunners.cs

@@ -142,8 +142,7 @@ namespace Grpc.IntegrationTesting
                 for (int i = 0; i < outstandingRpcsPerChannel; i++)
                 {
                     var timer = CreateTimer(loadParams, 1.0 / this.channels.Count / outstandingRpcsPerChannel);
-                    var threadBody = GetThreadBody(channel, timer);
-                    this.runnerTasks.Add(Task.Factory.StartNew(threadBody, TaskCreationOptions.LongRunning));
+                    this.runnerTasks.Add(RunClientAsync(channel, timer));
                 }
             }
         }
@@ -269,38 +268,30 @@ namespace Grpc.IntegrationTesting
             }
         }

-        private Action GetThreadBody(Channel channel, IInterarrivalTimer timer)
+        private Task RunClientAsync(Channel channel, IInterarrivalTimer timer)
         {
             if (payloadConfig.PayloadCase == PayloadConfig.PayloadOneofCase.BytebufParams)
             {
                 GrpcPreconditions.CheckArgument(clientType == ClientType.ASYNC_CLIENT, "Generic client only supports async API");
                 GrpcPreconditions.CheckArgument(rpcType == RpcType.STREAMING, "Generic client only supports streaming calls");
-                return () =>
-                {
-                    RunGenericStreamingAsync(channel, timer).Wait();
-                };
+                return RunGenericStreamingAsync(channel, timer);
             }

             GrpcPreconditions.CheckNotNull(payloadConfig.SimpleParams);
             if (clientType == ClientType.SYNC_CLIENT)
             {
                 GrpcPreconditions.CheckArgument(rpcType == RpcType.UNARY, "Sync client can only be used for Unary calls in C#");
-                return () => RunUnary(channel, timer);
+                // create a dedicated thread for the synchronous client
+                return Task.Factory.StartNew(() => RunUnary(channel, timer), TaskCreationOptions.LongRunning);
             }
             else if (clientType == ClientType.ASYNC_CLIENT)
             {
                 switch (rpcType)
                 {
                     case RpcType.UNARY:
-                        return () =>
-                        {
-                            RunUnaryAsync(channel, timer).Wait();
-                        };
+                        return RunUnaryAsync(channel, timer);
                     case RpcType.STREAMING:
-                        return () =>
-                        {
-                            RunStreamingPingPongAsync(channel, timer).Wait();
-                        };
+                        return RunStreamingPingPongAsync(channel, timer);
                 }
             }
             throw new ArgumentException("Unsupported configuration.");
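
Note on the client runner change: previously every outstanding RPC, sync or async, got a dedicated LongRunning thread that blocked in .Wait(); now only the sync client keeps its dedicated thread, while the async clients return their own Task and share thread-pool threads. A minimal side-by-side sketch of the two shapes (DoWorkAsync is a hypothetical stand-in for RunUnaryAsync and friends):

    using System;
    using System.Threading.Tasks;

    class RunnerSketch
    {
        // Hypothetical stand-in for RunUnaryAsync / RunStreamingPingPongAsync.
        static async Task DoWorkAsync()
        {
            await Task.Delay(10);
        }

        static void Main()
        {
            // Before: a dedicated thread is parked on .Wait() for the whole run.
            Task before = Task.Factory.StartNew(
                () => DoWorkAsync().Wait(), TaskCreationOptions.LongRunning);

            // After: hand back the Task itself; no thread is blocked while
            // the async work is in flight.
            Task after = DoWorkAsync();

            Task.WaitAll(before, after);
            Console.WriteLine("both done");
        }
    }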

tools/run_tests/performance/scenario_config.py

@@ -259,18 +259,16 @@ class CSharpLanguage:
             'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
             client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')

-        # TODO(jtattermusch): scenario works locally but fails on jenkins
-        #yield _ping_pong_scenario(
-        #    'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        #    use_unconstrained_client=True,
-        #    categories=[SMOKETEST])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
+            client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+            use_unconstrained_client=True,
+            categories=[SMOKETEST])

-        # TODO(jtattermusch): scenario works locally but fails on jenkins
-        #yield _ping_pong_scenario(
-        #    'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
-        #    client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        #    use_unconstrained_client=True)
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
+            use_unconstrained_client=True)

         yield _ping_pong_scenario(
             'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',