commit
1e607d3dc0
23 changed files with 673 additions and 3 deletions
@@ -0,0 +1,138 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/executor/threadpool.h"

namespace grpc_core {

void ThreadPoolWorker::Run() {
  while (true) {
    void* elem;

    if (GRPC_TRACE_FLAG_ENABLED(grpc_thread_pool_trace)) {
      // Update stats and print them.
      gpr_timespec wait_time = gpr_time_0(GPR_TIMESPAN);
      elem = queue_->Get(&wait_time);
      stats_.sleep_time = gpr_time_add(stats_.sleep_time, wait_time);
      gpr_log(GPR_INFO, "ThreadPool Worker [%s %d] Stats: sleep_time %f",
              thd_name_, index_, gpr_timespec_to_micros(stats_.sleep_time));
    } else {
      elem = queue_->Get(nullptr);
    }
    if (elem == nullptr) {
      break;
    }
    // Run the closure.
    auto* closure =
        static_cast<grpc_experimental_completion_queue_functor*>(elem);
    closure->functor_run(closure, closure->internal_success);
  }
}

void ThreadPool::SharedThreadPoolConstructor() {
  // All worker threads in the thread pool must be joinable.
  thread_options_.set_joinable(true);

  // Create at least 1 worker thread.
  if (num_threads_ <= 0) num_threads_ = 1;

  queue_ = New<InfLenFIFOQueue>();
  threads_ = static_cast<ThreadPoolWorker**>(
      gpr_zalloc(num_threads_ * sizeof(ThreadPoolWorker*)));
  for (int i = 0; i < num_threads_; ++i) {
    threads_[i] =
        New<ThreadPoolWorker>(thd_name_, this, queue_, thread_options_, i);
    threads_[i]->Start();
  }
}

size_t ThreadPool::DefaultStackSize() {
#if defined(__ANDROID__) || defined(__APPLE__)
  return 1952 * 1024;
#else
  return 64 * 1024;
#endif
}

void ThreadPool::AssertHasNotBeenShutDown() {
  // For debug checking purposes, RELAXED order is sufficient.
  GPR_DEBUG_ASSERT(!shut_down_.Load(MemoryOrder::RELAXED));
}

ThreadPool::ThreadPool(int num_threads) : num_threads_(num_threads) {
  thd_name_ = "ThreadPoolWorker";
  thread_options_ = Thread::Options();
  thread_options_.set_stack_size(DefaultStackSize());
  SharedThreadPoolConstructor();
}

ThreadPool::ThreadPool(int num_threads, const char* thd_name)
    : num_threads_(num_threads), thd_name_(thd_name) {
  thread_options_ = Thread::Options();
  thread_options_.set_stack_size(DefaultStackSize());
  SharedThreadPoolConstructor();
}

ThreadPool::ThreadPool(int num_threads, const char* thd_name,
                       const Thread::Options& thread_options)
    : num_threads_(num_threads),
      thd_name_(thd_name),
      thread_options_(thread_options) {
  if (thread_options_.stack_size() == 0) {
    thread_options_.set_stack_size(DefaultStackSize());
  }
  SharedThreadPoolConstructor();
}

ThreadPool::~ThreadPool() {
  // For debug checking purposes, RELAXED order is sufficient.
  shut_down_.Store(true, MemoryOrder::RELAXED);

  for (int i = 0; i < num_threads_; ++i) {
    queue_->Put(nullptr);
  }

  for (int i = 0; i < num_threads_; ++i) {
    threads_[i]->Join();
  }

  for (int i = 0; i < num_threads_; ++i) {
    Delete(threads_[i]);
  }
  gpr_free(threads_);
  Delete(queue_);
}

void ThreadPool::Add(grpc_experimental_completion_queue_functor* closure) {
  AssertHasNotBeenShutDown();
  queue_->Put(static_cast<void*>(closure));
}

int ThreadPool::num_pending_closures() const { return queue_->count(); }

int ThreadPool::pool_capacity() const { return num_threads_; }

const Thread::Options& ThreadPool::thread_options() const {
  return thread_options_;
}

const char* ThreadPool::thread_name() const { return thd_name_; }
}  // namespace grpc_core
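Note: the worker loop above treats every queue element as a grpc_experimental_completion_queue_functor and dispatches it through the struct's functor_run field. A minimal sketch of a closure this loop can execute, modeled on the SimpleFunctorForAdd functor in the test file below (the PrintFunctor name and log message are illustrative only, not part of this change):

// Minimal closure sketch: the worker calls functor_run(closure, internal_success).
class PrintFunctor : public grpc_experimental_completion_queue_functor {
 public:
  PrintFunctor() {
    functor_run = &PrintFunctor::Run;  // entry point invoked by ThreadPoolWorker::Run()
    internal_success = 1;              // forwarded as the "ok" argument
  }
  static void Run(grpc_experimental_completion_queue_functor* cb, int ok) {
    gpr_log(GPR_INFO, "PrintFunctor ran, ok=%d", ok);
  }
};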
@@ -0,0 +1,153 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H
#define GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H

#include <grpc/support/port_platform.h>

#include <grpc/grpc.h>

#include "src/core/lib/gprpp/thd.h"
#include "src/core/lib/iomgr/executor/mpmcqueue.h"

namespace grpc_core {

// An abstract base class for thread pools.
// A thread pool is an executor that maintains a pool of threads sitting
// around and waiting for closures. It also maintains a queue of pending
// closures; when closures appear in the queue, the threads in the pool pull
// them out and execute them.
class ThreadPoolInterface {
 public:
  // Waits for all pending closures to complete, then shuts down the thread
  // pool.
  virtual ~ThreadPoolInterface() {}

  // Schedules a given closure for execution later.
  // Depending on the specific subclass implementation, this routine might
  // block the current thread (when it is unable to schedule).
  // A closure should contain a function pointer and the arguments it takes;
  // see the closure struct in /grpc/include/grpc/impl/codegen/grpc_types.h
  // for details.
  virtual void Add(grpc_experimental_completion_queue_functor* closure)
      GRPC_ABSTRACT;

  // Returns the current number of pending closures.
  virtual int num_pending_closures() const GRPC_ABSTRACT;

  // Returns the capacity of the pool (the number of worker threads in it).
  virtual int pool_capacity() const GRPC_ABSTRACT;

  // Thread option accessor.
  virtual const Thread::Options& thread_options() const GRPC_ABSTRACT;

  // Returns the thread name for threads in this ThreadPool.
  virtual const char* thread_name() const GRPC_ABSTRACT;

  GRPC_ABSTRACT_BASE_CLASS
};

// Worker thread for a thread pool. Executes closures from the queue until it
// gets a NULL closure.
class ThreadPoolWorker {
 public:
  ThreadPoolWorker(const char* thd_name, ThreadPoolInterface* pool,
                   MPMCQueueInterface* queue, Thread::Options& options,
                   int index)
      : queue_(queue), thd_name_(thd_name), index_(index) {
    thd_ = Thread(thd_name,
                  [](void* th) { static_cast<ThreadPoolWorker*>(th)->Run(); },
                  this, nullptr, options);
  }

  ~ThreadPoolWorker() {}

  void Start() { thd_.Start(); }
  void Join() { thd_.Join(); }

 private:
  // Struct for tracking stats of the thread.
  struct Stats {
    gpr_timespec sleep_time;
    Stats() { sleep_time = gpr_time_0(GPR_TIMESPAN); }
  };

  void Run();  // Pulls closures from the queue and executes them

  MPMCQueueInterface* queue_;  // Queue in the thread pool to pull closures from
  Thread thd_;                 // Wrapped thread
  Stats stats_;                // Stats collected at run time
  const char* thd_name_;       // Name of the thread
  int index_;                  // Index in the thread pool
};

// A fixed-size thread pool implementation of the abstract thread pool
// interface. In this implementation the number of threads in the pool is
// fixed, but the capacity of the closure queue is unlimited.
class ThreadPool : public ThreadPoolInterface {
 public:
  // Creates a thread pool of size "num_threads", with the default thread name
  // "ThreadPoolWorker" and all thread options set to their defaults. If the
  // given size is 0 or less, 1 worker thread is created inside the pool.
  ThreadPool(int num_threads);

  // Same as the ThreadPool(int num_threads) constructor, except that it also
  // sets "thd_name" as the name of all threads in the thread pool.
  ThreadPool(int num_threads, const char* thd_name);

  // Same as the ThreadPool(int num_threads, const char* thd_name)
  // constructor, except that it also sets thread_options for the threads.
  // Note on stack size:
  // If the stack size field of the passed-in Thread::Options is set to the
  // default value 0, the default ThreadPool stack size is used. The current
  // default stack size of this implementation is 1952K for mobile platforms
  // and 64K for all others.
  ThreadPool(int num_threads, const char* thd_name,
             const Thread::Options& thread_options);

  // Waits for all pending closures to complete, then shuts down the thread
  // pool.
  ~ThreadPool() override;

  // Adds a given closure to the pending queue immediately. Since the closure
  // queue has infinite length, this routine does not block.
  void Add(grpc_experimental_completion_queue_functor* closure) override;

  int num_pending_closures() const override;
  int pool_capacity() const override;
  const Thread::Options& thread_options() const override;
  const char* thread_name() const override;

 private:
  int num_threads_ = 0;
  const char* thd_name_ = nullptr;
  Thread::Options thread_options_;
  ThreadPoolWorker** threads_ = nullptr;  // Array of worker threads
  MPMCQueueInterface* queue_ = nullptr;   // Closure queue

  Atomic<bool> shut_down_{false};  // True once the destructor has been called

  void SharedThreadPoolConstructor();
  // For ThreadPool, the default stack size for mobile platforms is 1952K; for
  // other platforms it is 64K.
  size_t DefaultStackSize();
  // Internal use only, for debug checking.
  void AssertHasNotBeenShutDown();
};

}  // namespace grpc_core

#endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_THREADPOOL_H */
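Putting the interface together, typical usage looks like the sketch below, based on the tests in this change. The pool name, thread count, and stack size are illustrative assumptions, and my_functor stands in for a pointer to a grpc_experimental_completion_queue_functor such as the test file's SimpleFunctorForAdd:

// Sketch: construct a fixed-size pool, schedule work, let the destructor drain it.
grpc_core::Thread::Options options;
options.set_stack_size(256 * 1024);  // illustrative; 0 falls back to DefaultStackSize()
grpc_core::ThreadPool* pool =
    grpc_core::New<grpc_core::ThreadPool>(4, "my_pool", options);
pool->Add(my_functor);    // enqueue; never blocks, the queue is unbounded
grpc_core::Delete(pool);  // waits for pending closures, then joins all workers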
@@ -0,0 +1,192 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/executor/threadpool.h"

#include "test/core/util/test_config.h"

static const int kSmallThreadPoolSize = 20;
static const int kLargeThreadPoolSize = 100;
static const int kThreadSmallIter = 100;
static const int kThreadLargeIter = 10000;

static void test_size_zero(void) {
  gpr_log(GPR_INFO, "test_size_zero");
  grpc_core::ThreadPool* pool_size_zero =
      grpc_core::New<grpc_core::ThreadPool>(0);
  GPR_ASSERT(pool_size_zero->pool_capacity() == 1);
  Delete(pool_size_zero);
}

static void test_constructor_option(void) {
  gpr_log(GPR_INFO, "test_constructor_option");
  // Tests options
  grpc_core::Thread::Options options;
  options.set_stack_size(192 * 1024);  // Random non-default value
  grpc_core::ThreadPool* pool = grpc_core::New<grpc_core::ThreadPool>(
      0, "test_constructor_option", options);
  GPR_ASSERT(pool->thread_options().stack_size() == options.stack_size());
  Delete(pool);
}

// Simple functor for testing. It counts how many times it is called.
class SimpleFunctorForAdd : public grpc_experimental_completion_queue_functor {
 public:
  friend class SimpleFunctorCheckForAdd;
  SimpleFunctorForAdd() {
    functor_run = &SimpleFunctorForAdd::Run;
    internal_next = this;
    internal_success = 0;
  }
  ~SimpleFunctorForAdd() {}
  static void Run(struct grpc_experimental_completion_queue_functor* cb,
                  int ok) {
    auto* callback = static_cast<SimpleFunctorForAdd*>(cb);
    callback->count_.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);
  }

  int count() { return count_.Load(grpc_core::MemoryOrder::RELAXED); }

 private:
  grpc_core::Atomic<int> count_{0};
};

static void test_add(void) {
  gpr_log(GPR_INFO, "test_add");
  grpc_core::ThreadPool* pool =
      grpc_core::New<grpc_core::ThreadPool>(kSmallThreadPoolSize, "test_add");

  SimpleFunctorForAdd* functor = grpc_core::New<SimpleFunctorForAdd>();
  for (int i = 0; i < kThreadSmallIter; ++i) {
    pool->Add(functor);
  }
  grpc_core::Delete(pool);
  GPR_ASSERT(functor->count() == kThreadSmallIter);
  grpc_core::Delete(functor);
  gpr_log(GPR_DEBUG, "Done.");
}

// Thread that adds closures to the pool.
class WorkThread {
 public:
  WorkThread(grpc_core::ThreadPool* pool, SimpleFunctorForAdd* cb, int num_add)
      : num_add_(num_add), cb_(cb), pool_(pool) {
    thd_ = grpc_core::Thread(
        "thread_pool_test_add_thd",
        [](void* th) { static_cast<WorkThread*>(th)->Run(); }, this);
  }
  ~WorkThread() {}

  void Start() { thd_.Start(); }
  void Join() { thd_.Join(); }

 private:
  void Run() {
    for (int i = 0; i < num_add_; ++i) {
      pool_->Add(cb_);
    }
  }

  int num_add_;
  SimpleFunctorForAdd* cb_;
  grpc_core::ThreadPool* pool_;
  grpc_core::Thread thd_;
};

static void test_multi_add(void) {
  gpr_log(GPR_INFO, "test_multi_add");
  const int num_work_thds = 10;
  grpc_core::ThreadPool* pool = grpc_core::New<grpc_core::ThreadPool>(
      kLargeThreadPoolSize, "test_multi_add");
  SimpleFunctorForAdd* functor = grpc_core::New<SimpleFunctorForAdd>();
  WorkThread** work_thds = static_cast<WorkThread**>(
      gpr_zalloc(sizeof(WorkThread*) * num_work_thds));
  gpr_log(GPR_DEBUG, "Fork threads for adding...");
  for (int i = 0; i < num_work_thds; ++i) {
    work_thds[i] = grpc_core::New<WorkThread>(pool, functor, kThreadLargeIter);
    work_thds[i]->Start();
  }
  // Wait for all threads to finish
  gpr_log(GPR_DEBUG, "Waiting for all work threads finish...");
  for (int i = 0; i < num_work_thds; ++i) {
    work_thds[i]->Join();
    grpc_core::Delete(work_thds[i]);
  }
  gpr_free(work_thds);
  gpr_log(GPR_DEBUG, "Done.");
  gpr_log(GPR_DEBUG, "Waiting for all closures finish...");
  // The destructor of the thread pool waits for all closures to finish
  grpc_core::Delete(pool);
  GPR_ASSERT(functor->count() == kThreadLargeIter * num_work_thds);
  grpc_core::Delete(functor);
  gpr_log(GPR_DEBUG, "Done.");
}

// Checks the current count against a given number.
class SimpleFunctorCheckForAdd
    : public grpc_experimental_completion_queue_functor {
 public:
  SimpleFunctorCheckForAdd(int ok, int* count) : count_(count) {
    functor_run = &SimpleFunctorCheckForAdd::Run;
    internal_success = ok;
  }
  ~SimpleFunctorCheckForAdd() {}
  static void Run(struct grpc_experimental_completion_queue_functor* cb,
                  int ok) {
    auto* callback = static_cast<SimpleFunctorCheckForAdd*>(cb);
    (*callback->count_)++;
    GPR_ASSERT(*callback->count_ == callback->internal_success);
  }

 private:
  int* count_;
};

static void test_one_thread_FIFO(void) {
  gpr_log(GPR_INFO, "test_one_thread_FIFO");
  int counter = 0;
  grpc_core::ThreadPool* pool =
      grpc_core::New<grpc_core::ThreadPool>(1, "test_one_thread_FIFO");
  SimpleFunctorCheckForAdd** check_functors =
      static_cast<SimpleFunctorCheckForAdd**>(
          gpr_zalloc(sizeof(SimpleFunctorCheckForAdd*) * kThreadSmallIter));
  for (int i = 0; i < kThreadSmallIter; ++i) {
    check_functors[i] =
        grpc_core::New<SimpleFunctorCheckForAdd>(i + 1, &counter);
    pool->Add(check_functors[i]);
  }
  // The destructor of the pool waits until all closures are finished.
  grpc_core::Delete(pool);
  for (int i = 0; i < kThreadSmallIter; ++i) {
    grpc_core::Delete(check_functors[i]);
  }
  gpr_free(check_functors);
  gpr_log(GPR_DEBUG, "Done.");
}

int main(int argc, char** argv) {
  grpc::testing::TestEnvironment env(argc, argv);
  grpc_init();
  test_size_zero();
  test_constructor_option();
  test_add();
  test_multi_add();
  test_one_thread_FIFO();
  grpc_shutdown();
  return 0;
}