Commit 38a3d89
Merge pull request #4 from SynaptiveMedical/synaptive/dev/sever/COM-466_OverflowPolicy

Overflow Policy
SeverTopan authored Apr 3, 2018
2 parents c48dd19 + 15d0afd commit 38a3d89
Showing 4 changed files with 192 additions and 91 deletions.
47 changes: 29 additions & 18 deletions include/thread_pool/rouser.hpp
@@ -24,6 +24,16 @@ namespace tp
*/
class Rouser final
{
/**
* @brief State An enum representing the Rouser thread's state.
*/
enum class State
{
Initialized,
Running,
Stopped
};

public:
/**
* @brief Rouser Constructor.
@@ -61,9 +71,9 @@ class Rouser final

/**
* @brief start Create the executing thread and start tasks execution.
* @param workers A pointer to the vector containing sibling workers for performing round robin work stealing.
* @param idle_workers A pointer to the slotted bag containing all idle workers.
* @param num_busy_waiters A pointer to the atomic busy waiter counter.
* @param workers A reference to the vector containing sibling workers for performing round robin work stealing.
* @param idle_workers A reference to the slotted bag containing all idle workers.
* @param num_busy_waiters A reference to the atomic busy waiter counter.
* @note The parameters passed into this function generally relate to the global thread pool state.
*/
template <typename Task, template<typename> class Queue>
@@ -72,29 +82,29 @@ class Rouser final
/**
* @brief stop Stop the Rouser's executing thread.
* Waits until the executing thread has finished.
* @note stop() may only be called once start() has been invoked.
* Calls to stop() after the first successful one are no-ops.
*/
void stop();

private:

/**
* @brief threadFunc Executing thread function.
* @param workers A pointer to the vector containing sibling workers for performing round robin work stealing.
* @param idle_workers A pointer to the slotted bag containing all idle workers.
* @param num_busy_waiters A pointer to the atomic busy waiter counter.
* @param workers A reference to the vector containing sibling workers for performing round robin work stealing.
* @param idle_workers A reference to the slotted bag containing all idle workers.
* @param num_busy_waiters A reference to the atomic busy waiter counter.
*/
template <typename Task, template<typename> class Queue>
void threadFunc(std::vector<std::unique_ptr<Worker<Task, Queue>>>& workers, SlottedBag<Queue>& idle_workers, std::atomic<size_t>& num_busy_waiters);

std::atomic<bool> m_running_flag;
std::atomic<bool> m_started_flag;
std::atomic<State> m_state;
std::thread m_thread;
std::chrono::microseconds m_rouse_period;
};

inline Rouser::Rouser(std::chrono::microseconds rouse_period)
: m_running_flag(false)
, m_started_flag(false)
: m_state(State::Initialized)
, m_rouse_period(std::move(rouse_period))
{
}
@@ -108,8 +118,7 @@ inline Rouser& Rouser::operator=(Rouser&& rhs) noexcept
{
if (this != &rhs)
{
m_running_flag = rhs.m_running_flag.load();
m_started_flag = rhs.m_started_flag.load();
m_state = rhs.m_state.load();
m_thread = std::move(rhs.m_thread);
m_rouse_period = std::move(rhs.m_rouse_period);
}
@@ -125,24 +134,26 @@ inline Rouser::~Rouser()
template <typename Task, template<typename> class Queue>
inline void Rouser::start(std::vector<std::unique_ptr<Worker<Task, Queue>>>& workers, SlottedBag<Queue>& idle_workers, std::atomic<size_t>& num_busy_waiters)
{
if (m_started_flag.exchange(true, std::memory_order_acq_rel))
throw std::runtime_error("The Rouser has already been started.");
auto expectedState = State::Initialized;
if (!m_state.compare_exchange_strong(expectedState, State::Running, std::memory_order_acq_rel))
throw std::runtime_error("Cannot start Rouser: it has previously been started or stopped.");

m_running_flag.store(true, std::memory_order_release);
m_thread = std::thread(&Rouser::threadFunc<Task, Queue>, this, std::ref(workers), std::ref(idle_workers), std::ref(num_busy_waiters));
}

inline void Rouser::stop()
{
if (m_running_flag.exchange(false, std::memory_order_acq_rel))
auto expectedState = State::Running;
if (m_state.compare_exchange_strong(expectedState, State::Stopped, std::memory_order_acq_rel))
m_thread.join();
else if (expectedState == State::Initialized)
throw std::runtime_error("Cannot stop Rouser: stop may only be calld after the Rouser has been started.");
}


template <typename Task, template<typename> class Queue>
inline void Rouser::threadFunc(std::vector<std::unique_ptr<Worker<Task, Queue>>>& workers, SlottedBag<Queue>& idle_workers, std::atomic<size_t>& num_busy_waiters)
{
while (m_running_flag.load(std::memory_order_acquire))
while (m_state.load(std::memory_order_acquire) == State::Running)
{
// Try to wake up a thread if there are no current busy waiters.
if (num_busy_waiters.load(std::memory_order_acquire) == 0)
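The hunk above replaces the Rouser's two boolean flags (m_running_flag, m_started_flag) with a single atomic State, so that illegal transitions such as a double start() or a stop() before start() are rejected atomically. Below is a minimal standalone sketch of that lifecycle pattern; the class and member names are illustrative only and are not part of this commit.

#include <atomic>
#include <stdexcept>
#include <thread>

class LifecycleSketch
{
    enum class State { Initialized, Running, Stopped };

    std::atomic<State> m_state{State::Initialized};
    std::thread m_thread;

public:
    void start()
    {
        // Only the Initialized -> Running transition may spawn the thread.
        auto expected = State::Initialized;
        if (!m_state.compare_exchange_strong(expected, State::Running, std::memory_order_acq_rel))
            throw std::runtime_error("Already started or stopped.");

        m_thread = std::thread([this]
        {
            while (m_state.load(std::memory_order_acquire) == State::Running)
                std::this_thread::yield(); // periodic rousing work would go here
        });
    }

    void stop()
    {
        // Only the Running -> Stopped transition joins the thread; a second
        // stop() finds the state already Stopped and becomes a no-op.
        auto expected = State::Running;
        if (m_state.compare_exchange_strong(expected, State::Stopped, std::memory_order_acq_rel))
            m_thread.join();
        else if (expected == State::Initialized)
            throw std::runtime_error("stop() called before start().");
    }

    ~LifecycleSketch()
    {
        // Best-effort shutdown that never throws from the destructor.
        auto expected = State::Running;
        if (m_state.compare_exchange_strong(expected, State::Stopped))
            m_thread.join();
    }
};
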
104 changes: 69 additions & 35 deletions include/thread_pool/thread_pool.hpp
@@ -92,14 +92,27 @@ class GenericThreadPool final

private:
/**
* @brief getWorker Obtain a reference to the local thread's associated worker,
* otherwise return the next worker in the round robin.
* @brief tryPostImpl Try to post a job to the thread pool.
* @param handler Handler to be called from thread pool worker. It has
* to be callable as 'handler()'.
* @param failedWakeupRetryCap The number of retries to perform when worker
* wakeup fails.
* @return 'true' on success, 'false' otherwise.
* @note All exceptions thrown by handler will be suppressed.
*/
Worker<Task, Queue>& getWorker();
template <typename Handler>
bool tryPostImpl(Handler&& handler, size_t failedWakeupRetryCap);

/**
* @brief getWorkerId Obtain the id of the local thread's associated worker,
* otherwise return the next worker id in the round robin.
*/
size_t getWorkerId();

SlottedBag<Queue> m_idle_workers;
WorkerVector m_workers;
Rouser m_rouser;
size_t m_failed_wakeup_retry_cap;
std::atomic<size_t> m_next_worker;
std::atomic<size_t> m_num_busy_waiters;
};
@@ -112,6 +125,7 @@ inline GenericThreadPool<Task, Queue>::GenericThreadPool(ThreadPoolOptions optio
: m_idle_workers(options.threadCount())
, m_workers(options.threadCount())
, m_rouser(options.rousePeriod())
, m_failed_wakeup_retry_cap(options.failedWakeupRetryCap())
, m_next_worker(0)
, m_num_busy_waiters(0)
{
@@ -140,6 +154,7 @@ inline GenericThreadPool<Task, Queue>& GenericThreadPool<Task, Queue>::operator=
m_idle_workers = std::move(rhs.m_idle_workers);
m_workers = std::move(rhs.m_workers);
m_rouser = std::move(rhs.m_rouser);
m_failed_wakeup_retry_cap = rhs.m_failed_wakeup_retry_cap;
m_next_worker = rhs.m_next_worker.load();
m_num_busy_waiters = rhs.m_num_busy_waiters.load();
}
@@ -159,6 +174,22 @@ inline GenericThreadPool<Task, Queue>::~GenericThreadPool()
template <typename Task, template<typename> class Queue>
template <typename Handler>
inline bool GenericThreadPool<Task, Queue>::tryPost(Handler&& handler)
{
return tryPostImpl(std::forward<Handler>(handler), m_failed_wakeup_retry_cap);
}

template <typename Task, template<typename> class Queue>
template <typename Handler>
inline void GenericThreadPool<Task, Queue>::post(Handler&& handler)
{
const auto ok = tryPost(std::forward<Handler>(handler));
if (!ok)
throw std::runtime_error("Thread pool queue is full.");
}

template <typename Task, template<typename> class Queue>
template <typename Handler>
inline bool GenericThreadPool<Task, Queue>::tryPostImpl(Handler&& handler, size_t failedWakeupRetryCap)
{
// This section of the code increases the probability that our thread pool
// is fully utilized (num active workers = argmin(num tasks, num total workers)).
@@ -169,56 +200,59 @@ inline bool GenericThreadPool<Task, Queue>::tryPost(Handler&& handler)
auto result = m_idle_workers.tryEmptyAny();
if (result.first)
{
if (m_workers[result.second]->tryPost(std::forward<Handler>(handler)))
{
m_workers[result.second]->wake();
return true;
}
auto success = m_workers[result.second]->tryPost(std::forward<Handler>(handler));
m_workers[result.second]->wake();

// If post is unsuccessful, we need to re-add the worker to the idle worker bag.
m_idle_workers.fill(result.second);
return false;
// The above post will only fail if the idle worker's queue is full, which is an extremely
// low probability scenario. In that case, we wake the worker and let it get to work on
// processing the items in its queue. We then re-try posting our current task.
if (success)
return true;
else if (failedWakeupRetryCap > 0)
return tryPostImpl(std::forward<Handler>(handler), failedWakeupRetryCap - 1);
}
}

// No idle threads. Our threads are either active or busy waiting
// Either way, submit the work item in a round robin fashion.
if (!getWorker().tryPost(std::forward<Handler>(handler)))
return false; // Worker's task queue is full.

// The following section increases the probability that tasks will not be dropped.
// This is a soft constraint, the strict task dropping bound is covered by the Rouser
// thread's functionality. This code experimentally lowers task response time under
// low thread pool utilization without incurring significant performance penalties at
// high thread pool utilization.
if (m_num_busy_waiters.load(std::memory_order_acquire) == 0)
auto id = getWorkerId();
auto initialWorkerId = id;
do
{
auto result = m_idle_workers.tryEmptyAny();
if (result.first)
m_workers[result.second]->wake();
}
if (m_workers[id]->tryPost(std::forward<Handler>(handler)))
{
// The following section increases the probability that tasks will not be dropped.
// This is a soft constraint, the strict task dropping bound is covered by the Rouser
// thread's functionality. This code experimentally lowers task response time under
// low thread pool utilization without incurring significant performance penalties at
// high thread pool utilization.
if (m_num_busy_waiters.load(std::memory_order_acquire) == 0)
{
auto result = m_idle_workers.tryEmptyAny();
if (result.first)
m_workers[result.second]->wake();
}

return true;
}
return true;
}

template <typename Task, template<typename> class Queue>
template <typename Handler>
inline void GenericThreadPool<Task, Queue>::post(Handler&& handler)
{
const auto ok = tryPost(std::forward<Handler>(handler));
if (!ok)
throw std::runtime_error("Thread pool queue is full.");
++id %= m_workers.size();
} while (id != initialWorkerId);

// All queues in our thread pool are full during one whole iteration.
// We consider this a posting failure case.
return false;
}

template <typename Task, template<typename> class Queue>
inline Worker<Task, Queue>& GenericThreadPool<Task, Queue>::getWorker()
inline size_t GenericThreadPool<Task, Queue>::getWorkerId()
{
auto id = Worker<Task, Queue>::getWorkerIdForCurrentThread();

if (id > m_workers.size())
id = m_next_worker.fetch_add(1, std::memory_order_relaxed) % m_workers.size();

return *m_workers[id];
return id;
}

}
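
From the caller's side, the overflow policy above changes what happens when a post cannot be enqueued: tryPost() retries a failed worker wakeup up to the configured cap and then walks every worker queue once in round-robin order before returning false, while post() converts that false into an exception. A usage sketch follows; the pool object and its Task/Queue template arguments are assumed to be set up elsewhere, and the names here are illustrative.

#include <cstdio>
#include <stdexcept>

// 'Pool' stands in for a concrete GenericThreadPool<Task, Queue> instantiation;
// the exact Task and Queue types are defined outside this diff.
template <typename Pool>
void submitWork(Pool& pool)
{
    // Non-throwing path: false means one full round-robin pass found every
    // worker queue full (and any wakeup retries were exhausted).
    if (!pool.tryPost([] { std::puts("task ran"); }))
        std::puts("pool saturated, running the task inline instead");

    // Throwing path: post() wraps tryPost() and throws when it fails.
    try
    {
        pool.post([] { std::puts("another task"); });
    }
    catch (const std::runtime_error& e)
    {
        std::puts(e.what()); // "Thread pool queue is full."
    }
}
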
35 changes: 34 additions & 1 deletion include/thread_pool/thread_pool_options.hpp
@@ -72,6 +72,7 @@ class ThreadPoolOptions final
*/
ThreadPoolOptions(size_t thread_count = defaultThreadCount()
, size_t queue_size = defaultQueueSize()
, size_t failed_wakeup_retry_cap = defaultFailedWakeupRetryCap()
, BusyWaitOptions busy_wait_options = defaultBusyWaitOptions()
, std::chrono::microseconds rouse_period = defaultRousePeriod());

@@ -87,6 +88,11 @@
*/
void setQueueSize(size_t size);

/**
* @brief setFailedWakeupRetryCap Set retry cap when a worker wakeup fails.
* @param cap The retry cap.
*/
void setFailedWakeupRetryCap(size_t cap);

/**
* @brief setBusyWaitOptions Set the parameters relating to worker busy waiting behaviour.
@@ -111,6 +117,11 @@
*/
size_t queueSize() const;

/**
* @brief failedWakeupRetryCap Return the retry cap when a worker wakeup fails.
*/
size_t failedWakeupRetryCap() const;

/**
* @brief busyWaitOptions Return a reference to the busy wait options.
*/
@@ -131,6 +142,11 @@
*/
static size_t defaultQueueSize();

/**
* @brief defaultFailedWakeupRetryCap Obtain the default retry cap when a worker wakeup fails.
*/
static size_t defaultFailedWakeupRetryCap();

/**
* @brief defaultBusyWaitOptions Obtain the default busy wait options.
*/
@@ -145,6 +161,7 @@
private:
size_t m_thread_count;
size_t m_queue_size;
size_t m_failed_wakeup_retry_cap;
BusyWaitOptions m_busy_wait_options;
std::chrono::microseconds m_rouse_period;
};
@@ -189,9 +206,10 @@ inline ThreadPoolOptions::BusyWaitOptions::IterationFunction ThreadPoolOptions::
return [](size_t i) { return std::chrono::microseconds(static_cast<size_t>(pow(2, i))*1000); };
}
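
For reference, the default iteration function above evaluates to 2^i milliseconds for busy-wait iteration i, i.e. an exponential backoff of 1 ms, 2 ms, 4 ms, 8 ms, and so on.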

inline ThreadPoolOptions::ThreadPoolOptions(size_t thread_count, size_t queue_size, BusyWaitOptions busy_wait_options, std::chrono::microseconds rouse_period)
inline ThreadPoolOptions::ThreadPoolOptions(size_t thread_count, size_t queue_size, size_t failed_wakeup_retry_cap, BusyWaitOptions busy_wait_options, std::chrono::microseconds rouse_period)
: m_thread_count(thread_count)
, m_queue_size(queue_size)
, m_failed_wakeup_retry_cap(failed_wakeup_retry_cap)
, m_busy_wait_options(std::move(busy_wait_options))
, m_rouse_period(std::move(rouse_period))
{
@@ -207,6 +225,11 @@ inline void ThreadPoolOptions::setQueueSize(size_t size)
m_queue_size = std::max<size_t>(1u, size);
}

inline void ThreadPoolOptions::setFailedWakeupRetryCap(size_t cap)
{
m_failed_wakeup_retry_cap = std::max<size_t>(1u, cap);
}

inline void ThreadPoolOptions::setBusyWaitOptions(BusyWaitOptions busy_wait_options)
{
m_busy_wait_options = std::move(busy_wait_options);
@@ -227,6 +250,11 @@ inline size_t ThreadPoolOptions::queueSize() const
return m_queue_size;
}

inline size_t ThreadPoolOptions::failedWakeupRetryCap() const
{
return m_failed_wakeup_retry_cap;
}

inline ThreadPoolOptions::BusyWaitOptions const& ThreadPoolOptions::busyWaitOptions() const
{
return m_busy_wait_options;
@@ -248,6 +276,11 @@ inline size_t ThreadPoolOptions::defaultQueueSize()
return 1024;
}

inline size_t ThreadPoolOptions::defaultFailedWakeupRetryCap()
{
return 5;
}

inline ThreadPoolOptions::BusyWaitOptions ThreadPoolOptions::defaultBusyWaitOptions()
{
return ThreadPoolOptions::BusyWaitOptions();
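
A configuration sketch for the new option follows. The include path is inferred from the repository layout and the tp namespace from rouser.hpp; only setters shown in this diff are used, and everything else keeps its default.

#include <thread_pool/thread_pool_options.hpp>

tp::ThreadPoolOptions makeOptions()
{
    tp::ThreadPoolOptions options;        // thread count, queue size, etc. start at their defaults
    options.setQueueSize(2048);           // bounded task queue size
    options.setFailedWakeupRetryCap(3);   // retry a failed worker wakeup up to 3 times;
                                          // the default is 5 and values are clamped to at least 1
    return options;
}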