FC Updates from BitShares and myself #21

Closed
nathanielhourt wants to merge 687 commits from dapp-support into latest-fc
6 changed files with 177 additions and 22 deletions
Showing only changes of commit afcb1e3543


@@ -217,6 +217,7 @@ set( fc_sources
src/thread/spin_lock.cpp
src/thread/spin_yield_lock.cpp
src/thread/mutex.cpp
src/thread/parallel.cpp
src/thread/non_preemptable_scope_check.cpp
src/asio.cpp
src/string.cpp


@@ -77,7 +77,8 @@ namespace asio {
public:
default_io_service_scope();
~default_io_service_scope();
static void set_num_threads(uint16_t num_threads);
static uint16_t get_num_threads();
boost::asio::io_service* io;
private:
std::vector<boost::thread*> asio_threads;
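For context, a minimal sketch of how the new accessor pair might be used at a hypothetical application-startup call site (not part of this PR). set_num_threads() must run at most once and before the default io_service is first used, since it asserts that num_io_threads is still 0; the first use then spawns the configured number of threads:

#include <fc/asio.hpp>

int main()
{
   // hypothetical startup configuration
   fc::asio::default_io_service_scope::set_num_threads( 16 );           // at most once, before first use
   fc::asio::default_io_service();                                      // first use spawns the 16 io_service threads
   uint16_t n = fc::asio::default_io_service_scope::get_num_threads();  // n == 16
   return n == 16 ? 0 : 1;
}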


@@ -25,23 +25,24 @@
#pragma once
#include <fc/thread/task.hpp>
#include <fc/thread/thread.hpp>
#include <fc/asio.hpp>
/* NOTE: the methods in this header are NOT to be mixed up with fc's
* multithreading. Parallel functions MUST NOT call fc::thread::yield NOR
* use fc's mutexes etc.!
*/
namespace fc {
namespace detail {
template<typename Task>
class parallel_completion_handler {
public:
parallel_completion_handler( Task* task ) : _task(task) {}
void operator()() { _task->run(); }
private:
Task* _task;
class pool_impl;
class worker_pool {
public:
worker_pool();
~worker_pool();
void post( task_base* task );
private:
pool_impl* my;
};
worker_pool& get_worker_pool();
}
/**
@@ -57,7 +57,7 @@ namespace fc {
fc::task<Result,sizeof(FunctorType)>* tsk =
new fc::task<Result,sizeof(FunctorType)>( fc::forward<Functor>(f), desc );
fc::future<Result> r(fc::shared_ptr< fc::promise<Result> >(tsk,true) );
fc::asio::default_io_service().post( detail::parallel_completion_handler<fc::task<Result,sizeof(FunctorType)>>( tsk ) );
detail::get_worker_pool().post( tsk );
return r;
}
}
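A minimal usage sketch of the entry point this hunk retargets (assuming the enclosing template is fc::do_parallel with a defaulted task-description argument, returning fc::future<Result>; per the NOTE above, the functor must not call fc::thread::yield or use fc's mutexes):

#include <fc/thread/parallel.hpp>

int main()
{
   // hypothetical call site: run a self-contained computation on the worker pool
   fc::future<int> f = fc::do_parallel( [](){ return 6 * 7; } );
   int result = f.wait();   // blocks the calling fc::thread until a pool worker has run the lambda
   return result == 42 ? 0 : 1;
}

With this change the task is executed by a dedicated worker_pool thread instead of being posted to the shared boost::asio io_service.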


@@ -101,10 +101,12 @@ namespace fc {
* @param num_threads the number of threads
*/
void default_io_service_scope::set_num_threads(uint16_t num_threads) {
FC_ASSERT(fc::asio::default_io_service_scope::num_io_threads == 0);
fc::asio::default_io_service_scope::num_io_threads = num_threads;
FC_ASSERT(num_io_threads == 0);
num_io_threads = num_threads;
}
uint16_t default_io_service_scope::get_num_threads() { return num_io_threads; }
/***
* Default constructor
*/
@@ -113,14 +115,14 @@ namespace fc {
io = new boost::asio::io_service();
the_work = new boost::asio::io_service::work(*io);
if (this->num_io_threads == 0)
if( num_io_threads == 0 )
{
// the default was not set by the configuration. Determine a good
// number of threads: hardware_concurrency, but at least 8
this->num_io_threads = std::max( boost::thread::hardware_concurrency(), 8u );
num_io_threads = std::max( boost::thread::hardware_concurrency(), 8u );
}
for( uint16_t i = 0; i < this->num_io_threads; ++i )
for( uint16_t i = 0; i < num_io_threads; ++i )
{
asio_threads.push_back( new boost::thread( [i,this]()
{

src/thread/parallel.cpp (new file, 154 lines)

@@ -0,0 +1,154 @@
/*
* Copyright (c) 2018 The BitShares Blockchain, and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <fc/thread/parallel.hpp>
#include <fc/thread/spin_yield_lock.hpp>
#include <fc/thread/unique_lock.hpp>
#include <fc/asio.hpp>
#include <boost/atomic/atomic.hpp>
#include <boost/lockfree/queue.hpp>
namespace fc {
namespace detail {
class pool_impl;
class idle_notifier_impl : public thread_idle_notifier
{
public:
idle_notifier_impl()
{
is_idle.store(false);
}
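// explicit copy constructor: boost::atomic<bool> is not copyable, so the implicit
// copy constructor would be deleted and std::vector<idle_notifier_impl> would not compile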
idle_notifier_impl( const idle_notifier_impl& copy )
{
id = copy.id;
my_pool = copy.my_pool;
is_idle.store( copy.is_idle.load() );
}
virtual task_base* idle();
virtual void busy()
{
is_idle.store(false);
}
uint32_t id;
pool_impl* my_pool;
boost::atomic<bool> is_idle;
};
class pool_impl
{
public:
pool_impl( const uint16_t num_threads )
{
notifiers.resize( num_threads );
threads.reserve( num_threads );
for( uint32_t i = 0; i < num_threads; i++ )
{
notifiers[i].id = i;
notifiers[i].my_pool = this;
threads.push_back( new thread( "pool worker " + fc::to_string(i), &notifiers[i] ) );
}
}
~pool_impl()
{
for( thread* t : threads)
delete t; // also calls quit()
waiting_tasks.consume_all( [] ( task_base* t ) {
t->cancel( "thread pool quitting" );
});
}
void post( task_base* task )
{
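// fast path: try to hand the task directly to a worker that has registered itself as idle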
idle_notifier_impl* ini;
while( idle_threads.pop( ini ) )
if( ini->is_idle.exchange( false ) )
{ // minor race condition here, a thread might receive a task while it's busy
threads[ini->id]->async_task( task, priority() );
return;
}
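// slow path: repeat the check under pool_lock so a worker registering itself idle
// concurrently (in enqueue_idle_thread) cannot be missed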
boost::unique_lock<fc::spin_yield_lock> lock(pool_lock);
while( idle_threads.pop( ini ) )
if( ini->is_idle.exchange( false ) )
{ // minor race condition here, a thread might receive a task while it's busy
threads[ini->id]->async_task( task, priority() );
return;
}
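// no idle worker found; park the task until the next worker goes idle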
waiting_tasks.push( task );
}
task_base* enqueue_idle_thread( idle_notifier_impl* ini )
{
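// called by an idle worker: return a waiting task if there is one, otherwise
// register the worker as idle and return 0 (no work available)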
task_base* task;
if( waiting_tasks.pop( task ) )
return task;
fc::unique_lock<fc::spin_yield_lock> lock(pool_lock);
if( waiting_tasks.pop( task ) )
return task;
idle_threads.push( ini );
return 0;
}
private:
std::vector<idle_notifier_impl> notifiers;
std::vector<thread*> threads;
boost::lockfree::queue<idle_notifier_impl*> idle_threads;
boost::lockfree::queue<task_base*> waiting_tasks;
fc::spin_yield_lock pool_lock;
};
task_base* idle_notifier_impl::idle()
{
is_idle.store( true );
task_base* result = my_pool->enqueue_idle_thread( this );
if( result ) is_idle.store( false );
return result;
}
worker_pool::worker_pool()
{
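// presumably ensures the default io_service (and with it num_io_threads) is
// initialized before the pool size is read below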
fc::asio::default_io_service();
my = new pool_impl( fc::asio::default_io_service_scope::get_num_threads() );
}
worker_pool::~worker_pool()
{
delete my;
}
void worker_pool::post( task_base* task )
{
my->post( task );
}
worker_pool& get_worker_pool()
{
static worker_pool the_pool;
return the_pool;
}
}
}


@@ -18,10 +18,6 @@ BOOST_AUTO_TEST_CASE(tcpconstructor_test)
class my_io_class : public fc::asio::default_io_service_scope
{
public:
uint16_t get_num_threads()
{
return fc::asio::default_io_service_scope::num_io_threads;
}
static void reset_num_threads() { fc::asio::default_io_service_scope::num_io_threads = 0; }
};