2013-06-05 19:19:00 +00:00
# include <fc/thread/thread.hpp>
2012-09-08 02:50:37 +00:00
# include <fc/time.hpp>
# include <boost/thread.hpp>
# include "context.hpp"
# include <boost/thread/condition_variable.hpp>
# include <boost/thread.hpp>
# include <boost/atomic.hpp>
# include <vector>
2013-06-05 19:19:00 +00:00
//#include <fc/logger.hpp>
2014-10-14 18:21:42 +00:00
2012-09-08 02:50:37 +00:00
namespace fc {
struct sleep_priority_less {
bool operator ( ) ( const context : : ptr & a , const context : : ptr & b ) {
return a - > resume_time > b - > resume_time ;
}
} ;
2018-10-03 08:47:02 +00:00
namespace detail {
   // RAII guard constructed in process_tasks() just before the thread blocks
   // waiting for work.  Ctor/dtor are defined out-of-line; presumably they
   // inform the thread's thread_idle_notifier that the thread is going
   // idle / becoming busy again -- NOTE(review): confirm in the .cpp file.
   class idle_guard {
   public:
      explicit idle_guard( thread_d* t );
      ~idle_guard();
   private:
      thread_idle_notifier* notifier;
   };
}
2012-09-08 02:50:37 +00:00
class thread_d {
public:
   using context_pair = std::pair<thread_d*, fc::context*>;

   /// Per-thread scheduler state backing an fc::thread.
   /// @param s  the fc::thread this state belongs to
   /// @param n  optional idle notifier (may be null)
   thread_d( fc::thread& s, thread_idle_notifier* n = 0 )
    : self(s), boost_thread(0),
      task_in_queue(0),
      next_posted_num(1),
      done(false),
      current(0),
      pt_head(0),
      blocked(0),
      next_unused_task_storage_slot(0),
      notifier(n)
#ifndef NDEBUG
      ,non_preemptable_scope_count(0)
#endif
   {
      // process-wide counter gives each thread a short unique-ish name
      // ("th_a", "th_b", ...) for log messages
      static boost::atomic<int> cnt(0);
      name = std::string("th_") + char('a' + cnt++);
    //printf("thread=%p\n",this);
   }

   ~thread_d()
   {
      delete current;
      current = nullptr;
      fc::context* temp;
      // release any task still attached to a ready context before
      // deleting the context itself
      for (fc::context* ready_context : ready_heap)
      {
        if (ready_context->cur_task)
        {
          ready_context->cur_task->release();
          ready_context->cur_task = nullptr;
        }
        delete ready_context;
      }
      ready_heap.clear();
      // free the singly-linked list of blocked contexts
      while (blocked)
      {
        temp = blocked->next;
        delete blocked;
        blocked = temp;
      }
      /*
      while( pt_head )
      {
        temp = pt_head->next;
        delete pt_head;
        pt_head = temp;
      }
      */
      //ilog("");
      // the underlying OS thread is detached, not joined, on teardown
      if (boost_thread)
      {
        boost_thread->detach();
        delete boost_thread;
      }
   }
2012-09-08 02:50:37 +00:00
   fc::thread&               self;           // the public fc::thread this state backs
   boost::thread*            boost_thread;   // underlying OS thread (null for the main thread)
   stack_allocator           stack_alloc;    // allocates fiber stacks for new contexts
   boost::condition_variable task_ready;     // signaled when new work is posted while idle
   boost::mutex              task_ready_mutex; // guards the idle wait on task_ready
   boost::atomic<task_base*> task_in_queue;  // lock-free inbox: linked list of newly posted tasks
   std::vector<task_base*>   task_pqueue;    // heap of tasks that have never started, ordered by priority & scheduling time
   uint64_t                  next_posted_num; // each task or context gets assigned a number in the order it is ready to execute, tracked here
   std::vector<task_base*>   task_sch_queue; // heap of tasks that have never started but are scheduled for a time in the future, ordered by the time they should be run
   std::vector<fc::context*> sleep_pqueue;   // heap of running tasks that have sleeped, ordered by the time they should resume
   std::vector<fc::context*> free_list;      // list of unused contexts that are ready for deletion

   bool                      done;           // set when the thread is quitting
   std::string               name;           // short name for logging ("th_a", ...)
   fc::context*              current;        // the currently-executing task in this thread

   fc::context*              pt_head;        // list of contexts that can be reused for new tasks

   std::vector<fc::context*> ready_heap;     // priority heap of contexts that are ready to run

   fc::context*              blocked;        // linked list of contexts (using 'next_blocked') blocked on promises via wait()

   // values for thread specific data objects for this thread
   std::vector<detail::specific_data_info> thread_specific_data;
   // values for task_specific data for code executing on a thread that's
   // not a task launched by async (usually the default task on the main
   // thread in a process)
   std::vector<detail::specific_data_info> non_task_specific_data;
   unsigned                  next_unused_task_storage_slot;

   thread_idle_notifier*     notifier;       // told when this thread has nothing to do (may be null)

#ifndef NDEBUG
   unsigned                  non_preemptable_scope_count; // depth of ASSERT_TASK_NOT_PREEMPTED scopes
#endif
2012-09-08 02:50:37 +00:00
2013-06-05 19:19:00 +00:00
#if 0
2019-04-04 11:46:38 +00:00
void debug ( const std : : string & s ) {
2013-08-19 18:44:13 +00:00
return ;
2013-06-05 19:19:00 +00:00
//boost::unique_lock<boost::mutex> lock(log_mutex());
2012-09-08 02:50:37 +00:00
2013-06-05 19:19:00 +00:00
fc : : cerr < < " --------------------- " < < s . c_str ( ) < < " - " < < current ;
if ( current & & current - > cur_task ) fc : : cerr < < ' ( ' < < current - > cur_task - > get_desc ( ) < < ' ) ' ;
fc : : cerr < < " --------------------------- \n " ;
fc : : cerr < < " Ready \n " ;
2012-09-08 02:50:37 +00:00
fc : : context * c = ready_head ;
while ( c ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < " " < < c ;
if ( c - > cur_task ) fc : : cerr < < ' ( ' < < c - > cur_task - > get_desc ( ) < < ' ) ' ;
2012-09-08 02:50:37 +00:00
fc : : context * p = c - > caller_context ;
while ( p ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < " -> " < < p ;
2012-09-08 02:50:37 +00:00
p = p - > caller_context ;
}
2013-06-05 19:19:00 +00:00
fc : : cerr < < " \n " ;
2012-09-08 02:50:37 +00:00
c = c - > next ;
}
2013-06-05 19:19:00 +00:00
fc : : cerr < < " Blocked \n " ;
2012-09-08 02:50:37 +00:00
c = blocked ;
while ( c ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < " ctx: " < < c ;
if ( c - > cur_task ) fc : : cerr < < ' ( ' < < c - > cur_task - > get_desc ( ) < < ' ) ' ;
fc : : cerr < < " blocked on prom: " ;
2012-09-08 02:50:37 +00:00
for ( uint32_t i = 0 ; i < c - > blocking_prom . size ( ) ; + + i ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < c - > blocking_prom [ i ] . prom < < ' ( ' < < c - > blocking_prom [ i ] . prom - > get_desc ( ) < < ' ) ' ;
2012-09-08 02:50:37 +00:00
if ( i + 1 < c - > blocking_prom . size ( ) ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < " , " ;
2012-09-08 02:50:37 +00:00
}
}
fc : : context * p = c - > caller_context ;
while ( p ) {
2013-06-05 19:19:00 +00:00
fc : : cerr < < " -> " < < p ;
2012-09-08 02:50:37 +00:00
p = p - > caller_context ;
}
2013-06-05 19:19:00 +00:00
fc : : cerr < < " \n " ;
2012-09-08 02:50:37 +00:00
c = c - > next_blocked ;
}
2013-06-05 19:19:00 +00:00
fc : : cerr < < " ------------------------------------------------- \n " ;
2012-09-08 02:50:37 +00:00
}
2013-06-05 19:19:00 +00:00
# endif
2012-09-08 02:50:37 +00:00
   // insert at front of blocked linked list (chained via 'next_blocked')
   inline void add_to_blocked( fc::context* c )
   {
      c->next_blocked = blocked;
      blocked = c;
   }

   // return a finished/idle context to the reusable-context list (pt_head)
   void pt_push_back( fc::context* c )
   {
      c->next = pt_head;
      pt_head = c;
      /*
      fc::context* n = pt_head;
      int i = 0;
      while( n ) {
        ++i;
        n = n->next;
      }
      wlog( "idle context...%2% %1%", c, i );
      */
   }
2014-10-13 15:44:16 +00:00
2014-10-16 20:26:19 +00:00
   // pop and return the highest-priority ready context from ready_heap
   fc::context::ptr ready_pop_front()
   {
      fc::context* highest_priority_context = ready_heap.front();
      std::pop_heap(ready_heap.begin(), ready_heap.end(), task_priority_less());
      ready_heap.pop_back();
      return highest_priority_context;
   }

   // make a context eligible to run: stamp it with the next posting number
   // (used as a priority tie-breaker) and push it on the ready heap.
   // NOTE(review): the 'at_end' argument is currently ignored -- ordering is
   // determined solely by priority and posting order.
   void add_context_to_ready_list( context* context_to_add, bool at_end = false )
   {
      context_to_add->context_posted_num = next_posted_num++;
      ready_heap.push_back(context_to_add);
      std::push_heap(ready_heap.begin(), ready_heap.end(), task_priority_less());
   }
2014-10-14 18:21:42 +00:00
struct task_priority_less
{
2014-10-16 20:26:19 +00:00
bool operator ( ) ( const task_base * a , const task_base * b ) const
{
return a - > _prio . value < b - > _prio . value ? true :
( a - > _prio . value > b - > _prio . value ? false :
a - > _posted_num > b - > _posted_num ) ;
}
bool operator ( ) ( const task_base * a , const context * b ) const
{
return a - > _prio . value < b - > prio . value ? true :
( a - > _prio . value > b - > prio . value ? false :
a - > _posted_num > b - > context_posted_num ) ;
}
bool operator ( ) ( const context * a , const task_base * b ) const
{
return a - > prio . value < b - > _prio . value ? true :
( a - > prio . value > b - > _prio . value ? false :
a - > context_posted_num > b - > _posted_num ) ;
}
bool operator ( ) ( const context * a , const context * b ) const
2014-10-14 18:21:42 +00:00
{
2014-10-16 20:26:19 +00:00
return a - > prio . value < b - > prio . value ? true :
( a - > prio . value > b - > prio . value ? false :
a - > context_posted_num > b - > context_posted_num ) ;
2014-10-14 18:21:42 +00:00
}
} ;
   // Comparator for task_sch_queue: keeps the task with the earliest
   // scheduled time (_when) at the front of the heap.
   struct task_when_less
   {
      bool operator()( task_base* a, task_base* b )
      {
         return a->_when > b->_when;
      }
   };
2012-09-08 02:50:37 +00:00
2014-10-03 20:52:45 +00:00
   // Distribute a linked list of newly posted tasks (from task_in_queue)
   // into task_pqueue (ready now) or task_sch_queue (scheduled for later).
   void enqueue( task_base* t )
   {
      time_point now = time_point::now();
      task_base* cur = t;
      // the linked list of tasks passed to enqueue is in the reverse order of
      // what you'd expect -- the first task to be scheduled is at the end of
      // the list.  We'll rectify the ordering by assigning the _posted_num
      // in reverse order
      unsigned num_ready_tasks = 0;
      while (cur)
      {
        if (cur->_when <= now)
          ++num_ready_tasks;
        cur = cur->_next;
      }
      cur = t;
      // reserve a contiguous range of posting numbers for the ready tasks
      next_posted_num += num_ready_tasks;
      unsigned tasks_posted = 0;
      while (cur)
      {
        if (cur->_when > now)
        {
          // future-scheduled: goes on the time-ordered heap
          task_sch_queue.push_back(cur);
          std::push_heap(task_sch_queue.begin(),
                         task_sch_queue.end(), task_when_less());
        }
        else
        {
          // ready now: number counts *down* through the reserved range to
          // undo the reversed list order (first-posted gets lowest number)
          cur->_posted_num = next_posted_num - (++tasks_posted);
          task_pqueue.push_back(cur);
          std::push_heap(task_pqueue.begin(),
                         task_pqueue.end(), task_priority_less());
          BOOST_ASSERT(this == thread::current().my);
        }
        cur = cur->_next;
      }
   }
2014-10-03 20:52:45 +00:00
2014-10-14 18:21:42 +00:00
   // Drain the lock-free inbox and promote any scheduled tasks whose time
   // has arrived onto the runnable task heap.  Must run on this fc thread.
   void move_newly_scheduled_tasks_to_task_pqueue()
   {
      BOOST_ASSERT(this == thread::current().my);

      // first, if there are any new tasks on 'task_in_queue', which is tasks that
      // have been just been async or scheduled, but we haven't processed them.
      // move them into the task_sch_queue or task_pqueue, as appropriate

      //DLN: changed from memory_order_consume for boost 1.55.
      //This appears to be safest replacement for now, maybe
      //can be changed to relaxed later, but needs analysis.
      task_base* pending_list = task_in_queue.exchange(0, boost::memory_order_seq_cst);
      if (pending_list)
        enqueue(pending_list);

      // second, walk through task_sch_queue and move any scheduled tasks that are now
      // able to run (because their scheduled time has arrived) to task_pqueue
      while (!task_sch_queue.empty() &&
             task_sch_queue.front()->_when <= time_point::now())
      {
        task_base* ready_task = task_sch_queue.front();
        std::pop_heap(task_sch_queue.begin(), task_sch_queue.end(), task_when_less());
        task_sch_queue.pop_back();

        ready_task->_posted_num = next_posted_num++;
        task_pqueue.push_back(ready_task);
        std::push_heap(task_pqueue.begin(), task_pqueue.end(), task_priority_less());
      }
   }
2014-10-13 15:44:16 +00:00
   // Pop and return the highest-priority runnable task.
   // Precondition: task_pqueue is not empty; must run on this fc thread.
   task_base* dequeue()
   {
      // get a new task
      BOOST_ASSERT( this == thread::current().my );
      assert(!task_pqueue.empty());
      task_base* p = task_pqueue.front();
      std::pop_heap(task_pqueue.begin(), task_pqueue.end(), task_priority_less());
      task_pqueue.pop_back();
      return p;
   }
2014-06-29 01:46:10 +00:00
   // Run-and-release any scheduled tasks that were canceled before their
   // time arrived (run() lets them deliver the cancellation to waiters).
   // Returns true if anything was removed; the heap is rebuilt in that case.
   bool process_canceled_tasks()
   {
      bool canceled_task = false;
      for (auto task_itr = task_sch_queue.begin();
           task_itr != task_sch_queue.end();
           )
      {
        if ((*task_itr)->canceled())
        {
          (*task_itr)->run();
          (*task_itr)->release(); // HERE BE DRAGONS
          task_itr = task_sch_queue.erase(task_itr);
          canceled_task = true;
          continue;
        }
        ++task_itr;
      }
      // erasing from the middle broke the heap property; restore it
      if (canceled_task)
        std::make_heap(task_sch_queue.begin(), task_sch_queue.end(), task_when_less());
      return canceled_task;
   }
2012-09-08 02:50:37 +00:00
   /**
    *  This should be called before or after a context switch to
    *  detect quit/cancel operations and throw an exception.
    */
   void check_fiber_exceptions()
   {
      if( current && current->canceled )
      {
#ifdef NDEBUG
        FC_THROW_EXCEPTION( canceled_exception, "" );
#else
        // debug builds carry the human-readable cancellation reason
        FC_THROW_EXCEPTION( canceled_exception, "cancellation reason: ${reason}", ("reason", current->cancellation_reason ? current->cancellation_reason : "[none given]") );
#endif
      }
      else if( done )
      {
        ilog( "throwing canceled exception" );
        FC_THROW_EXCEPTION( canceled_exception, "cancellation reason: thread quitting" );
        // BOOST_THROW_EXCEPTION( thread_quit() );
      }
   }
   /**
    *   Find the next available context and switch to it.
    *   If none are available then create a new context and
    *   have it wait for something to do.
    *
    *   @param reschedule  if true, the calling context is put back on the
    *          ready list (at reduced priority) before switching away, so it
    *          will be resumed soon rather than staying blocked.
    *   @return true after the context has been resumed; false only on the
    *           (asserted-unreachable) next==current case.
    */
   bool start_next_fiber( bool reschedule = false )
   {
      /* If this assert fires, it means you are executing an operation that is causing
       * the current task to yield, but there is a ASSERT_TASK_NOT_PREEMPTED() in effect
       * (somewhere up the stack) */
      assert(non_preemptable_scope_count == 0);

      /* If this assert fires, it means you are causing the current task to yield while
       * in the middle of handling an exception.  The boost::context library's behavior
       * is not well-defined in this case, and this has the potential to corrupt the
       * exception stack, often resulting in a crash very soon after this */
      /* NB: At least on Win64, this only catches a yield while in the body of
       * a catch block; it fails to catch a yield while unwinding the stack, which
       * is probably just as likely to cause crashes */
      assert(std::current_exception() == std::exception_ptr());

      check_for_timeouts();
      if( !current )
        current = new fc::context(&fc::thread::current());

      // saved so the (possibly lowered) priority can be restored when we
      // are resumed at the bottom of this function
      priority original_priority = current->prio;

      // check to see if any other contexts are ready
      if( !ready_heap.empty() )
      {
        fc::context* next = ready_pop_front();
        if( next == current )
        {
          // elog( "next == current... something went wrong" );
          assert(next != current);
          return false;
        }
        BOOST_ASSERT(next != current);

        // jump to next context, saving current context
        fc::context* prev = current;
        current = next;
        if( reschedule )
        {
          // drop to the special short-sleep priority so the context we are
          // yielding to wins ties, and queue ourselves to run again
          current->prio = priority::_internal__priority_for_short_sleeps();
          add_context_to_ready_list(prev, true);
        }
        // slog( "jump to %p from %p", next, prev );
        // fc_dlog( logger::get("fc_context"), "from ${from} to ${to}", ("from",int64_t(prev))("to",int64_t(next)) );
#if BOOST_VERSION >= 106100
        // boost >= 1.61: transfer_t API -- the resumed side must store the
        // handle it was given back into the suspended context
        auto p = context_pair{nullptr, prev};
        auto t = bc::jump_fcontext( next->my_context, &p );
        static_cast<context_pair*>(t.data)->second->my_context = t.fctx;
#elif BOOST_VERSION >= 105600
        bc::jump_fcontext( &prev->my_context, next->my_context, 0 );
#elif BOOST_VERSION >= 105300
        bc::jump_fcontext( prev->my_context, next->my_context, 0 );
#else
        bc::jump_fcontext( &prev->my_context, &next->my_context, 0 );
#endif
        // execution resumes here when some other context switches back to us
        BOOST_ASSERT( current );
        BOOST_ASSERT( current == prev );
        //current = prev;
      }
      else
      {
        // all contexts are blocked, create a new context
        // that will process posted tasks...
        fc::context* prev = current;

        fc::context* next = nullptr;
        if( pt_head )
        {
          // grab cached context
          next = pt_head;
          pt_head = pt_head->next;
          next->next = 0;
          next->reinitialize();
        }
        else
        {
          // create new context.
          next = new fc::context( &thread_d::start_process_tasks, stack_alloc,
                                  &fc::thread::current() );
        }

        current = next;
        if( reschedule )
        {
          current->prio = priority::_internal__priority_for_short_sleeps();
          add_context_to_ready_list(prev, true);
        }

        // slog( "jump to %p from %p", next, prev );
        // fc_dlog( logger::get("fc_context"), "from ${from} to ${to}", ("from",int64_t(prev))("to",int64_t(next)) );
#if BOOST_VERSION >= 106100
        // fresh fiber: pass 'this' so start_process_tasks can find us
        auto p = context_pair{this, prev};
        auto t = bc::jump_fcontext( next->my_context, &p );
        static_cast<context_pair*>(t.data)->second->my_context = t.fctx;
#elif BOOST_VERSION >= 105600
        bc::jump_fcontext( &prev->my_context, next->my_context, (intptr_t)this );
#elif BOOST_VERSION >= 105300
        bc::jump_fcontext( prev->my_context, next->my_context, (intptr_t)this );
#else
        bc::jump_fcontext( &prev->my_context, &next->my_context, (intptr_t)this );
#endif
        BOOST_ASSERT( current );
        BOOST_ASSERT( current == prev );
        //current = prev;
      }

      if( reschedule )
        current->prio = original_priority;

      // we were resumed; surface any cancellation that happened while away
      if( current->canceled )
      {
        //current->canceled = false;
#ifdef NDEBUG
        FC_THROW_EXCEPTION( canceled_exception, "" );
#else
        FC_THROW_EXCEPTION( canceled_exception, "cancellation reason: ${reason}", ("reason", current->cancellation_reason ? current->cancellation_reason : "[none given]") );
#endif
      }

      return true;
   }
2017-11-09 12:18:58 +00:00
#if BOOST_VERSION >= 106100
   // Fiber entry point (boost >= 1.61 transfer_t API): unpack the owning
   // thread_d and store the caller's resumable handle before running tasks.
   static void start_process_tasks( bc::transfer_t my )
   {
      auto p = static_cast<context_pair*>(my.data);
      auto self = static_cast<thread_d*>(p->first);
      p->second->my_context = my.fctx;
#else
   // Fiber entry point (older boost): 'my' is the thread_d* smuggled
   // through jump_fcontext as an integer.
   static void start_process_tasks( intptr_t my )
   {
      thread_d* self = (thread_d*)my;
#endif
      try
      {
        self->process_tasks();
      }
      catch ( canceled_exception& ) { /* allowed exception */ }
      catch ( ... )
      {
        elog( "fiber ${name} exited with uncaught exception: ${e}", ("e",fc::except_str())("name",self->name) );
        // assert( !"fiber exited with uncaught exception" );
        //TODO replace errror      fc::cerr<<"fiber exited with uncaught exception:\n "<<
        // boost::current_exception_diagnostic_information() <<std::endl;
      }
      // this fiber is finished: queue its context for deletion and hand
      // control to whatever should run next (fibers must never return)
      self->free_list.push_back(self->current);
      self->start_next_fiber( false );
   }
2014-10-14 18:21:42 +00:00
   // Dequeue the highest-priority runnable task and execute it on the
   // current context, then detach and release it.
   void run_next_task()
   {
      task_base* next = dequeue();

      next->_set_active_context( current );
      current->cur_task = next;
      next->run();
      // detach the task from the context *before* releasing it
      current->cur_task = nullptr;
      next->_set_active_context(nullptr);
      next->release(); // HERE BE DRAGONS
      current->reinitialize();
   }
2014-10-13 15:44:16 +00:00
bool has_next_task ( )
{
2012-09-08 02:50:37 +00:00
if ( task_pqueue . size ( ) | |
( task_sch_queue . size ( ) & & task_sch_queue . front ( ) - > _when < = time_point : : now ( ) ) | |
task_in_queue . load ( boost : : memory_order_relaxed ) )
2014-10-13 15:44:16 +00:00
return true ;
2012-09-08 02:50:37 +00:00
return false ;
}
2014-10-13 15:44:16 +00:00
void clear_free_list ( )
{
for ( uint32_t i = 0 ; i < free_list . size ( ) ; + + i )
2012-09-08 02:50:37 +00:00
delete free_list [ i ] ;
free_list . clear ( ) ;
}
2014-10-13 15:44:16 +00:00
   // Main scheduler loop run by task-processing fibers: alternate between
   // running posted tasks and resuming ready contexts, sleeping on
   // task_ready when there is nothing to do.  Exits when the thread is
   // done and no contexts remain blocked.
   void process_tasks()
   {
      while( !done || blocked )
      {
        // move all new tasks to the task_pqueue
        move_newly_scheduled_tasks_to_task_pqueue();
        // move all now-ready sleeping tasks to the ready list
        check_for_timeouts();
        if( !task_pqueue.empty() )
        {
          if( !ready_heap.empty() )
          {
            // a new task and an existing task are both ready to go
            if( task_priority_less()(task_pqueue.front(), ready_heap.front()) )
            {
              // run the existing task first
              pt_push_back( current );
              start_next_fiber(false);
              continue;
            }
          }
          // if we made it here, either there's no ready context, or the ready context is
          // scheduled after the ready task, so we should run the task first
          run_next_task();
          continue;
        }

        // if I have something else to do other than
        // process tasks... do it.
        if( !ready_heap.empty() )
        {
          pt_push_back( current );
          start_next_fiber(false);
          continue;
        }

        if( process_canceled_tasks() )
          continue;

        clear_free_list();

        { // lock scope
          boost::unique_lock<boost::mutex> lock(task_ready_mutex);
          // re-check for work that arrived before we took the lock
          if( has_next_task() )
            continue;
          time_point timeout_time = check_for_timeouts();

          if( done )
            return;

          // tell the notifier (if any) that we are about to go idle; the
          // guard construction itself may take time, so re-check the inbox
          detail::idle_guard guard( this );
          if( task_in_queue.load( boost::memory_order_relaxed ) )
            continue;

          if( timeout_time == time_point::maximum() )
            task_ready.wait( lock );
          else if( timeout_time != time_point::min() )
          {
            // there may be tasks that have been canceled we should filter them out now
            // rather than waiting...

            /* This bit is kind of sloppy -- this wait was originally implemented as a wait
             * with respect to boost::chrono::system_clock.  This behaved rather comically
             * if you were to do a:
             *   fc::usleep(fc::seconds(60));
             * and then set your system's clock back a month, it would sleep for a month
             * plus a minute before waking back up (this happened on Linux, it seems
             * Windows' behavior in this case was less unexpected).
             *
             * Boost Chrono's steady_clock will always increase monotonically so it will
             * avoid this behavior.
             *
             * Right now we don't really have a way to distinguish when a timeout_time is coming
             * from a function that takes a relative time like fc::usleep() vs something
             * that takes an absolute time like fc::promise::wait_until(), so we can't always
             * do the right thing here.
             */
            task_ready.wait_until( lock, boost::chrono::steady_clock::now() +
                                         boost::chrono::microseconds(timeout_time.time_since_epoch().count() - time_point::now().time_since_epoch().count()) );
          }
        }
      }
   }
   /**
    *    Return system_clock::time_point::min() if tasks have timed out
    *    Return system_clock::time_point::max() if there are no scheduled tasks
    *    Return the time the next task needs to be run if there is anything scheduled.
    */
   time_point check_for_timeouts()
   {
      if( !sleep_pqueue.size() && !task_sch_queue.size() )
      {
        // ilog( "no timeouts ready" );
        return time_point::maximum();
      }

      // earliest deadline across the sleep queue and the scheduled-task queue
      time_point next = time_point::maximum();
      if( !sleep_pqueue.empty() && next > sleep_pqueue.front()->resume_time )
        next = sleep_pqueue.front()->resume_time;
      if( !task_sch_queue.empty() && next > task_sch_queue.front()->_when )
        next = task_sch_queue.front()->_when;

      time_point now = time_point::now();
      if( now < next )
        return next;

      // move all expired sleeping tasks to the ready queue
      while( sleep_pqueue.size() && sleep_pqueue.front()->resume_time < now )
      {
        fc::context::ptr c = sleep_pqueue.front();
        std::pop_heap(sleep_pqueue.begin(), sleep_pqueue.end(), sleep_priority_less() );
        // ilog( "sleep pop back..." );
        sleep_pqueue.pop_back();

        if( c->blocking_prom.size() )
        {
          // the context was waiting on promises with a timeout: fail them
          // ilog( "timeout blocking prom" );
          c->timeout_blocking_promises();
        }
        else
        {
          // a plain sleep expired: make the context runnable again
          // ilog( "ready_push_front" );
          if( c != current )
            add_context_to_ready_list(c);
        }
      }
      return time_point::min();
   }
2014-10-13 15:44:16 +00:00
   // Make context 'c' runnable again.  If invoked from a different fc
   // thread, bounce the call onto this thread via async (contexts belong
   // to exactly one thread's scheduler).
   void unblock( fc::context* c )
   {
      if( fc::thread::current().my != this )
      {
        self.async( [this,c](){ unblock(c); }, "thread_d::unblock" );
        return;
      }

      if( c != current )
        add_context_to_ready_list(c);
   }
2013-08-19 18:44:13 +00:00
2012-09-08 02:50:37 +00:00
   // Suspend the current context until approximately 'tp'.  Returns
   // immediately when tp is within ~10ms of now.  If 'reschedule' is set,
   // the context is re-queued as runnable (short yield) instead of only
   // waking at its resume_time.
   void yield_until( const time_point& tp, bool reschedule ) {
      check_fiber_exceptions();

      if( tp <= (time_point::now() + fc::microseconds(10000)) )
        return;

      FC_ASSERT( std::current_exception() == std::exception_ptr(),
                 "Attempting to yield while processing an exception" );

      if( !current )
        current = new fc::context(&fc::thread::current());

      current->resume_time = tp;
      current->clear_blocking_promises();

      sleep_pqueue.push_back(current);
      std::push_heap( sleep_pqueue.begin(),
                      sleep_pqueue.end(), sleep_priority_less() );

      start_next_fiber(reschedule);

      // we're awake again (timeout or early wake): remove ourselves from
      // the sleep queue if still present, using swap-with-back + make_heap
      for( uint32_t i = 0; i < sleep_pqueue.size(); ++i )
      {
        if( sleep_pqueue[i] == current )
        {
          sleep_pqueue[i] = sleep_pqueue.back();
          sleep_pqueue.pop_back();
          std::make_heap( sleep_pqueue.begin(),
                          sleep_pqueue.end(), sleep_priority_less() );
          break;
        }
      }

      current->resume_time = time_point::maximum();
      check_fiber_exceptions();
   }
   // Block the current context until promise 'p' is ready or 'timeout'
   // passes.  Throws timeout_exception immediately if the timeout is
   // already in the past; cancellation is re-checked on wakeup.
   void wait( const promise_base::ptr& p, const time_point& timeout ) {
      if( p->ready() )
        return;

      FC_ASSERT( std::current_exception() == std::exception_ptr(),
                 "Attempting to yield while processing an exception" );

      if( timeout < time_point::now() )
        FC_THROW_EXCEPTION( timeout_exception, "" );

      if( !current )
        current = new fc::context(&fc::thread::current());

      // slog( "   %1% blocking on %2%", current, p.get() );
      current->add_blocking_promise(p.get(),true);

      // if not max timeout, added to sleep pqueue
      if( timeout != time_point::maximum() )
      {
        current->resume_time = timeout;
        sleep_pqueue.push_back(current);
        std::push_heap( sleep_pqueue.begin(),
                        sleep_pqueue.end(),
                        sleep_priority_less() );
      }

      // elog( "blocking %1%", current );
      add_to_blocked( current );
      // debug("swtiching fibers..." );

      start_next_fiber();
      // slog( "resuming %1%", current );

      // slog( "    %1% unblocking blocking on %2%", current, p.get() );
      current->remove_blocking_promise(p.get());

      check_fiber_exceptions();
   }
2014-08-27 16:20:19 +00:00
void cleanup_thread_specific_data ( )
{
for ( auto iter = non_task_specific_data . begin ( ) ; iter ! = non_task_specific_data . end ( ) ; + + iter )
if ( iter - > cleanup )
iter - > cleanup ( iter - > value ) ;
for ( auto iter = thread_specific_data . begin ( ) ; iter ! = thread_specific_data . end ( ) ; + + iter )
if ( iter - > cleanup )
iter - > cleanup ( iter - > value ) ;
}
2014-09-08 14:31:07 +00:00
   // After a cancellation: move every canceled context off the blocked list
   // and out of the sleep queue onto the ready list, so each one wakes up
   // and observes its cancellation (via check_fiber_exceptions).
   void notify_task_has_been_canceled()
   {
      // walk the blocked list through a pointer-to-pointer so unlinking
      // works uniformly for the head and interior nodes
      for( fc::context** iter = &blocked; *iter; )
      {
        if( (*iter)->canceled )
        {
          fc::context* next_blocked = (*iter)->next_blocked;
          (*iter)->next_blocked = nullptr;
          add_context_to_ready_list(*iter);
          *iter = next_blocked;
          continue;
        }
        iter = &(*iter)->next_blocked;
      }

      bool task_removed_from_sleep_pqueue = false;
      for( auto sleep_iter = sleep_pqueue.begin(); sleep_iter != sleep_pqueue.end(); )
      {
        if( (*sleep_iter)->canceled )
        {
          // the blocked-list pass above may already have queued this context
          bool already_on_ready_list = std::find(ready_heap.begin(), ready_heap.end(),
                                                 *sleep_iter) != ready_heap.end();
          if( !already_on_ready_list )
            add_context_to_ready_list(*sleep_iter);
          sleep_iter = sleep_pqueue.erase(sleep_iter);
          task_removed_from_sleep_pqueue = true;
        }
        else
          ++sleep_iter;
      }
      // erasing from the middle broke the heap invariant; rebuild it
      if( task_removed_from_sleep_pqueue )
        std::make_heap(sleep_pqueue.begin(), sleep_pqueue.end(), sleep_priority_less());
   }
2012-09-08 02:50:37 +00:00
} ;
} // namespace fc