When a task is canceled while blocking on a mutex, the code that removes it from the mutex's block list now nulls out the task's "next" pointer, which is assumed to be null whenever the task is not blocked on a mutex

This commit is contained in:
Eric Frias 2014-08-27 11:55:14 -04:00
parent 976bbce668
commit d9e6a9e568
2 changed files with 42 additions and 18 deletions

View file

@ -11,15 +11,18 @@ namespace fc {
:m_blist(0){} :m_blist(0){}
mutex::~mutex() { mutex::~mutex() {
if( m_blist ) { if( m_blist )
auto c = m_blist; {
context* c = m_blist;
fc::thread::current().debug("~mutex"); fc::thread::current().debug("~mutex");
#if 0
while( c ) { while( c ) {
// elog( "still blocking on context %p (%s)", m_blist, (m_blist->cur_task ? m_blist->cur_task->get_desc() : "no current task") ); // elog( "still blocking on context %p (%s)", m_blist, (m_blist->cur_task ? m_blist->cur_task->get_desc() : "no current task") );
c = c->next_blocked_mutex; c = c->next_blocked_mutex;
} }
#endif
BOOST_ASSERT( false && "Attempt to free mutex while others are blocking on lock." );
} }
BOOST_ASSERT( !m_blist && "Attempt to free mutex while others are blocking on lock." );
} }
/** /**
@ -40,18 +43,21 @@ namespace fc {
} }
static fc::context* remove( fc::context* head, fc::context* target ) { static fc::context* remove( fc::context* head, fc::context* target ) {
fc::context* c = head; fc::context* context_iter = head;
fc::context* p = 0; fc::context* previous = 0;
while( c ) { while( context_iter )
if( c == target ) { {
if( p ) { if( context_iter == target )
p->next_blocked_mutex = c->next_blocked_mutex; {
if( previous )
{
previous->next_blocked_mutex = context_iter->next_blocked_mutex;
return head; return head;
} }
return c->next_blocked_mutex; return context_iter->next_blocked_mutex;
} }
p = c; previous = context_iter;
c = c->next_blocked_mutex; context_iter = context_iter->next_blocked_mutex;
} }
return head; return head;
} }
@ -60,6 +66,7 @@ namespace fc {
fc::unique_lock<fc::spin_yield_lock> lock(syl); fc::unique_lock<fc::spin_yield_lock> lock(syl);
if( cc->next_blocked_mutex ) { if( cc->next_blocked_mutex ) {
bl = remove(bl, cc); bl = remove(bl, cc);
cc->next_blocked_mutex = nullptr;
return; return;
} }
} }
@ -123,7 +130,9 @@ namespace fc {
{ {
fc::unique_lock<fc::spin_yield_lock> lock(m_blist_lock); fc::unique_lock<fc::spin_yield_lock> lock(m_blist_lock);
if( !m_blist ) { if( !m_blist )
{
// nobody else owns the mutex, so we get it; add our context as the last and only element on the mutex's list
m_blist = current_context; m_blist = current_context;
assert(!current_context->next_blocked_mutex); assert(!current_context->next_blocked_mutex);
return; return;
@ -132,6 +141,8 @@ namespace fc {
// allow recursive locks // allow recursive locks
fc::context* dummy_context_to_unblock = 0; fc::context* dummy_context_to_unblock = 0;
if ( get_tail( m_blist, dummy_context_to_unblock ) == current_context ) { if ( get_tail( m_blist, dummy_context_to_unblock ) == current_context ) {
// if we already have the lock (meaning we're on the tail of the list) then
// we shouldn't be trying to grab the lock again
assert(false); assert(false);
// EMF: I think recursive locks are currently broken -- we need to // EMF: I think recursive locks are currently broken -- we need to
// keep track of how many times this mutex has been locked by the // keep track of how many times this mutex has been locked by the
@ -139,6 +150,7 @@ namespace fc {
// the next context only if the count drops to zero // the next context only if the count drops to zero
return; return;
} }
// add ourselves to the head of the list
current_context->next_blocked_mutex = m_blist; current_context->next_blocked_mutex = m_blist;
m_blist = current_context; m_blist = current_context;
@ -153,11 +165,21 @@ namespace fc {
#endif #endif
} }
try { try
{
fc::thread::current().yield(false); fc::thread::current().yield(false);
// if yield() returned normally, we should now own the lock (we should be at the tail of the list)
BOOST_ASSERT( current_context->next_blocked_mutex == 0 ); BOOST_ASSERT( current_context->next_blocked_mutex == 0 );
} catch ( ... ) { }
wlog( "lock threw" ); catch ( exception& e )
{
wlog( "lock threw: ${e}", ("e", e));
cleanup( *this, m_blist_lock, m_blist, current_context);
FC_RETHROW_EXCEPTION(e, warn, "lock threw: ${e}", ("e", e));
}
catch ( ... )
{
wlog( "lock threw unexpected exception" );
cleanup( *this, m_blist_lock, m_blist, current_context); cleanup( *this, m_blist_lock, m_blist, current_context);
throw; throw;
} }

View file

@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE( cancel_task_blocked_on_mutex)
BOOST_TEST_MESSAGE("--- In test_task, sleeps done, exiting"); BOOST_TEST_MESSAGE("--- In test_task, sleeps done, exiting");
}, "test_task"); }, "test_task");
fc::usleep(fc::seconds(3)); fc::usleep(fc::seconds(3));
test_task.cancel(); //test_task.cancel();
try try
{ {
test_task.wait(fc::seconds(1)); test_task.wait(fc::seconds(1));
@ -55,6 +55,8 @@ BOOST_AUTO_TEST_CASE( cancel_task_blocked_on_mutex)
} }
BOOST_TEST_MESSAGE("Unlocking mutex locked from the main task so the test task will have the opportunity to lock it and be canceled"); BOOST_TEST_MESSAGE("Unlocking mutex locked from the main task so the test task will have the opportunity to lock it and be canceled");
} }
fc::usleep(fc::seconds(3));
test_task.cancel_and_wait(); test_task.cancel_and_wait();
} }
} }