#501 Fix _active_connections mutex

commit 5e75e8043a
parent 5203869a9d
Author: Vlad Dobromyslov
Date:   2023-03-20 10:55:02 +03:00

@@ -1687,23 +1687,18 @@ namespace graphene { namespace net { namespace detail {
     {
       VERIFY_CORRECT_THREAD();
-      fc::scoped_lock<fc::mutex> lock(_active_connections.get_mutex());
-      std::list<peer_connection_ptr> original_active_peers(_active_connections.begin(), _active_connections.end());
-      for( const peer_connection_ptr& active_peer : original_active_peers )
-      {
-        try
-        {
-          active_peer->send_message(address_request_message());
-        }
-        catch ( const fc::canceled_exception& )
-        {
-          throw;
-        }
-        catch (const fc::exception& e)
-        {
-          dlog("Caught exception while sending address request message to peer ${peer} : ${e}",
-               ("peer", active_peer->get_remote_endpoint())("e", e));
-        }
+      fc::scoped_lock<fc::mutex> lock(_active_connections.get_mutex());
+      for (const peer_connection_ptr &active_peer : _active_connections) {
+        try {
+          active_peer->send_message(address_request_message());
+        } catch (const fc::canceled_exception &) {
+          throw;
+        } catch (const fc::exception &e) {
+          dlog("Caught exception while sending address request message to peer ${peer} : ${e}",
+               ("peer", active_peer->get_remote_endpoint())("e", e));
+        }
       }
     }
     // this has nothing to do with updating the peer list, but we need to prune this list
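
The substance of the change reads the same in both directions: the old code snapshotted `_active_connections` into `original_active_peers` and iterated the copy, while the new code iterates `_active_connections` in place for the whole duration of `_active_connections.get_mutex()`, so no peer can be added or removed mid-loop and no copy is needed. A minimal sketch of the new shape, using standard-library types in place of the `fc` equivalents (`concurrent_peer_list`, `for_each_peer`, and `fn` are hypothetical names for illustration, not part of this codebase):

    #include <functional>
    #include <list>
    #include <memory>
    #include <mutex>

    struct peer_connection {};
    using peer_connection_ptr = std::shared_ptr<peer_connection>;

    // Mirrors the idea behind _active_connections: a container that carries
    // its own mutex and exposes it via get_mutex(), as in the diff above.
    struct concurrent_peer_list {
        std::list<peer_connection_ptr> peers;
        std::mutex mutex;
        std::mutex& get_mutex() { return mutex; }
    };

    void for_each_peer(concurrent_peer_list& active,
                       const std::function<void(const peer_connection_ptr&)>& fn) {
        // Hold the lock across the whole iteration instead of snapshotting the
        // list; insertions and removals cannot race with the loop while the
        // guard is alive.
        std::lock_guard<std::mutex> guard(active.get_mutex());
        for (const peer_connection_ptr& peer : active.peers)
            fn(peer);
    }

The trade-off is that the mutex is now held while `send_message` runs, which is safe only as long as nothing reached from the loop body tries to take the same mutex again.
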
@@ -1718,6 +1713,7 @@ namespace graphene { namespace net { namespace detail {
                              fc::time_point::now() + fc::minutes(15),
                              "fetch_updated_peer_lists_loop" );
   }
+
   void node_impl::update_bandwidth_data(uint32_t bytes_read_this_second, uint32_t bytes_written_this_second)
   {
     VERIFY_CORRECT_THREAD();
@@ -3576,25 +3572,24 @@ namespace graphene { namespace net { namespace detail {
     {
       dlog("Already received and accepted this block (presumably through normal inventory mechanism), treating it as accepted");
       std::vector< peer_connection_ptr > peers_needing_next_batch;
-      fc::scoped_lock<fc::mutex> lock(_active_connections.get_mutex());
-      for (const peer_connection_ptr& peer : _active_connections)
-      {
-        auto items_being_processed_iter = peer->ids_of_items_being_processed.find(received_block_iter->block_id);
-        if (items_being_processed_iter != peer->ids_of_items_being_processed.end())
-        {
-          peer->ids_of_items_being_processed.erase(items_being_processed_iter);
-          dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks",
-               ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size()));
+      fc::scoped_lock<fc::mutex> lock(_active_connections.get_mutex());
+      for (const peer_connection_ptr &peer : _active_connections) {
+        auto items_being_processed_iter = peer->ids_of_items_being_processed.find(received_block_iter->block_id);
+        if (items_being_processed_iter != peer->ids_of_items_being_processed.end()) {
+          peer->ids_of_items_being_processed.erase(items_being_processed_iter);
+          dlog("Removed item from ${endpoint}'s list of items being processed, still processing ${len} blocks",
+               ("endpoint", peer->get_remote_endpoint())("len", peer->ids_of_items_being_processed.size()));
-          // if we just processed the last item in our list from this peer, we will want to
-          // send another request to find out if we are now in sync (this is normally handled in
-          // send_sync_block_to_node_delegate)
-          if (peer->ids_of_items_to_get.empty() &&
-              peer->number_of_unfetched_item_ids == 0 &&
-              peer->ids_of_items_being_processed.empty())
-          {
-            dlog("We received last item in our list for peer ${endpoint}, setup to do a sync check", ("endpoint", peer->get_remote_endpoint()));
-            peers_needing_next_batch.push_back( peer );
+          // if we just processed the last item in our list from this peer, we will want to
+          // send another request to find out if we are now in sync (this is normally handled in
+          // send_sync_block_to_node_delegate)
+          if (peer->ids_of_items_to_get.empty() &&
+              peer->number_of_unfetched_item_ids == 0 &&
+              peer->ids_of_items_being_processed.empty()) {
+            dlog("We received last item in our list for peer ${endpoint}, setup to do a sync check", ("endpoint", peer->get_remote_endpoint()));
+            peers_needing_next_batch.push_back(peer);
           }
         }
       }
     }
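
This hunk makes the same locking change but also preserves a collect-then-act split: peers whose sync queues have drained are only recorded in `peers_needing_next_batch` while the mutex is held, and the follow-up requests are presumably issued after the loop, outside the lines shown here. A hedged sketch of that shape, reusing the `concurrent_peer_list` stand-in from above (`peer_is_drained`, `request_next_batch`, and `schedule_sync_checks` are hypothetical):

    #include <vector>

    // Hypothetical stand-ins for the real drained-queue check and follow-up.
    static bool peer_is_drained(const peer_connection_ptr&) { return true; }
    static void request_next_batch(const peer_connection_ptr&) {}

    void schedule_sync_checks(concurrent_peer_list& active) {
        std::vector<peer_connection_ptr> peers_needing_next_batch;
        {
            // Critical section: only inspect peer state and collect candidates.
            std::lock_guard<std::mutex> guard(active.get_mutex());
            for (const peer_connection_ptr& peer : active.peers)
                if (peer_is_drained(peer))
                    peers_needing_next_batch.push_back(peer);
        } // mutex released before any follow-up work runs
        for (const peer_connection_ptr& peer : peers_needing_next_batch)
            request_next_batch(peer);
    }

Keeping the second loop outside the critical section means `request_next_batch` is free to send messages or take other locks without risking a deadlock on the list's own mutex.
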