Greatly reduce the amount of time the p2p network code will wait for a peer
to return a requested block/transaction, and make this time dependent on the
actual block interval. This should allow the node to give up and request the
block from another peer before the ~30 second undo interval has passed.

Fix the merkle root calculation to avoid reading past the end of a vector,
and modify the algorithm to do what was likely intended (this modification
is currently disabled because it would yield different results than the
currently-running testnet).

Fix Windows build errors.
Eric Frias 2015-08-21 19:53:35 -04:00
parent 7614beb338
commit c0b9af9a99
6 changed files with 56 additions and 13 deletions
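For context on the numbers involved, here is a minimal sketch of how the new peer timeouts in node.cpp (further down) are derived from the block interval. The 3-second interval is only an assumed example value; the node obtains the real one from the chain's global properties via get_current_block_interval_in_seconds().

#include <cstdint>
#include <iostream>

int main()
{
    // Assumed example value; in the node this comes from the delegate's
    // get_current_block_interval_in_seconds() call added in this commit.
    const uint8_t current_block_interval_in_seconds = 3;

    // Derivations mirror the new node.cpp code below.
    uint32_t active_disconnect_timeout      = 10 * current_block_interval_in_seconds; // 30 s
    uint32_t active_send_keepalive_timeout  = active_disconnect_timeout / 2;          // 15 s
    uint32_t active_ignored_request_timeout = 3 * current_block_interval_in_seconds;  //  9 s

    std::cout << "disconnect after "        << active_disconnect_timeout      << "s, "
              << "keepalive after "         << active_send_keepalive_timeout  << "s, "
              << "re-request block after "  << active_ignored_request_timeout << "s\n";
}

Previously these thresholds were derived from the compile-time GRAPHENE_MAX_BLOCK_INTERVAL constant with fixed floors, so they did not track the interval the chain was actually running with.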


@@ -550,6 +550,11 @@ namespace detail {
// notify GUI or something cool
}
+ uint8_t get_current_block_interval_in_seconds() const override
+ {
+ return _chain_db->get_global_properties().parameters.block_interval;
+ }
application* _self;
fc::path _data_dir;


@@ -156,7 +156,8 @@ optional<signed_block> block_database::fetch_optional( const block_id_type& id )
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
- _blocks.read( data.data(), e.block_size );
+ if (e.block_size)
+ _blocks.read( data.data(), e.block_size );
auto result = fc::raw::unpack<signed_block>(data);
FC_ASSERT( result.id() == e.block_id );
return result;


@@ -58,18 +58,44 @@ namespace graphene { namespace chain {
checksum_type signed_block::calculate_merkle_root()const
{
- if( transactions.size() == 0 ) return checksum_type();
+ if( transactions.size() == 0 )
+ return checksum_type();
vector<digest_type> ids;
ids.resize( ((transactions.size() + 1)/2)*2 );
for( uint32_t i = 0; i < transactions.size(); ++i )
ids[i] = transactions[i].merkle_digest();
- while( ids.size() > 1 )
+ vector<digest_type>::size_type current_number_of_hashes = ids.size();
+ while( true )
{
+ #define AUG_20_TESTNET_COMPATIBLE
+ #ifdef AUG_20_TESTNET_COMPATIBLE
for( uint32_t i = 0; i < transactions.size(); i += 2 )
- ids[i/2] = digest_type::hash( std::make_pair( ids[i], ids[i+1] ) );
- ids.resize( ids.size() / 2 );
+ #else
+ for( uint32_t i = 0; i < current_number_of_hashes; i += 2 )
+ #endif
+ ids[i/2] = digest_type::hash( std::make_pair( ids[i], ids[i+1] ) );
+ // since we're processing hashes in pairs, we need to ensure that we always
+ // have an even number of hashes in the ids list. If we would end up with
+ // an odd number, add a default-initialized hash to compensate
+ current_number_of_hashes /= 2;
+ #ifdef AUG_20_TESTNET_COMPATIBLE
+ if (current_number_of_hashes <= 1)
+ break;
+ #else
+ if (current_number_of_hashes == 1)
+ break;
+ if (current_number_of_hashes % 2)
+ {
+ ++current_number_of_hashes;
+ // TODO: HARD FORK: we should probably enable the next line the next time we fire
+ // up a new testnet; it will change the merkle roots we generate, but will
+ // give us a better-defined algorithm for calculating them
+ //
+ ids[current_number_of_hashes - 1] = digest_type();
+ }
+ #endif
}
return checksum_type::hash( ids[0] );
}
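The preprocessor conditionals make the new algorithm hard to follow in diff form. Stripped of the AUG_20_TESTNET_COMPATIBLE compatibility path, the disabled #else branch appears to intend the following reduction. This is only an illustrative, self-contained sketch: hash_pair and the function name are hypothetical stand-ins, while the real code hashes pairs of digest_type values and finally wraps ids[0] in checksum_type::hash.

#include <cstddef>
#include <functional>
#include <string>
#include <vector>

// Stand-in for graphene's digest_type and digest_type::hash(); the real code
// hashes pairs of transaction merkle digests, here we just combine strings.
using digest = std::string;
static digest hash_pair(const digest& a, const digest& b)
{
    return std::to_string(std::hash<std::string>{}(a + b));
}

// Sketch of the intended (currently disabled) reduction: hash the level in
// pairs, and whenever a level would end up with an odd number of hashes,
// pad it with a default-initialized digest so every node has two children.
digest calculate_merkle_root_sketch(std::vector<digest> ids)
{
    if (ids.empty())
        return digest();
    if (ids.size() % 2)            // pad the leaf level to an even count
        ids.push_back(digest());

    std::size_t current_number_of_hashes = ids.size();
    while (true)
    {
        for (std::size_t i = 0; i < current_number_of_hashes; i += 2)
            ids[i / 2] = hash_pair(ids[i], ids[i + 1]);

        current_number_of_hashes /= 2;
        if (current_number_of_hashes == 1)
            break;
        if (current_number_of_hashes % 2)
        {
            // odd level: pad with a default-initialized digest,
            // as the disabled branch above intends
            ++current_number_of_hashes;
            ids[current_number_of_hashes - 1] = digest();
        }
    }
    return ids[0];   // the real code returns checksum_type::hash( ids[0] )
}

The compatibility branch, by contrast, keeps iterating over transactions.size() without shrinking the vector, which appears to reproduce the merkle roots the Aug 20 testnet already computed while eliminating the out-of-bounds reads.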

@@ -1 +1 @@
- Subproject commit 458b601774c36b702e2d4712320b5d53c6b2ee1c
+ Subproject commit 71be796af50c407281a40e61e4199a87e0a19314


@@ -158,6 +158,8 @@ namespace graphene { namespace net {
virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const = 0;
virtual void error_encountered(const std::string& message, const fc::oexception& error) = 0;
+ virtual uint8_t get_current_block_interval_in_seconds() const = 0;
};
/**


@@ -289,7 +289,9 @@ namespace graphene { namespace net { namespace detail {
(get_block_time) \
(get_head_block_id) \
(estimate_last_known_fork_from_git_revision_timestamp) \
- (error_encountered)
+ (error_encountered) \
+ (get_current_block_interval_in_seconds)
#define DECLARE_ACCUMULATOR(r, data, method_name) \
mutable call_stats_accumulator BOOST_PP_CAT(_, BOOST_PP_CAT(method_name, _execution_accumulator)); \
@@ -390,6 +392,7 @@ namespace graphene { namespace net { namespace detail {
item_hash_t get_head_block_id() const override;
uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override;
void error_encountered(const std::string& message, const fc::oexception& error) override;
+ uint8_t get_current_block_interval_in_seconds() const override;
};
/////////////////////////////////////////////////////////////////////////////////////////////////////////
@@ -1282,9 +1285,10 @@ namespace graphene { namespace net { namespace detail {
}
// timeout for any active peers is two block intervals
- uint32_t active_disconnect_timeout = std::max<uint32_t>(5 * GRAPHENE_MAX_BLOCK_INTERVAL / 2, 30);
- uint32_t active_send_keepalive_timeount = std::max<uint32_t>(active_disconnect_timeout / 2, 11);
- uint32_t active_ignored_request_timeount = std::max<uint32_t>(GRAPHENE_MAX_BLOCK_INTERVAL / 4, 10);
+ uint8_t current_block_interval_in_seconds = _delegate->get_current_block_interval_in_seconds();
+ uint32_t active_disconnect_timeout = 10 * current_block_interval_in_seconds;
+ uint32_t active_send_keepalive_timeount = active_disconnect_timeout / 2;
+ uint32_t active_ignored_request_timeount = 3 * current_block_interval_in_seconds;
fc::time_point active_disconnect_threshold = fc::time_point::now() - fc::seconds(active_disconnect_timeout);
fc::time_point active_send_keepalive_threshold = fc::time_point::now() - fc::seconds(active_send_keepalive_timeount);
fc::time_point active_ignored_request_threshold = fc::time_point::now() - fc::seconds(active_ignored_request_timeount);
@@ -1334,7 +1338,7 @@ namespace graphene { namespace net { namespace detail {
peers_to_disconnect_forcibly.push_back(active_peer);
}
else if (active_peer->connection_initiation_time < active_send_keepalive_threshold &&
- active_peer->get_last_message_received_time() < active_send_keepalive_threshold)
+ active_peer->get_last_message_received_time() < active_send_keepalive_threshold)
{
wlog( "Sending a keepalive message to peer ${peer} who hasn't sent us any messages in the last ${timeout} seconds",
( "peer", active_peer->get_remote_endpoint() )("timeout", active_send_keepalive_timeount ) );
@@ -1387,11 +1391,11 @@ namespace graphene { namespace net { namespace detail {
peers_to_send_keep_alive.clear();
for (const peer_connection_ptr& peer : peers_to_terminate )
- {
+ {
assert(_terminating_connections.find(peer) != _terminating_connections.end());
_terminating_connections.erase(peer);
schedule_peer_for_deletion(peer);
- }
+ }
if (!_node_is_shutting_down && !_terminate_inactive_connections_loop_done.canceled())
_terminate_inactive_connections_loop_done = fc::schedule( [this](){ terminate_inactive_connections_loop(); },
@@ -5217,6 +5221,11 @@ namespace graphene { namespace net { namespace detail {
INVOKE_AND_COLLECT_STATISTICS(error_encountered, message, error);
}
+ uint8_t statistics_gathering_node_delegate_wrapper::get_current_block_interval_in_seconds() const
+ {
+ INVOKE_AND_COLLECT_STATISTICS(get_current_block_interval_in_seconds);
+ }
#undef INVOKE_AND_COLLECT_STATISTICS
} // end namespace detail