Disconnect from non-updated witness

This commit is contained in:
Davor Hirunda 2022-04-26 19:04:30 +00:00 committed by serkixenos
parent 0f0cf62b20
commit bd6f265409
14 changed files with 178 additions and 68 deletions

View file

@ -795,6 +795,10 @@ public:
FC_CAPTURE_AND_RETHROW((block_id))
}
/// Returns the activation time of the latest hardfork this node knows about.
/// _hardfork_times is populated and sorted ascending in
/// database::initialize_hardforks() (called from the database constructor),
/// so the most recent hardfork is the last element.
virtual fc::time_point_sec get_last_known_hardfork_time() override {
   // back() expresses "newest entry" directly and avoids the manual
   // size() - 1 indexing, which would be UB on an empty vector.
   // NOTE(review): assumes _hardfork_times is non-empty — guaranteed as long
   // as initialize_hardforks() ran during construction; confirm no code path
   // clears the vector afterwards.
   return _chain_db->_hardfork_times.back();
}
/**
* Returns the time a block was produced (if block_id = 0, returns genesis time).
* If we don't know about the block, returns time_point_sec::min()

View file

@ -806,7 +806,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
return get_account_custom_authorities(id, op);
};
trx.verify_authority( chain_id, get_active, get_owner, get_custom,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(head_block_time()),
true,
get_global_properties().parameters.max_authority_depth );
}

View file

@ -329,6 +329,48 @@ void database::initialize_evaluators()
register_evaluator<random_number_store_evaluator>();
}
/**
 * Populate _hardfork_times with the activation time of every known hardfork,
 * then sort ascending so the last element is the most recent hardfork time.
 * Called once from the database constructor.
 */
void database::initialize_hardforks()
{
   // One aggregate assignment instead of repeated emplace_back calls; the
   // final contents are identical because the vector is sorted afterwards.
   _hardfork_times = {
      HARDFORK_357_TIME,
      HARDFORK_359_TIME,
      HARDFORK_385_TIME,
      HARDFORK_409_TIME,
      HARDFORK_413_TIME,
      HARDFORK_415_TIME,
      HARDFORK_416_TIME,
      HARDFORK_419_TIME,
      HARDFORK_436_TIME,
      HARDFORK_445_TIME,
      HARDFORK_453_TIME,
      HARDFORK_480_TIME,
      HARDFORK_483_TIME,
      HARDFORK_516_TIME,
      HARDFORK_533_TIME,
      HARDFORK_538_TIME,
      HARDFORK_555_TIME,
      HARDFORK_563_TIME,
      HARDFORK_572_TIME,
      HARDFORK_599_TIME,
      HARDFORK_607_TIME,
      HARDFORK_613_TIME,
      HARDFORK_615_TIME,
      HARDFORK_999_TIME,
      HARDFORK_1000_TIME,
      HARDFORK_1001_TIME,
      HARDFORK_5050_1_TIME,
      HARDFORK_CORE_429_TIME,
      HARDFORK_GPOS_TIME,
      HARDFORK_NFT_TIME,
      HARDFORK_SON_FOR_HIVE_TIME,
      HARDFORK_SON_TIME,
      HARDFORK_SON2_TIME,
      HARDFORK_SON3_TIME,
      HARDFORK_SWEEPS_TIME
   };
   std::sort(_hardfork_times.begin(), _hardfork_times.end());
}
void database::initialize_indexes()
{
reset_indexes();

View file

@ -44,6 +44,7 @@ database::database() :
{
initialize_indexes();
initialize_evaluators();
initialize_hardforks();
}
database::~database()

View file

@ -625,7 +625,6 @@ void database::notify_changed_objects()
if( _undo_db.enabled() )
{
const auto& head_undo = _undo_db.head();
auto chain_time = head_block_time();
// New
if( !new_objects.empty() )
@ -637,8 +636,7 @@ void database::notify_changed_objects()
new_ids.push_back(item);
auto obj = find_object(item);
if(obj != nullptr)
get_relevant_accounts(obj, new_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(obj, new_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted)
@ -652,8 +650,7 @@ void database::notify_changed_objects()
for( const auto& item : head_undo.old_values )
{
changed_ids.push_back(item.first);
get_relevant_accounts(item.second.get(), changed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(item.second.get(), changed_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted)
@ -670,8 +667,7 @@ void database::notify_changed_objects()
removed_ids.emplace_back( item.first );
auto obj = item.second.get();
removed.emplace_back( obj );
get_relevant_accounts(obj, removed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(obj, removed_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted)

View file

@ -1,10 +0,0 @@
// #210 Check authorities on custom_operation
// Defines the (not yet scheduled) activation time for hardfork CORE-210 and a
// helper macro that tells callers whether custom_operation::required_auths
// must still be ignored at a given chain time.
#ifndef HARDFORK_CORE_210_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_CORE_210_TIME (fc::time_point_sec::from_iso_string("2030-01-01T00:00:00")) // (Not yet scheduled)
#else
#define HARDFORK_CORE_210_TIME (fc::time_point_sec::from_iso_string("2030-01-01T00:00:00")) // (Not yet scheduled)
#endif
// Bugfix: pre-HF 210, custom_operation's required_auths field was ignored.
// True while chain_time has not yet passed the hardfork, i.e. the old
// (required_auths-ignoring) behavior must be kept for consensus compatibility.
#define MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) (chain_time <= HARDFORK_CORE_210_TIME)
#endif

View file

@ -66,6 +66,8 @@ namespace graphene { namespace chain {
database();
~database();
std::vector<fc::time_point_sec> _hardfork_times;
enum validation_steps
{
skip_nothing = 0,
@ -332,6 +334,8 @@ namespace graphene { namespace chain {
void initialize_evaluators();
/// Reset the object graph in-memory
void initialize_indexes();
void initialize_hardforks();
void init_genesis(const genesis_state_type& genesis_state = genesis_state_type());
template<typename EvaluatorType>

View file

@ -302,8 +302,7 @@ void_result proposal_create_evaluator::do_evaluate( const proposal_create_operat
vector<authority> other;
for( auto& op : o.proposed_ops )
{
operation_get_required_authorities( op.op, auths, auths, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(block_time) );
operation_get_required_authorities( op.op, auths, auths, other, true );
}
FC_ASSERT( other.size() == 0 ); // TODO: what about other???
@ -352,8 +351,7 @@ object_id_type proposal_create_evaluator::do_apply( const proposal_create_operat
// TODO: consider caching values from evaluate?
for( auto& op : _proposed_trx.operations )
operation_get_required_authorities( op, required_active, proposal.required_owner_approvals, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) );
operation_get_required_authorities( op, required_active, proposal.required_owner_approvals, other, true);
//All accounts which must provide both owner and active authority should be omitted from the active authority set;
//owner authority approval implies active authority approval.

View file

@ -39,7 +39,7 @@ bool proposal_object::is_authorized_to_execute( database& db ) const
[&]( account_id_type id ){ return &id(db).owner; },
[&]( account_id_type id, const operation& op ){
return db.get_account_custom_authorities(id, op); },
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ),
true,
db.get_global_properties().parameters.max_authority_depth,
true, /* allow committee */
available_active_approvals,

View file

@ -152,6 +152,8 @@ namespace graphene { namespace net {
virtual uint32_t get_block_number(const item_hash_t& block_id) = 0;
virtual fc::time_point_sec get_last_known_hardfork_time() = 0;
/**
* Returns the time a block was produced (if block_id = 0, returns genesis time).
* If we don't know about the block, returns time_point_sec::min()

View file

@ -258,6 +258,8 @@ namespace graphene { namespace net
uint32_t last_known_fork_block_number = 0;
fc::time_point_sec last_known_hardfork_time;
fc::future<void> accept_or_connect_task_done;
firewall_check_state_data *firewall_check_state = nullptr;

View file

@ -296,6 +296,7 @@ namespace graphene { namespace net { namespace detail {
(sync_status) \
(connection_count_changed) \
(get_block_number) \
(get_last_known_hardfork_time) \
(get_block_time) \
(get_head_block_id) \
(estimate_last_known_fork_from_git_revision_timestamp) \
@ -395,6 +396,7 @@ namespace graphene { namespace net { namespace detail {
void sync_status( uint32_t item_type, uint32_t item_count ) override;
void connection_count_changed( uint32_t c ) override;
uint32_t get_block_number(const item_hash_t& block_id) override;
fc::time_point_sec get_last_known_hardfork_time() override;
fc::time_point_sec get_block_time(const item_hash_t& block_id) override;
item_hash_t get_head_block_id() const override;
uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override;
@ -1864,6 +1866,10 @@ namespace graphene { namespace net { namespace detail {
user_data["last_known_block_hash"] = fc::variant( head_block_id, 1 );
user_data["last_known_block_number"] = _delegate->get_block_number(head_block_id);
user_data["last_known_block_time"] = _delegate->get_block_time(head_block_id);
user_data["last_known_hardfork_time"] = _delegate->get_last_known_hardfork_time().sec_since_epoch();
wlog("on generate hello message, hardfork: ${hardfork}", ("hardfork", _delegate->get_last_known_hardfork_time().sec_since_epoch()));
std::cout<<"on generate hello message :"<<fc::string(_delegate->get_last_known_hardfork_time())<<std::endl;
if (!_hard_fork_block_numbers.empty())
user_data["last_known_fork_block_number"] = _hard_fork_block_numbers.back();
@ -1890,6 +1896,11 @@ namespace graphene { namespace net { namespace detail {
originating_peer->node_id = user_data["node_id"].as<node_id_t>(1);
if (user_data.contains("last_known_fork_block_number"))
originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as<uint32_t>(1);
if (user_data.contains("last_known_hardfork_time")){
originating_peer->last_known_hardfork_time = fc::time_point_sec(user_data["last_known_hardfork_time"].as<uint32_t>(1));
std::cout<<"on get helllo message: "<<originating_peer->last_known_hardfork_time.to_iso_string()<<std::endl;
}
}
void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received )
@ -1969,23 +1980,11 @@ namespace graphene { namespace net { namespace detail {
disconnect_from_peer(originating_peer, "You are on a different chain from me");
return;
}
if (originating_peer->last_known_fork_block_number != 0)
{
uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(originating_peer->last_known_fork_block_number);
if (next_fork_block_number != 0)
{
// we know about a fork they don't. See if we've already passed that block. If we have, don't let them
// connect because we won't be able to give them anything useful
uint32_t head_block_num = _delegate->get_block_number(_delegate->get_head_block_id());
if (next_fork_block_number < head_block_num)
{
#ifdef ENABLE_DEBUG_ULOGS
auto disconnet_peer = [&](const std::ostringstream& rejection_message) {
#ifdef ENABLE_DEBUG_ULOGS
ulog("Rejecting connection from peer because their version is too old. Their version date: ${date}", ("date", originating_peer->graphene_git_revision_unix_timestamp));
#endif
wlog("Received hello message from peer running a version of that can only understand blocks up to #${their_hard_fork}, but I'm at head block number #${my_block_number}",
("their_hard_fork", next_fork_block_number)("my_block_number", head_block_num));
std::ostringstream rejection_message;
rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num;
#endif
connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version,
originating_peer->get_socket().remote_endpoint(),
rejection_reason_code::unspecified,
@ -1997,10 +1996,42 @@ namespace graphene { namespace net { namespace detail {
// allowing her to ask us for peers (any of our peers will be on the same chain as us, so there's no
// benefit of sharing them)
disconnect_from_peer(originating_peer, "Your client is too old, please upgrade");
};
if (originating_peer->last_known_fork_block_number != 0)
{
uint32_t next_fork_block_number = get_next_known_hard_fork_block_number(originating_peer->last_known_fork_block_number);
if (next_fork_block_number != 0)
{
// we know about a fork they don't. See if we've already passed that block. If we have, don't let them
// connect because we won't be able to give them anything useful
uint32_t head_block_num = _delegate->get_block_number(_delegate->get_head_block_id());
if (next_fork_block_number < head_block_num)
{
wlog("Received hello message from peer running a version of that can only understand blocks up to #${their_hard_fork}, but I'm at head block number #${my_block_number}",
("their_hard_fork", next_fork_block_number)("my_block_number", head_block_num));
std::ostringstream rejection_message;
rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num;
disconnet_peer(rejection_message);
return;
}
}
}
// We want to disconnect from a peer that has not updated its software. Using the last known
// hardfork time we can identify peers that are incompatible because of the hardforks.
if ( _delegate->get_last_known_hardfork_time() != originating_peer->last_known_hardfork_time)
{
if (_delegate->get_block_time(_delegate->get_head_block_id()).sec_since_epoch() > originating_peer->last_known_hardfork_time.sec_since_epoch())
{
std::ostringstream rejection_message;
rejection_message << "Your client is outdated -- you can only understand blocks up to #" << originating_peer->last_known_hardfork_time.to_iso_string() << ", but I'm already on block #" << _delegate->get_block_time(_delegate->get_head_block_id()).to_iso_string();
std::cout<<"Reject connection due the hardforks on hello_message"<<std::endl;
disconnet_peer(rejection_message);
return;
}
}
if (already_connected_to_this_peer)
{
@ -3102,11 +3133,38 @@ namespace graphene { namespace net { namespace detail {
--_total_number_of_unfetched_items;
dlog("sync: client accpted the block, we now have only ${count} items left to fetch before we're in sync",
("count", _total_number_of_unfetched_items));
auto disconnet_peer = [&](const std::ostringstream& disconnect_reason_stream, const peer_connection_ptr& peer, bool& disconnecting_this_peer)
{
peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(),
fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client"))));
#ifdef ENABLE_DEBUG_ULOGS
ulog("Disconnecting from peer during sync because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp));
#endif
disconnecting_this_peer = true;
};
bool is_fork_block = is_hard_fork_block(block_message_to_send.block.block_num());
for (const peer_connection_ptr& peer : _active_connections)
{
ASSERT_TASK_NOT_PREEMPTED(); // don't yield while iterating over _active_connections
bool disconnecting_this_peer = false;
// If the connected peer does not run the same witness version (fully identified by the last
// hardfork time it reported) and the block timestamp is greater than the peer's last known
// hardfork time, disconnect that peer, since it will not be able to handle the block we just pushed.
if(peer->last_known_hardfork_time != _delegate->get_last_known_hardfork_time())
{
if( block_message_to_send.block.timestamp.sec_since_epoch() > peer->last_known_hardfork_time.sec_since_epoch() )
{
std::cout<<"disconnect peer from resync method"<<std::endl;
std::ostringstream disconnect_reason_stream;
disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.timestamp.to_iso_string();
disconnet_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
}
}
if (is_fork_block)
{
// we just pushed a hard fork block. Find out if this peer is running a client
@ -3119,16 +3177,11 @@ namespace graphene { namespace net { namespace detail {
{
std::ostringstream disconnect_reason_stream;
disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.block_num();
peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(),
fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}",
("block_number", block_message_to_send.block.block_num())))));
#ifdef ENABLE_DEBUG_ULOGS
ulog("Disconnecting from peer during sync because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp));
#endif
disconnecting_this_peer = true;
disconnet_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
}
}
}
if (!disconnecting_this_peer &&
peer->ids_of_items_to_get.empty() && peer->ids_of_items_being_processed.empty())
{
@ -3459,11 +3512,10 @@ namespace graphene { namespace net { namespace detail {
broadcast( block_message_to_process, propagation_data );
_message_cache.block_accepted();
if (is_hard_fork_block(block_number))
for (const peer_connection_ptr& peer : _active_connections)
{
// we just pushed a hard fork block. Find out if any of our peers are running clients
// that will be unable to process future blocks
for (const peer_connection_ptr& peer : _active_connections)
bool disconnect_this_peer = false;
if (is_hard_fork_block(block_number) )
{
if (peer->last_known_fork_block_number != 0)
{
@ -3471,21 +3523,36 @@ namespace graphene { namespace net { namespace detail {
if (next_fork_block_number != 0 &&
next_fork_block_number <= block_number)
{
peers_to_disconnect.insert(peer);
#ifdef ENABLE_DEBUG_ULOGS
ulog("Disconnecting from peer because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp));
#endif
disconnect_this_peer = true;
}
}
}
if (!peers_to_disconnect.empty())
if(peer->last_known_hardfork_time != _delegate->get_last_known_hardfork_time())
{
std::ostringstream disconnect_reason_stream;
disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_number;
disconnect_reason = disconnect_reason_stream.str();
disconnect_exception = fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}",
("block_number", block_number)));
if(block_message_to_process.block.timestamp.sec_since_epoch() > peer->last_known_hardfork_time.sec_since_epoch())
{
std::cout<<"disconnect peer on processing block during normal operation"<<std::endl;
disconnect_this_peer = true;
}
}
if( disconnect_this_peer )
{
peers_to_disconnect.insert(peer);
#ifdef ENABLE_DEBUG_ULOGS
ulog("Disconnecting from peer because their version is too old. Their version date: ${date}", ("date", peer->graphene_git_revision_unix_timestamp));
#endif
}
}
if (!peers_to_disconnect.empty())
{
std::ostringstream disconnect_reason_stream;
disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_number;
disconnect_reason = disconnect_reason_stream.str();
disconnect_exception = fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client due to hard fork at block ${block_number}",
("block_number", block_number)));
}
}
catch (const fc::canceled_exception&)
@ -5542,6 +5609,14 @@ namespace graphene { namespace net { namespace detail {
return _node_delegate->get_block_number(block_id);
}
// Forwards the latest known hardfork time from the wrapped node delegate.
fc::time_point_sec statistics_gathering_node_delegate_wrapper::get_last_known_hardfork_time()
{
   // This call does not need to block (it just forwards a value), so unlike the
   // sibling wrappers it skips INVOKE_AND_COLLECT_STATISTICS and only asserts
   // that the current task is not preempted while reading it.
   ASSERT_TASK_NOT_PREEMPTED();
   return _node_delegate->get_last_known_hardfork_time();
}
fc::time_point_sec statistics_gathering_node_delegate_wrapper::get_block_time(const item_hash_t& block_id)
{
INVOKE_AND_COLLECT_STATISTICS(get_block_time, block_id);

View file

@ -126,14 +126,12 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
flat_set<account_id_type> impacted;
vector<authority> other;
// fee payer is added here
operation_get_required_authorities( op.op, impacted, impacted, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_required_authorities( op.op, impacted, impacted, other, true );
if( op.op.which() == operation::tag< account_create_operation >::value )
impacted.insert( op.result.get<object_id_type>() );
else
graphene::chain::operation_get_impacted_accounts( op.op, impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(db.head_block_time()) );
graphene::chain::operation_get_impacted_accounts( op.op, impacted, true );
if( op.op.which() == operation::tag< lottery_end_operation >::value )
{
auto lop = op.op.get< lottery_end_operation >();

View file

@ -173,14 +173,12 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b
flat_set<account_id_type> impacted;
vector<authority> other;
// fee_payer is added here
operation_get_required_authorities( op.op, impacted, impacted, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_required_authorities( op.op, impacted, impacted, other, true );
if( op.op.which() == operation::tag< account_create_operation >::value )
impacted.insert( op.result.get<object_id_type>() );
else
operation_get_impacted_accounts( op.op, impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_impacted_accounts( op.op, impacted, true );
for( auto& a : other )
for( auto& item : a.account_auths )