Merge branch 'develop' into feature/368/implement_the_witness_capability_to_overwrite_last_hardfork

serkixenos 2022-05-18 14:09:32 +02:00
commit 4e9e7ec2cb
8 changed files with 118 additions and 63 deletions

View file

@@ -29,27 +29,17 @@ build:
   tags:
     - builder
 
-build-testnet:
-  stage: build
+test:
+  stage: test
+  dependencies:
+    - build
   script:
-    - rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
-    - git submodule sync
-    - git submodule update --init --recursive
-    - rm -rf build
-    - mkdir build
-    - cd build
-    - cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..
-    - make -j$(nproc)
-  artifacts:
-    untracked: true
-    paths:
-      - build/libraries/
-      - build/programs/
-      - build/tests/
+    - ./build/libraries/fc/tests/all_tests
+    - ./build/tests/betting_test --log_level=message
+    - ./build/tests/chain_test --log_level=message
+    - ./build/tests/cli_test --log_level=message
   tags:
-    - builder-testnet
-  when:
-    manual
+    - builder
 
 dockerize:
   stage: build
@@ -68,10 +58,34 @@ dockerize:
   timeout:
     3h
 
-test:
+build-testnet:
+  stage: build
+  script:
+    - rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
+    - git submodule sync
+    - git submodule update --init --recursive
+    - rm -rf build
+    - mkdir build
+    - cd build
+    - cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..
+    - make -j$(nproc)
+  artifacts:
+    untracked: true
+    paths:
+      - build/libraries/
+      - build/programs/
+      - build/tests/
+  tags:
+    - builder
+  when:
+    manual
+  timeout:
+    3h
+
+test-testnet:
   stage: test
   dependencies:
-    - build
+    - build-testnet
   script:
     - ./build/libraries/fc/tests/all_tests
     - ./build/tests/betting_test --log_level=message
@@ -79,3 +93,7 @@ test:
     - ./build/tests/cli_test --log_level=message
   tags:
     - builder
+  when:
+    manual
+  timeout:
+    1h

View file

@@ -66,7 +66,7 @@ cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1
 # make -j8 for 32GB RAM
 make -j$(nproc)
-make install # this can install the executable files under /usr/local
+sudo make install # this can install the executable files under /usr/local
 ```
 
 ## Ubuntu 18.04
@@ -142,7 +142,7 @@ cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1
 # make -j8 for 32GB RAM
 make -j$(nproc)
-make install # this can install the executable files under /usr/local
+sudo make install # this can install the executable files under /usr/local
 ```
@@ -165,6 +165,18 @@ docker pull datasecuritynode/peerplays:latest
 ### Building docker images manually
 
 ```
+# Checkout the code
+git clone https://gitlab.com/PBSA/peerplays.git
+cd peerplays
+
+# Checkout the branch you want
+# E.g.
+# git checkout beatrice
+# git checkout develop
git checkout master
+
+git submodule update --init --recursive
+
 # Execute from the project root, must be a docker group member
 # Build docker image, using Ubuntu 20.04 base
@@ -176,7 +188,11 @@ docker build --no-cache -f Dockerfile.18.04 -t peerplays-18-04 .
 ### Start docker image
 
 ```
-docker start peerplays
+# Start docker image, using Ubuntu 20.04 base
+docker run peerplays:latest
+
+# Start docker image, using Ubuntu 18.04 base
+docker run peerplays-18-04:latest
 ```
 
 Rest of the instructions on starting the chain remains same.

View file

@@ -1,7 +1,7 @@
 #ifndef HARDFORK_SON3_TIME
 #ifdef BUILD_PEERPLAYS_TESTNET
-#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-04-30T00:00:00"))
+#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-05-31T00:00:00"))
 #else
-#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-04-30T00:00:00"))
+#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-05-31T00:00:00"))
 #endif
 #endif

@@ -1 +1 @@
-Subproject commit 6171e973c7fcfc9e0a39eaee2f05da84416a90e6
+Subproject commit e7369949bea26f3201d8442ba78286a88df74762

View file

@@ -30,6 +30,8 @@
 #include <iostream>
 #include <algorithm>
 #include <tuple>
+#include <random>
 
 #include <boost/tuple/tuple.hpp>
 #include <boost/circular_buffer.hpp>
@@ -67,7 +69,6 @@
 #include <fc/io/json.hpp>
 #include <fc/io/enum_type.hpp>
 #include <fc/io/raw_fwd.hpp>
-#include <fc/crypto/rand.hpp>
 #include <fc/network/rate_limiting.hpp>
 #include <fc/network/ip.hpp>
@@ -829,7 +830,11 @@ namespace graphene { namespace net { namespace detail {
       _maximum_blocks_per_peer_during_syncing(GRAPHENE_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING)
     {
       _rate_limiter.set_actual_rate_time_constant(fc::seconds(2));
-      fc::rand_bytes(&_node_id.data[0], (int)_node_id.size());
+      using bytes_randomizer = std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned long>;
+      std::random_device rd;
+      bytes_randomizer br(rd());
+      std::generate(std::begin(_node_id.data), std::end(_node_id.data), std::ref(br));
     }
 
     node_impl::~node_impl()
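The hunk above swaps the crypto-grade fc::rand_bytes call for the C++ standard library. A minimal standalone sketch of the same technique follows; the 20-byte buffer size and the names in it are illustrative, not taken from the node code:

```
// Sketch only: fill a byte buffer with random data via std::independent_bits_engine,
// mirroring the node-id change above.
#include <algorithm>
#include <array>
#include <climits>
#include <functional>
#include <random>

int main() {
    std::array<unsigned char, 20> node_id{};   // illustrative 20-byte id
    using bytes_randomizer =
        std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned long>;
    std::random_device rd;        // non-deterministic seed source
    bytes_randomizer br(rd());    // yields one byte's worth of bits per call
    std::generate(node_id.begin(), node_id.end(), std::ref(br));
    return 0;
}
```

Note that std::default_random_engine is a general-purpose PRNG rather than a cryptographic source, so this trades fc::rand_bytes for a cheaper generator that only needs to make the node id unique.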
@@ -1333,7 +1338,7 @@ namespace graphene { namespace net { namespace detail {
       // reconnect with the rest of the network, or it might just futher isolate us.
       {
         // As usual, the first step is to walk through all our peers and figure out which
-        // peers need action (disconneting, sending keepalives, etc), then we walk through
+        // peers need action (disconnecting, sending keepalives, etc), then we walk through
         // those lists yielding at our leisure later.
         ASSERT_TASK_NOT_PREEMPTED();
@@ -1868,9 +1873,6 @@ namespace graphene { namespace net { namespace detail {
       user_data["last_known_block_time"] = _delegate->get_block_time(head_block_id);
       user_data["last_known_hardfork_time"] = _delegate->get_last_known_hardfork_time().sec_since_epoch();
-      wlog("on generate hello message, hardfork: ${hardfork}", ("hardfork", _delegate->get_last_known_hardfork_time().sec_since_epoch()));
-      std::cout<<"on generate hello message :"<<fc::string(_delegate->get_last_known_hardfork_time())<<std::endl;
       if (!_hard_fork_block_numbers.empty())
         user_data["last_known_fork_block_number"] = _hard_fork_block_numbers.back();
@@ -1898,7 +1900,6 @@ namespace graphene { namespace net { namespace detail {
       originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as<uint32_t>(1);
       if (user_data.contains("last_known_hardfork_time")){
         originating_peer->last_known_hardfork_time = fc::time_point_sec(user_data["last_known_hardfork_time"].as<uint32_t>(1));
-        std::cout<<"on get helllo message: "<<originating_peer->last_known_hardfork_time.to_iso_string()<<std::endl;
       }
     }
@@ -1981,10 +1982,10 @@ namespace graphene { namespace net { namespace detail {
         return;
       }
-      auto disconnet_peer = [&](const std::ostringstream& rejection_message) {
+      auto disconnect_peer = [&](const std::ostringstream& rejection_message) {
 #ifdef ENABLE_DEBUG_ULOGS
         ulog("Rejecting connection from peer because their version is too old. Their version date: ${date}", ("date", originating_peer->graphene_git_revision_unix_timestamp));
 #endif
         connection_rejected_message connection_rejected(_user_agent_string, core_protocol_version,
                                                         originating_peer->get_socket().remote_endpoint(),
                                                         rejection_reason_code::unspecified,
@@ -2012,7 +2013,7 @@ namespace graphene { namespace net { namespace detail {
                ("their_hard_fork", next_fork_block_number)("my_block_number", head_block_num));
           std::ostringstream rejection_message;
           rejection_message << "Your client is outdated -- you can only understand blocks up to #" << next_fork_block_number << ", but I'm already on block #" << head_block_num;
-          disconnet_peer(rejection_message);
+          disconnect_peer(rejection_message);
           return;
         }
       }
@@ -2020,14 +2021,13 @@ namespace graphene { namespace net { namespace detail {
       // we wan't to disconnect from the peer that didn't updated the software. With the last hardforks we could
       // indetify if peer's are not compatible due the hardforks
-      if ( _delegate->get_last_known_hardfork_time() != originating_peer->last_known_hardfork_time)
+      if ( originating_peer->last_known_hardfork_time < _delegate->get_last_known_hardfork_time())
       {
-        if (_delegate->get_block_time(_delegate->get_head_block_id()).sec_since_epoch() > originating_peer->last_known_hardfork_time.sec_since_epoch())
+        if (_delegate->get_block_time(_delegate->get_head_block_id()).sec_since_epoch() >= _delegate->get_last_known_hardfork_time().sec_since_epoch())
         {
           std::ostringstream rejection_message;
           rejection_message << "Your client is outdated -- you can only understand blocks up to #" << originating_peer->last_known_hardfork_time.to_iso_string() << ", but I'm already on block #" << _delegate->get_block_time(_delegate->get_head_block_id()).to_iso_string();
-          std::cout<<"Reject connection due the hardforks on hello_message"<<std::endl;
-          disconnet_peer(rejection_message);
+          disconnect_peer(rejection_message);
           return;
         }
       }
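The intent of the rewritten hello-message check is: reject an incoming connection when the peer's last known hardfork is older than ours and our chain has already reached that hardfork. A hedged sketch with the fc/graphene types reduced to plain epoch seconds (function and parameter names are illustrative):

```
#include <cstdint>

// Sketch of the hello-message rule above; returns true when the connecting
// peer should be rejected as outdated.
bool reject_outdated_peer(uint32_t peer_last_known_hardfork,
                          uint32_t my_last_known_hardfork,
                          uint32_t my_head_block_time) {
    // The peer has not picked up our most recent hardfork...
    if (peer_last_known_hardfork < my_last_known_hardfork) {
        // ...and our head block has already passed that hardfork, so the peer
        // could not validate the blocks we would feed it.
        return my_head_block_time >= my_last_known_hardfork;
    }
    return false;
}
```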
@@ -3135,7 +3135,7 @@ namespace graphene { namespace net { namespace detail {
               ("count", _total_number_of_unfetched_items));
-        auto disconnet_peer = [&](const std::ostringstream& disconnect_reason_stream, const peer_connection_ptr& peer, bool& disconnecting_this_peer)
+        auto disconnect_peer = [&](const std::ostringstream& disconnect_reason_stream, const peer_connection_ptr& peer, bool& disconnecting_this_peer)
         {
           peers_to_disconnect[peer] = std::make_pair(disconnect_reason_stream.str(),
                                                      fc::oexception(fc::exception(FC_LOG_MESSAGE(error, "You need to upgrade your client"))));
@@ -3154,14 +3154,13 @@ namespace graphene { namespace net { namespace detail {
          // if connected peer doesn't have the same version of witness which is fully indetified
          // with last hardfork time received and block timestamp is grater than peers last known hardfork
          // time disconnect that peer, since he will not be capable of handling already pushed block
-         if(peer->last_known_hardfork_time != _delegate->get_last_known_hardfork_time())
+         if( peer->last_known_hardfork_time < _delegate->get_last_known_hardfork_time() )
          {
-           if( block_message_to_send.block.timestamp.sec_since_epoch() > peer->last_known_hardfork_time.sec_since_epoch() )
+           if( block_message_to_send.block.timestamp.sec_since_epoch() >= _delegate->get_last_known_hardfork_time().sec_since_epoch() )
            {
-             std::cout<<"disconnect peer from resync method"<<std::endl;
              std::ostringstream disconnect_reason_stream;
              disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.timestamp.to_iso_string();
-             disconnet_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
+             disconnect_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
            }
          }
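The sync path above applies the same comparison, only against the timestamp of the block about to be sent rather than the head block. A simplified sketch, again with plain integers and illustrative names:

```
#include <cstdint>

// Sketch of the sync-path rule above: drop a peer before sending it a block
// that lies at or beyond a hardfork the peer does not yet know about.
bool disconnect_before_sending(uint32_t peer_last_known_hardfork,
                               uint32_t my_last_known_hardfork,
                               uint32_t block_timestamp) {
    return peer_last_known_hardfork < my_last_known_hardfork &&
           block_timestamp >= my_last_known_hardfork;
}
```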
@@ -3177,7 +3176,7 @@ namespace graphene { namespace net { namespace detail {
            {
              std::ostringstream disconnect_reason_stream;
              disconnect_reason_stream << "You need to upgrade your client due to hard fork at block " << block_message_to_send.block.block_num();
-             disconnet_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
+             disconnect_peer(disconnect_reason_stream, peer, disconnecting_this_peer);
            }
          }
        }
@@ -3444,8 +3443,16 @@ namespace graphene { namespace net { namespace detail {
      std::string disconnect_reason;
      fc::oexception disconnect_exception;
      fc::oexception restart_sync_exception;
+     bool rejecting_block_due_hf = false;
      try
      {
+       if(_delegate->get_last_known_hardfork_time().sec_since_epoch() < originating_peer->last_known_hardfork_time.sec_since_epoch()
+          && block_message_to_process.block.timestamp.sec_since_epoch() >= originating_peer->last_known_hardfork_time.sec_since_epoch() )
+       {
+         rejecting_block_due_hf = true;
+       }
        // we can get into an intersting situation near the end of synchronization. We can be in
        // sync with one peer who is sending us the last block on the chain via a regular inventory
        // message, while at the same time still be synchronizing with a peer who is sending us the
@@ -3454,7 +3461,7 @@ namespace graphene { namespace net { namespace detail {
        // message id, for the peer in the sync case we only known the block_id).
        fc::time_point message_validated_time;
        if (std::find(_most_recent_blocks_accepted.begin(), _most_recent_blocks_accepted.end(),
-                     block_message_to_process.block_id) == _most_recent_blocks_accepted.end())
+                     block_message_to_process.block_id) == _most_recent_blocks_accepted.end() && !rejecting_block_due_hf)
        {
          std::vector<fc::uint160_t> contained_transaction_message_ids;
          _delegate->handle_block(block_message_to_process, false, contained_transaction_message_ids);
@@ -3483,14 +3490,16 @@ namespace graphene { namespace net { namespace detail {
          if (new_transaction_discovered)
            trigger_advertise_inventory_loop();
        }
-       else
-         dlog( "Already received and accepted this block (presumably through sync mechanism), treating it as accepted" );
+       else {
+         dlog( "Already received and accepted this block (presumably through sync mechanism), treating it as accepted or non compatible node witness");
+       }
 
        dlog( "client validated the block, advertising it to other peers" );
 
        item_id block_message_item_id(core_message_type_enum::block_message_type, message_hash);
        uint32_t block_number = block_message_to_process.block.block_num();
        fc::time_point_sec block_time = block_message_to_process.block.timestamp;
+       bool disconnect_this_peer = false;
 
        for (const peer_connection_ptr& peer : _active_connections)
        {
@@ -3514,7 +3523,6 @@ namespace graphene { namespace net { namespace detail {
        for (const peer_connection_ptr& peer : _active_connections)
        {
-         bool disconnect_this_peer = false;
          if (is_hard_fork_block(block_number) )
          {
            if (peer->last_known_fork_block_number != 0)
@@ -3528,11 +3536,10 @@ namespace graphene { namespace net { namespace detail {
            }
          }
-         if(peer->last_known_hardfork_time != _delegate->get_last_known_hardfork_time())
+         if(peer->last_known_hardfork_time < _delegate->get_last_known_hardfork_time())
          {
-           if(block_message_to_process.block.timestamp.sec_since_epoch() > peer->last_known_hardfork_time.sec_since_epoch())
+           if(block_message_to_process.block.timestamp.sec_since_epoch() >= _delegate->get_last_known_hardfork_time().sec_since_epoch())
            {
-             std::cout<<"disconnect peer on processing block during normal operation"<<std::endl;
              disconnect_this_peer = true;
            }
          }
@@ -3546,6 +3553,13 @@ namespace graphene { namespace net { namespace detail {
          }
        }
+       if(rejecting_block_due_hf)
+       {
+         // disconnect originated peer since we rejected the block from him due
+         // the not anymore compatible witness nodes
+         peers_to_disconnect.insert(originating_peer->shared_from_this());
+       }
        if (!peers_to_disconnect.empty())
        {
          std::ostringstream disconnect_reason_stream;
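The receive side mirrors the rule: if the sending peer advertises a newer hardfork than we know about and the incoming block's timestamp has reached that hardfork, the block is not handed to handle_block() (rejecting_block_due_hf) and the originating peer is dropped, since in that case we are the outdated node. A condensed sketch of that decision, with illustrative names and plain epoch seconds:

```
#include <cstdint>

// Sketch of the receive-path rule above: we are the outdated side, so the
// block is skipped and the sender is disconnected.
bool reject_incoming_block(uint32_t my_last_known_hardfork,
                           uint32_t peer_last_known_hardfork,
                           uint32_t block_timestamp) {
    return my_last_known_hardfork < peer_last_known_hardfork &&
           block_timestamp >= peer_last_known_hardfork;
}
```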

View file

@@ -1080,9 +1080,9 @@ std::vector<zmq::message_t> zmq_listener::receive_multipart() {
 void zmq_listener::handle_zmq() {
    int linger = 0;
    auto rc = zmq_setsockopt(socket, ZMQ_SUBSCRIBE, "hashblock", 9);
-   FC_ASSERT(rc);
+   FC_ASSERT(0 == rc);
    rc = zmq_setsockopt(socket, ZMQ_LINGER, &linger, sizeof(linger));
-   FC_ASSERT(rc);
+   FC_ASSERT(0 == rc);
    //socket.setsockopt( ZMQ_SUBSCRIBE, "hashtx", 6 );
    //socket.setsockopt( ZMQ_SUBSCRIBE, "rawblock", 8 );
    //socket.setsockopt( ZMQ_SUBSCRIBE, "rawtx", 5 );
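The FC_ASSERT change reflects libzmq's return convention: zmq_setsockopt() returns 0 on success and -1 on error, so the old FC_ASSERT(rc) only passed on failure. A minimal sketch of the corrected check, using a plain assert so the snippet stays self-contained (the socket is assumed to be created elsewhere):

```
#include <cassert>
#include <zmq.h>

// Sketch: subscribe to "hashblock" and assert success as the fix above does.
void subscribe_hashblock(void* socket) {
    int rc = zmq_setsockopt(socket, ZMQ_SUBSCRIBE, "hashblock", 9);
    assert(rc == 0);   // libzmq: 0 == success, -1 == error (errno is set)
}
```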

View file

@@ -29,6 +29,7 @@
 #include <sstream>
 #include <string>
 #include <list>
+#include <random>
 
 #include <boost/version.hpp>
 #include <boost/lexical_cast.hpp>
@@ -62,7 +63,6 @@
 #include <fc/crypto/hex.hpp>
 #include <fc/thread/mutex.hpp>
 #include <fc/thread/scoped_lock.hpp>
-#include <fc/crypto/rand.hpp>
 #include <graphene/app/api.hpp>
 #include <graphene/chain/asset_object.hpp>
@@ -7365,8 +7365,12 @@ signed_transaction wallet_api::rps_throw(game_id_type game_id,
    // construct the complete throw, the commit, and reveal
    rock_paper_scissors_throw full_throw;
-   fc::rand_bytes((char*)&full_throw.nonce1, sizeof(full_throw.nonce1));
-   fc::rand_bytes((char*)&full_throw.nonce2, sizeof(full_throw.nonce2));
+   std::random_device rd;
+   std::mt19937_64 gen(rd());
+   std::uniform_int_distribution<uint64_t> dis;
+   full_throw.nonce1 = dis(gen);
+   full_throw.nonce2 = dis(gen);
    full_throw.gesture = gesture;
 
    rock_paper_scissors_throw_commit commit_throw;
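A standalone sketch of the nonce generation that replaces fc::rand_bytes here, assuming the nonce fields are 64-bit integers (the surrounding wallet types are omitted):

```
#include <cstdint>
#include <random>

int main() {
    std::random_device rd;                        // non-deterministic seed
    std::mt19937_64 gen(rd());                    // 64-bit Mersenne Twister
    std::uniform_int_distribution<uint64_t> dis;  // defaults to the full 0..2^64-1 range
    uint64_t nonce1 = dis(gen);
    uint64_t nonce2 = dis(gen);
    return nonce1 == nonce2 ? 1 : 0;              // distinct with overwhelming probability
}
```

As with the node id above, std::mt19937_64 is a general-purpose PRNG rather than a cryptographic one, whereas fc::rand_bytes drew from a crypto-grade source.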

View file

@@ -27,7 +27,7 @@
 #include <graphene/chain/match_object.hpp>
 #include <graphene/chain/tournament_object.hpp>
-#include <fc/crypto/rand.hpp>
+#include <random>
 
 using namespace graphene::chain;
@@ -276,8 +276,11 @@ void tournaments_helper::rps_throw(const game_id_type& game_id,
    // construct the complete throw, the commit, and reveal
    rock_paper_scissors_throw full_throw;
-   fc::rand_bytes((char*)&full_throw.nonce1, sizeof(full_throw.nonce1));
-   fc::rand_bytes((char*)&full_throw.nonce2, sizeof(full_throw.nonce2));
+   std::random_device rd;
+   std::mt19937_64 gen(rd());
+   std::uniform_int_distribution<uint64_t> dis;
+   full_throw.nonce1 = dis(gen);
+   full_throw.nonce2 = dis(gen);
    full_throw.gesture = gesture;
 
    rock_paper_scissors_throw_commit commit_throw;