Merge develop to beatrice 2022 06 #710

Merged
serkixenos merged 14 commits from merge-develop-to-beatrice-2022-06 into beatrice 2022-06-15 03:45:26 +00:00
19 changed files with 151 additions and 70 deletions

View file

@ -8,8 +8,9 @@ include:
stages:
- build
- test
- dockerize
build:
build-mainnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
@ -29,25 +30,10 @@ build:
tags:
- builder
dockerize:
stage: build
variables:
IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t $IMAGE .
- docker push $IMAGE
tags:
- builder
when: manual
timeout: 3h
test:
test-mainnet:
stage: test
dependencies:
- build
- build-mainnet
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
@ -55,3 +41,85 @@ test:
- ./build/tests/cli_test --log_level=message
tags:
- builder
dockerize-mainnet:
stage: dockerize
dependencies:
- test-mainnet
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h
build-testnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
- git submodule sync
- git submodule update --init --recursive
- rm -rf build
- mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..
- make -j$(nproc)
artifacts:
untracked: true
paths:
- build/libraries/
- build/programs/
- build/tests/
tags:
- builder
when:
manual
timeout:
3h
test-testnet:
stage: test
dependencies:
- build-testnet
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
- ./build/tests/chain_test --log_level=message
- ./build/tests/cli_test --log_level=message
tags:
- builder
when:
manual
timeout:
1h
dockerize-testnet:
stage: dockerize
dependencies:
- test-testnet
variables:
IMAGE: $CI_REGISTRY_IMAGE/testnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h

View file

@ -23,7 +23,6 @@ RUN \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libreadline-dev \
libsnappy-dev \
libssl-dev \
libtool \

View file

@ -21,7 +21,6 @@ RUN \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libreadline-dev \
libsnappy-dev \
libssl-dev \
libtool \

View file

@ -15,7 +15,7 @@ Following dependencies are needed for a clean install of Ubuntu 20.04:
sudo apt-get install \
apt-utils autoconf bash build-essential ca-certificates clang-format cmake \
dnsutils doxygen expect git graphviz libboost-all-dev libbz2-dev \
libcurl4-openssl-dev libncurses-dev libreadline-dev libsnappy-dev \
libcurl4-openssl-dev libncurses-dev libsnappy-dev \
libssl-dev libtool libzip-dev locales lsb-release mc nano net-tools ntp \
openssh-server pkg-config perl python3 python3-jinja2 sudo \
systemd-coredump wget
@ -74,11 +74,12 @@ sudo make install # this can install the executable files under /usr/local
Following dependencies are needed for a clean install of Ubuntu 18.04:
```
sudo apt-get install \
apt-utils autoconf bash build-essential ca-certificates dnsutils doxygen \
expect git graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev \
libreadline-dev libsnappy-dev libssl-dev libtool libzip-dev locales \
lsb-release mc nano net-tools ntp openssh-server pkg-config perl \
python3 python3-jinja2 sudo systemd-coredump wget
apt-utils autoconf bash build-essential ca-certificates clang-format \
dnsutils doxygen expect git graphviz libbz2-dev \
libcurl4-openssl-dev libncurses-dev libsnappy-dev \
libssl-dev libtool libzip-dev locales lsb-release mc nano net-tools ntp \
openssh-server pkg-config perl python3 python3-jinja2 sudo \
systemd-coredump wget
```
Install Boost libraries from source

View file

@ -48,6 +48,7 @@
#include <boost/range/algorithm/reverse.hpp>
#include <boost/signals2.hpp>
#include <atomic>
#include <iostream>
#include <fc/log/file_appender.hpp>
@ -107,6 +108,7 @@ public:
fc::optional<fc::temp_file> _lock_file;
bool _is_block_producer = false;
bool _force_validate = false;
std::atomic_bool _running{true};
void reset_p2p_node(const fc::path &data_dir) {
try {
@ -450,6 +452,12 @@ public:
*/
virtual bool handle_block(const graphene::net::block_message &blk_msg, bool sync_mode,
std::vector<fc::uint160_t> &contained_transaction_message_ids) override {
// Checkpoint for the threads which may be cancelled on application shutdown
if(!_running.load()) {
return true;
}
try {
auto latency = fc::time_point::now() - blk_msg.block.timestamp;
FC_ASSERT((latency.count() / 1000) > -5000, "Rejecting block with timestamp in the future");
@ -1008,6 +1016,7 @@ void application::shutdown_plugins() {
return;
}
void application::shutdown() {
my->_running.store(false);
if (my->_p2p_network)
my->_p2p_network->close();
if (my->_chain_db)

View file

@ -433,7 +433,12 @@ processed_transaction database::push_proposal(const proposal_object& proposal)
{
for( size_t i=old_applied_ops_size,n=_applied_ops.size(); i<n; i++ )
{
ilog( "removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])) );
if(_applied_ops[i].valid()) {
ilog("removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])));
}
else{
ilog("Can't remove failed operation from applied_ops (operation is not valid), op_id : ${op_id}", ("op_id", i));
}
_applied_ops[i].reset();
}
}
@ -619,7 +624,7 @@ uint32_t database::push_applied_operation( const operation& op )
void database::set_applied_operation_result( uint32_t op_id, const operation_result& result )
{
assert( op_id < _applied_ops.size() );
if( _applied_ops[op_id] )
if( _applied_ops[op_id].valid() )
_applied_ops[op_id]->result = result;
else
{
@ -806,7 +811,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
return get_account_custom_authorities(id, op);
};
trx.verify_authority( chain_id, get_active, get_owner, get_custom,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(head_block_time()),
true,
get_global_properties().parameters.max_authority_depth );
}

View file

@ -625,7 +625,6 @@ void database::notify_changed_objects()
if( _undo_db.enabled() )
{
const auto& head_undo = _undo_db.head();
auto chain_time = head_block_time();
// New
if( !new_objects.empty() )
@ -637,8 +636,7 @@ void database::notify_changed_objects()
new_ids.push_back(item);
auto obj = find_object(item);
if(obj != nullptr)
get_relevant_accounts(obj, new_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(obj, new_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted)
@ -652,8 +650,7 @@ void database::notify_changed_objects()
for( const auto& item : head_undo.old_values )
{
changed_ids.push_back(item.first);
get_relevant_accounts(item.second.get(), changed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(item.second.get(), changed_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted)
@ -670,8 +667,7 @@ void database::notify_changed_objects()
removed_ids.emplace_back( item.first );
auto obj = item.second.get();
removed.emplace_back( obj );
get_relevant_accounts(obj, removed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
get_relevant_accounts(obj, removed_accounts_impacted, true);
}
GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted)

View file

@ -1,10 +0,0 @@
// #210 Check authorities on custom_operation
#ifndef HARDFORK_CORE_210_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_CORE_210_TIME (fc::time_point_sec::from_iso_string("2030-01-01T00:00:00")) // (Not yet scheduled)
#else
#define HARDFORK_CORE_210_TIME (fc::time_point_sec::from_iso_string("2030-01-01T00:00:00")) // (Not yet scheduled)
#endif
// Bugfix: pre-HF 210, custom_operation's required_auths field was ignored.
#define MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) (chain_time <= HARDFORK_CORE_210_TIME)
#endif

View file

@ -131,6 +131,7 @@ namespace graphene { namespace chain {
}}
FC_REFLECT_DERIVED( graphene::chain::dynamic_global_property_object, (graphene::db::object),
(random)
(head_block_number)
(head_block_id)
(time)

View file

@ -302,8 +302,7 @@ void_result proposal_create_evaluator::do_evaluate( const proposal_create_operat
vector<authority> other;
for( auto& op : o.proposed_ops )
{
operation_get_required_authorities( op.op, auths, auths, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(block_time) );
operation_get_required_authorities( op.op, auths, auths, other, true );
}
FC_ASSERT( other.size() == 0 ); // TODO: what about other???
@ -352,8 +351,7 @@ object_id_type proposal_create_evaluator::do_apply( const proposal_create_operat
// TODO: consider caching values from evaluate?
for( auto& op : _proposed_trx.operations )
operation_get_required_authorities( op, required_active, proposal.required_owner_approvals, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) );
operation_get_required_authorities( op, required_active, proposal.required_owner_approvals, other, true);
//All accounts which must provide both owner and active authority should be omitted from the active authority set;
//owner authority approval implies active authority approval.

View file

@ -39,7 +39,7 @@ bool proposal_object::is_authorized_to_execute( database& db ) const
[&]( account_id_type id ){ return &id(db).owner; },
[&]( account_id_type id, const operation& op ){
return db.get_account_custom_authorities(id, op); },
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ),
true,
db.get_global_properties().parameters.max_authority_depth,
true, /* allow committee */
available_active_approvals,

@ -1 +1 @@
Subproject commit 6171e973c7fcfc9e0a39eaee2f05da84416a90e6
Subproject commit e7369949bea26f3201d8442ba78286a88df74762

View file

@ -30,6 +30,8 @@
#include <iostream>
#include <algorithm>
#include <tuple>
#include <random>
#include <boost/tuple/tuple.hpp>
#include <boost/circular_buffer.hpp>
@ -67,7 +69,6 @@
#include <fc/io/json.hpp>
#include <fc/io/enum_type.hpp>
#include <fc/io/raw_fwd.hpp>
#include <fc/crypto/rand.hpp>
#include <fc/network/rate_limiting.hpp>
#include <fc/network/ip.hpp>
@ -91,6 +92,7 @@
#define DEFAULT_LOGGER "p2p"
#define P2P_IN_DEDICATED_THREAD 1
#define DISABLE_WITNESS_HF_CHECK 1
#define INVOCATION_COUNTER(name) \
static unsigned total_ ## name ## _counter = 0; \
@ -827,7 +829,11 @@ namespace graphene { namespace net { namespace detail {
_maximum_blocks_per_peer_during_syncing(GRAPHENE_NET_MAX_BLOCKS_PER_PEER_DURING_SYNCING)
{
_rate_limiter.set_actual_rate_time_constant(fc::seconds(2));
fc::rand_bytes(&_node_id.data[0], (int)_node_id.size());
using bytes_randomizer = std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned long>;
std::random_device rd;
bytes_randomizer br(rd());
std::generate(std::begin(_node_id.data), std::end(_node_id.data), std::ref(br));
}
node_impl::~node_impl()
@ -887,7 +893,7 @@ namespace graphene { namespace net { namespace detail {
void node_impl::p2p_network_connect_loop()
{
VERIFY_CORRECT_THREAD();
while (!_p2p_network_connect_loop_done.canceled())
while (!_p2p_network_connect_loop_done.canceled() && !_node_is_shutting_down)
{
try
{
@ -3961,6 +3967,8 @@ namespace graphene { namespace net { namespace detail {
{
VERIFY_CORRECT_THREAD();
_node_is_shutting_down = true;
try
{
_potential_peer_db.close();

View file

@ -126,14 +126,12 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
flat_set<account_id_type> impacted;
vector<authority> other;
// fee payer is added here
operation_get_required_authorities( op.op, impacted, impacted, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_required_authorities( op.op, impacted, impacted, other, true );
if( op.op.which() == operation::tag< account_create_operation >::value )
impacted.insert( op.result.get<object_id_type>() );
else
graphene::chain::operation_get_impacted_accounts( op.op, impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(db.head_block_time()) );
graphene::chain::operation_get_impacted_accounts( op.op, impacted, true );
if( op.op.which() == operation::tag< lottery_end_operation >::value )
{
auto lop = op.op.get< lottery_end_operation >();

View file

@ -173,14 +173,12 @@ bool elasticsearch_plugin_impl::update_account_histories( const signed_block& b
flat_set<account_id_type> impacted;
vector<authority> other;
// fee_payer is added here
operation_get_required_authorities( op.op, impacted, impacted, other,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_required_authorities( op.op, impacted, impacted, other, true );
if( op.op.which() == operation::tag< account_create_operation >::value )
impacted.insert( op.result.get<object_id_type>() );
else
operation_get_impacted_accounts( op.op, impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
operation_get_impacted_accounts( op.op, impacted, true );
for( auto& a : other )
for( auto& item : a.account_auths )

View file

@ -1,5 +1,6 @@
#pragma once
#include <mutex>
#include <vector>
#include <boost/program_options.hpp>
@ -60,6 +61,7 @@ protected:
std::map<std::string, std::string> private_keys;
std::vector<std::string> son_listener_log;
std::mutex son_listener_log_mutex;
void on_applied_block(const signed_block &b);

View file

@ -619,13 +619,15 @@ void sidechain_net_handler::settle_sidechain_transactions() {
}
void sidechain_net_handler::add_to_son_listener_log(std::string trx_id) {
const std::lock_guard<std::mutex> lock(son_listener_log_mutex);
son_listener_log.insert(son_listener_log.begin(), trx_id);
if (son_listener_log.size() > 33) {
son_listener_log.erase(son_listener_log.end());
son_listener_log.pop_back();
}
}
std::vector<std::string> sidechain_net_handler::get_son_listener_log() {
const std::lock_guard<std::mutex> lock(son_listener_log_mutex);
return son_listener_log;
}

View file

@ -29,6 +29,7 @@
#include <sstream>
#include <string>
#include <list>
#include <random>
#include <boost/version.hpp>
#include <boost/lexical_cast.hpp>
@ -62,7 +63,6 @@
#include <fc/crypto/hex.hpp>
#include <fc/thread/mutex.hpp>
#include <fc/thread/scoped_lock.hpp>
#include <fc/crypto/rand.hpp>
#include <graphene/app/api.hpp>
#include <graphene/chain/asset_object.hpp>
@ -7365,8 +7365,12 @@ signed_transaction wallet_api::rps_throw(game_id_type game_id,
// construct the complete throw, the commit, and reveal
rock_paper_scissors_throw full_throw;
fc::rand_bytes((char*)&full_throw.nonce1, sizeof(full_throw.nonce1));
fc::rand_bytes((char*)&full_throw.nonce2, sizeof(full_throw.nonce2));
std::random_device rd;
std::mt19937_64 gen(rd());
std::uniform_int_distribution<uint64_t> dis;
full_throw.nonce1 = dis(gen);
full_throw.nonce2 = dis(gen);
full_throw.gesture = gesture;
rock_paper_scissors_throw_commit commit_throw;

View file

@ -27,7 +27,7 @@
#include <graphene/chain/match_object.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <fc/crypto/rand.hpp>
#include <random>
using namespace graphene::chain;
@ -276,8 +276,11 @@ void tournaments_helper::rps_throw(const game_id_type& game_id,
// construct the complete throw, the commit, and reveal
rock_paper_scissors_throw full_throw;
fc::rand_bytes((char*)&full_throw.nonce1, sizeof(full_throw.nonce1));
fc::rand_bytes((char*)&full_throw.nonce2, sizeof(full_throw.nonce2));
std::random_device rd;
std::mt19937_64 gen(rd());
std::uniform_int_distribution<uint64_t> dis;
full_throw.nonce1 = dis(gen);
full_throw.nonce2 = dis(gen);
full_throw.gesture = gesture;
rock_paper_scissors_throw_commit commit_throw;