Merge branch 'develop' into feature/son-for-ethereum
commit bac0642d4c
21 changed files with 258 additions and 129 deletions
@@ -8,8 +8,9 @@ include:
stages:
- build
- test
- dockerize
build:
build-mainnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
@@ -29,10 +30,10 @@ build:
tags:
- builder
test:
test-mainnet:
stage: test
dependencies:
- build
- build-mainnet
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
@@ -41,16 +42,20 @@ test:
tags:
- builder
dockerize:
stage: build
dockerize-mainnet:
stage: dockerize
dependencies:
- test-mainnet
variables:
IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build -t $IMAGE .
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
@@ -97,3 +102,24 @@ test-testnet:
manual
timeout:
1h
dockerize-testnet:
stage: dockerize
dependencies:
- test-testnet
variables:
IMAGE: $CI_REGISTRY_IMAGE/testnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h
@@ -23,7 +23,6 @@ RUN \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libreadline-dev \
libsnappy-dev \
libssl-dev \
libtool \
@@ -21,7 +21,6 @@ RUN \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libreadline-dev \
libsnappy-dev \
libssl-dev \
libtool \
README.md
@@ -15,7 +15,7 @@ Following dependencies are needed for a clean install of Ubuntu 20.04:
sudo apt-get install \
apt-utils autoconf bash build-essential ca-certificates clang-format cmake \
dnsutils doxygen expect git graphviz libboost-all-dev libbz2-dev \
libcurl4-openssl-dev libncurses-dev libreadline-dev libsnappy-dev \
libcurl4-openssl-dev libncurses-dev libsnappy-dev \
libssl-dev libtool libzip-dev locales lsb-release mc nano net-tools ntp \
openssh-server pkg-config perl python3 python3-jinja2 sudo \
systemd-coredump wget
@@ -74,11 +74,12 @@ sudo make install # this can install the executable files under /usr/local
Following dependencies are needed for a clean install of Ubuntu 18.04:
```
sudo apt-get install \
apt-utils autoconf bash build-essential ca-certificates dnsutils doxygen \
expect git graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev \
libreadline-dev libsnappy-dev libssl-dev libtool libzip-dev locales \
lsb-release mc nano net-tools ntp openssh-server pkg-config perl \
python3 python3-jinja2 sudo systemd-coredump wget
apt-utils autoconf bash build-essential ca-certificates clang-format \
dnsutils doxygen expect git graphviz libbz2-dev \
libcurl4-openssl-dev libncurses-dev libsnappy-dev \
libssl-dev libtool libzip-dev locales lsb-release mc nano net-tools ntp \
openssh-server pkg-config perl python3 python3-jinja2 sudo \
systemd-coredump wget
```
Install Boost libraries from source
@@ -48,6 +48,7 @@
#include <boost/range/algorithm/reverse.hpp>
#include <boost/signals2.hpp>
#include <atomic>
#include <iostream>
#include <fc/log/file_appender.hpp>
@@ -107,6 +108,7 @@ public:
fc::optional<fc::temp_file> _lock_file;
bool _is_block_producer = false;
bool _force_validate = false;
std::atomic_bool _running{true};
void reset_p2p_node(const fc::path &data_dir) {
try {
@@ -115,67 +117,29 @@ public:
_p2p_network->load_configuration(data_dir / "p2p");
_p2p_network->set_node_delegate(this);
vector<string> all_seeds;
if (_options->count("seed-node")) {
auto seeds = _options->at("seed-node").as<vector<string>>();
for (const string &endpoint_string : seeds) {
try {
std::vector<fc::ip::endpoint> endpoints = resolve_string_to_ip_endpoints(endpoint_string);
for (const fc::ip::endpoint &endpoint : endpoints) {
ilog("Adding seed node ${endpoint}", ("endpoint", endpoint));
_p2p_network->add_node(endpoint);
_p2p_network->connect_to_endpoint(endpoint);
}
} catch (const fc::exception &e) {
wlog("caught exception ${e} while adding seed node ${endpoint}",
("e", e.to_detail_string())("endpoint", endpoint_string));
}
}
all_seeds.insert(all_seeds.end(), seeds.begin(), seeds.end());
}
if (_options->count("seed-nodes")) {
auto seeds_str = _options->at("seed-nodes").as<string>();
auto seeds = fc::json::from_string(seeds_str).as<vector<string>>(2);
for (const string &endpoint_string : seeds) {
try {
std::vector<fc::ip::endpoint> endpoints = resolve_string_to_ip_endpoints(endpoint_string);
for (const fc::ip::endpoint &endpoint : endpoints) {
ilog("Adding seed node ${endpoint}", ("endpoint", endpoint));
_p2p_network->add_node(endpoint);
}
} catch (const fc::exception &e) {
wlog("caught exception ${e} while adding seed node ${endpoint}",
("e", e.to_detail_string())("endpoint", endpoint_string));
}
}
} else {
// t.me/peerplays #seednodes
vector<string> seeds = {
#ifdef BUILD_PEERPLAYS_TESTNET
all_seeds.insert(all_seeds.end(), seeds.begin(), seeds.end());
}
#else
"51.222.110.110:9777",
"95.216.90.243:9777",
"96.46.48.98:19777",
"96.46.48.98:29777",
"96.46.48.98:39777",
"96.46.48.98:49777",
"96.46.48.98:59777",
"seed.i9networks.net.br:9777",
"witness.serverpit.com:9777"
#endif
};
for (const string &endpoint_string : seeds) {
try {
std::vector<fc::ip::endpoint> endpoints = resolve_string_to_ip_endpoints(endpoint_string);
for (const fc::ip::endpoint &endpoint : endpoints) {
ilog("Adding seed node ${endpoint}", ("endpoint", endpoint));
_p2p_network->add_node(endpoint);
}
} catch (const fc::exception &e) {
wlog("caught exception ${e} while adding seed node ${endpoint}",
("e", e.to_detail_string())("endpoint", endpoint_string));
for (const string &endpoint_string : all_seeds) {
try {
std::vector<fc::ip::endpoint> endpoints = resolve_string_to_ip_endpoints(endpoint_string);
for (const fc::ip::endpoint &endpoint : endpoints) {
ilog("Adding seed node ${endpoint}", ("endpoint", endpoint));
_p2p_network->add_node(endpoint);
}
} catch (const fc::exception &e) {
wlog("caught exception ${e} while adding seed node ${endpoint}",
("e", e.to_detail_string())("endpoint", endpoint_string));
}
}
@@ -450,6 +414,12 @@ public:
*/
virtual bool handle_block(const graphene::net::block_message &blk_msg, bool sync_mode,
std::vector<fc::uint160_t> &contained_transaction_message_ids) override {
// check point for the threads which may be cancled on application shutdown
if (!_running.load()) {
return true;
}
try {
auto latency = fc::time_point::now() - blk_msg.block.timestamp;
FC_ASSERT((latency.count() / 1000) > -5000, "Rejecting block with timestamp in the future");
@@ -864,9 +834,24 @@ application::~application() {
void application::set_program_options(boost::program_options::options_description &cli,
boost::program_options::options_description &cfg) const {
std::vector<string> seed_nodes = {
#ifdef BUILD_PEERPLAYS_TESTNET
#else
"51.222.110.110:9777",
"95.216.90.243:9777",
"ca.peerplays.info:9777",
"de.peerplays.xyz:9777",
"pl.peerplays.org:9777",
"seed.i9networks.net.br:9777",
"witness.serverpit.com:9777"
#endif
};
std::string seed_nodes_str = fc::json::to_string(seed_nodes);
cfg.add_options()("p2p-endpoint", bpo::value<string>()->default_value("0.0.0.0:9777"), "Endpoint for P2P node to listen on");
cfg.add_options()("seed-node,s", bpo::value<vector<string>>()->composing(), "P2P nodes to connect to on startup (may specify multiple times)");
cfg.add_options()("seed-nodes", bpo::value<string>()->composing(), "JSON array of P2P nodes to connect to on startup");
cfg.add_options()("seed-nodes", bpo::value<string>()->composing()->default_value(seed_nodes_str), "JSON array of P2P nodes to connect to on startup");
cfg.add_options()("checkpoint,c", bpo::value<vector<string>>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.");
cfg.add_options()("rpc-endpoint", bpo::value<string>()->default_value("127.0.0.1:8090"), "Endpoint for websocket RPC to listen on");
cfg.add_options()("rpc-tls-endpoint", bpo::value<string>()->implicit_value("127.0.0.1:8089"), "Endpoint for TLS websocket RPC to listen on");
@@ -1012,6 +997,7 @@ void application::shutdown_plugins() {
return;
}
void application::shutdown() {
my->_running.store(false);
if (my->_p2p_network)
my->_p2p_network->close();
if (my->_chain_db)
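The `seed-nodes` option now carries a built-in default: the hard-coded seed list is serialized with `fc::json::to_string` and registered as the option's `default_value`, so an unconfigured node still discovers peers while an operator can override the list. Below is a minimal sketch of that serialize/parse round trip, assuming only the `fc::json` calls already shown in this diff; the header path and the standalone `main` are illustrative, not part of the change.

```
// Sketch only: mirrors the default-value / parse round trip used above.
// Assumes libfc is available; fc/io/json.hpp is the usual location of fc::json.
#include <fc/io/json.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
   std::vector<std::string> seed_nodes = {"51.222.110.110:9777", "95.216.90.243:9777"};

   // What set_program_options() stores as the option's default_value.
   std::string seed_nodes_str = fc::json::to_string(seed_nodes);

   // What reset_p2p_node() does with the configured value (depth limit 2, as in the diff).
   auto parsed = fc::json::from_string(seed_nodes_str).as<std::vector<std::string>>(2);

   for (const auto &endpoint : parsed)
      std::cout << "would add seed node " << endpoint << "\n";
}
```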
@@ -29,12 +29,15 @@
#include <graphene/chain/pts_address.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <graphene/utilities/git_revision.hpp>
#include <fc/bloom_filter.hpp>
#include <fc/crypto/hex.hpp>
#include <fc/rpc/api_connection.hpp>
#include <fc/uint128.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/range/iterator_range.hpp>
#include <boost/rational.hpp>
@@ -90,6 +93,7 @@ public:
processed_transaction get_transaction(uint32_t block_num, uint32_t trx_in_block) const;
// Globals
version_info get_version_info() const;
chain_property_object get_chain_properties() const;
global_property_object get_global_properties() const;
fc::variant_object get_config() const;
@@ -563,6 +567,27 @@ processed_transaction database_api_impl::get_transaction(uint32_t block_num, uin
// //
//////////////////////////////////////////////////////////////////////
version_info database_api::get_version_info() const {
return my->get_version_info();
}
version_info database_api_impl::get_version_info() const {
std::string witness_version(graphene::utilities::git_revision_description);
const size_t pos = witness_version.find('/');
if (pos != std::string::npos && witness_version.size() > pos)
witness_version = witness_version.substr(pos + 1);
version_info vi;
vi.version = witness_version;
vi.git_revision = graphene::utilities::git_revision_sha;
vi.built = std::string(__DATE__) + " at " + std::string(__TIME__);
vi.openssl = OPENSSL_VERSION_TEXT;
vi.boost = boost::replace_all_copy(std::string(BOOST_LIB_VERSION), "_", ".");
return vi;
}
chain_property_object database_api::get_chain_properties() const {
return my->get_chain_properties();
}
@@ -130,6 +130,14 @@ struct gpos_info {
share_type account_vested_balance;
};
struct version_info {
string version;
string git_revision;
string built;
string openssl;
string boost;
};
/**
* @brief The database_api class implements the RPC API for the chain database.
*
@@ -218,6 +226,11 @@ public:
// Globals //
/////////////
/**
* @brief Retrieve the @ref version_info associated with the witness node
*/
version_info get_version_info() const;
/**
* @brief Retrieve the @ref chain_property_object associated with the chain
*/
@@ -1040,6 +1053,7 @@ FC_REFLECT(graphene::app::market_ticker, (base)(quote)(latest)(lowest_ask)(highe
FC_REFLECT(graphene::app::market_volume, (base)(quote)(base_volume)(quote_volume));
FC_REFLECT(graphene::app::market_trade, (date)(price)(amount)(value));
FC_REFLECT(graphene::app::gpos_info, (vesting_factor)(award)(total_amount)(current_subperiod)(last_voted_time)(allowed_withdraw_amount)(account_vested_balance));
FC_REFLECT(graphene::app::version_info, (version)(git_revision)(built)(openssl)(boost));
FC_API(graphene::app::database_api,
// Objects
@@ -1060,6 +1074,7 @@ FC_API(graphene::app::database_api,
(get_recent_transaction_by_id)
// Globals
(get_version_info)
(get_chain_properties)
(get_global_properties)
(get_config)
@@ -62,11 +62,14 @@ void verify_account_votes( const database& db, const account_options& options )
const auto& gpo = db.get_global_properties();
const auto& chain_params = gpo.parameters;
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
FC_ASSERT( options.num_witness <= chain_params.maximum_witness_count,
"Voted for more witnesses than currently allowed (${c})", ("c", chain_params.maximum_witness_count) );
FC_ASSERT( options.num_committee <= chain_params.maximum_committee_count,
"Voted for more committee members than currently allowed (${c})", ("c", chain_params.maximum_committee_count) );
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
FC_ASSERT( options.num_son() <= chain_params.maximum_son_count(),
"Voted for more sons than currently allowed (${c})", ("c", chain_params.maximum_son_count()) );
uint32_t max_vote_id = gpo.next_available_vote_id;
bool has_worker_votes = false;
@@ -433,7 +433,12 @@ processed_transaction database::push_proposal(const proposal_object& proposal)
{
for( size_t i=old_applied_ops_size,n=_applied_ops.size(); i<n; i++ )
{
ilog( "removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])) );
if(_applied_ops[i].valid()) {
ilog("removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])));
}
else{
ilog("Can't remove failed operation from applied_ops (operation is not valid), op_id : ${op_id}", ("op_id", i));
}
_applied_ops[i].reset();
}
}
@@ -619,7 +624,7 @@ uint32_t database::push_applied_operation( const operation& op )
void database::set_applied_operation_result( uint32_t op_id, const operation_result& result )
{
assert( op_id < _applied_ops.size() );
if( _applied_ops[op_id] )
if( _applied_ops[op_id].valid() )
_applied_ops[op_id]->result = result;
else
{
@@ -182,7 +182,26 @@ void database::pay_sons()
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
// Current requirement is that we have to pay every 24 hours, so the following check
if( dpo.son_budget.value > 0 && ((now - dpo.last_son_payout_time) >= fc::seconds(get_global_properties().parameters.son_pay_time()))) {
auto sons = sort_votable_objects<son_index>(get_global_properties().parameters.maximum_son_count());
assert( _son_count_histogram_buffer.size() > 0 );
const share_type stake_target = (_total_voting_stake-_son_count_histogram_buffer[0]) / 2;
/// accounts that vote for 0 or 1 son do not get to express an opinion on
/// the number of sons to have (they abstain and are non-voting accounts)
share_type stake_tally = 0;
size_t son_count = 0;
if( stake_target > 0 )
{
while( (son_count < _son_count_histogram_buffer.size() - 1)
&& (stake_tally <= stake_target) )
{
stake_tally += _son_count_histogram_buffer[++son_count];
}
}
const vector<std::reference_wrapper<const son_object>> sons = [this, &son_count]{
if(head_block_time() >= HARDFORK_SON3_TIME)
return sort_votable_objects<son_index>(std::max(son_count*2+1, (size_t)get_chain_properties().immutable_parameters.min_son_count));
else
return sort_votable_objects<son_index>(get_global_properties().parameters.maximum_son_count());
}();
// After SON2 HF
uint64_t total_votes = 0;
for( const son_object& son : sons )
@@ -660,6 +679,10 @@ void database::update_active_committee_members()
void database::update_active_sons()
{ try {
if (head_block_time() < HARDFORK_SON_TIME) {
return;
}
assert( _son_count_histogram_buffer.size() > 0 );
share_type stake_target = (_total_voting_stake-_son_count_histogram_buffer[0]) / 2;
@@ -679,8 +702,12 @@ void database::update_active_sons()
}
const global_property_object& gpo = get_global_properties();
const chain_parameters& cp = gpo.parameters;
auto sons = sort_votable_objects<son_index>(cp.maximum_son_count());
const vector<std::reference_wrapper<const son_object>> sons = [this, &son_count]{
if(head_block_time() >= HARDFORK_SON3_TIME)
return sort_votable_objects<son_index>(std::max(son_count*2+1, (size_t)get_chain_properties().immutable_parameters.min_son_count));
else
return sort_votable_objects<son_index>(get_global_properties().parameters.maximum_son_count());
}();
const auto& all_sons = get_index_type<son_index>().indices();
@@ -759,11 +786,7 @@ void database::update_active_sons()
}
}
if (son_sets_equal) {
ilog( "Active SONs set NOT CHANGED" );
} else {
ilog( "Active SONs set CHANGED" );
if (!son_sets_equal) {
update_son_wallet(new_active_sons);
update_son_statuses(cur_active_sons, new_active_sons);
}
@@ -2041,6 +2064,13 @@ void update_son_params(database& db)
gpo.parameters.extensions.value.maximum_son_count = 7;
});
}
else
{
const auto& gpo = db.get_global_properties();
db.modify( gpo, []( global_property_object& gpo ) {
gpo.parameters.extensions.value.maximum_son_count = GRAPHENE_DEFAULT_MAX_SONS;
});
}
}
void database::perform_chain_maintenance(const signed_block& next_block, const global_property_object& global_props)
@@ -2167,9 +2197,9 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g
// same rationale as for witnesses
d._committee_count_histogram_buffer[offset] += voting_stake;
}
if( opinion_account.options.num_son <= props.parameters.maximum_son_count() )
if( opinion_account.options.num_son() <= props.parameters.maximum_son_count() )
{
uint16_t offset = std::min(size_t(opinion_account.options.num_son/2),
uint16_t offset = std::min(size_t(opinion_account.options.num_son()/2),
d._son_count_histogram_buffer.size() - 1);
// votes for a number greater than maximum_son_count
// are turned into votes for maximum_son_count.
@@ -2271,6 +2301,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g
// the following parameters are not allowed to be changed. So take what is in global property
p.pending_parameters->extensions.value.hive_asset = p.parameters.extensions.value.hive_asset;
p.pending_parameters->extensions.value.hbd_asset = p.parameters.extensions.value.hbd_asset;
p.pending_parameters->extensions.value.maximum_son_count = p.parameters.extensions.value.maximum_son_count;
p.pending_parameters->extensions.value.btc_asset = p.parameters.extensions.value.btc_asset;
p.pending_parameters->extensions.value.son_account = p.parameters.extensions.value.son_account;
p.pending_parameters->extensions.value.gpos_period_start = p.parameters.extensions.value.gpos_period_start;
@@ -1,7 +1,7 @@
#ifndef HARDFORK_SON3_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-05-31T00:00:00"))
#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-07-16T00:00:00"))
#else
#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-05-31T00:00:00"))
#define HARDFORK_SON3_TIME (fc::time_point_sec::from_iso_string("2022-07-16T00:00:00"))
#endif
#endif
@@ -131,6 +131,7 @@ namespace graphene { namespace chain {
}}
FC_REFLECT_DERIVED( graphene::chain::dynamic_global_property_object, (graphene::db::object),
(random)
(head_block_number)
(head_block_id)
(time)
@@ -37,6 +37,11 @@ namespace graphene { namespace chain {
/// These are the fields which can be updated by the active authority.
struct account_options
{
struct ext
{
optional< uint16_t > num_son = 0;
};
/// The memo key is the key this account will typically use to encrypt/sign transaction memos and other non-
/// validated account activities. This field is here to prevent confusion if the active authority has zero or
/// multiple keys in it.
@@ -54,11 +59,11 @@ namespace graphene { namespace chain {
uint16_t num_committee = 0;
/// The number of active son members this account votes the blockchain should appoint
/// Must not exceed the actual number of son members voted for in @ref votes
uint16_t num_son = 0;
uint16_t num_son() const { return extensions.value.num_son.valid() ? *extensions.value.num_son : 0; }
/// This is the list of vote IDs this account votes for. The weight of these votes is determined by this
/// account's balance of core asset.
flat_set<vote_id_type> votes;
extensions_type extensions;
extension< ext > extensions;
/// Whether this account is voting
inline bool is_voting() const
@@ -289,6 +294,7 @@ namespace graphene { namespace chain {
} } // graphene::chain
FC_REFLECT(graphene::chain::account_options::ext, (num_son) )
FC_REFLECT(graphene::chain::account_options, (memo_key)(voting_account)(num_witness)(num_committee)(votes)(extensions))
// FC_REFLECT_TYPENAME( graphene::chain::account_whitelist_operation::account_listing)
FC_REFLECT_ENUM( graphene::chain::account_whitelist_operation::account_listing,
@@ -174,7 +174,7 @@ void account_options::validate() const
{
auto needed_witnesses = num_witness;
auto needed_committee = num_committee;
auto needed_sons = num_son;
auto needed_sons = num_son();
for( vote_id_type id : votes )
if( id.type() == vote_id_type::witness && needed_witnesses )
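Across these hunks `num_son` moves from a plain `account_options` member into an optional field inside an extension, and callers switch to a `num_son()` accessor that falls back to 0 when the extension was never set, so accounts serialized before this change keep validating unchanged. A minimal self-contained sketch of that accessor pattern follows; it uses `std::optional` as a stand-in for fc's `optional`/`extension` machinery, and the struct name is illustrative only.

```
#include <cstdint>
#include <iostream>
#include <optional>

// Stand-in for graphene's extension<ext>: a bag of optional fields that
// objects serialized before the upgrade simply leave empty.
struct account_options_sketch {
   struct ext {
      std::optional<uint16_t> num_son;   // absent for pre-upgrade accounts
   };
   ext extensions;

   // Mirrors the diff: return the extension value when present, else 0.
   uint16_t num_son() const {
      return extensions.num_son.has_value() ? *extensions.num_son : 0;
   }
};

int main() {
   account_options_sketch legacy;            // extension never set
   account_options_sketch updated;
   updated.extensions.num_son = 5;           // wallet sets extensions.value.num_son

   std::cout << legacy.num_son() << "\n";    // 0 -> old validation logic still passes
   std::cout << updated.num_son() << "\n";   // 5
}
```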
@@ -92,6 +92,7 @@
#define DEFAULT_LOGGER "p2p"
#define P2P_IN_DEDICATED_THREAD 1
#define DISABLE_WITNESS_HF_CHECK 1
#define INVOCATION_COUNTER(name) \
static unsigned total_ ## name ## _counter = 0; \
@@ -894,7 +895,7 @@ namespace graphene { namespace net { namespace detail {
void node_impl::p2p_network_connect_loop()
{
VERIFY_CORRECT_THREAD();
while (!_p2p_network_connect_loop_done.canceled())
while (!_p2p_network_connect_loop_done.canceled() && !_node_is_shutting_down)
{
try
{
@@ -1338,7 +1339,7 @@ namespace graphene { namespace net { namespace detail {
// reconnect with the rest of the network, or it might just futher isolate us.
{
// As usual, the first step is to walk through all our peers and figure out which
// peers need action (disconnecting, sending keepalives, etc), then we walk through
// peers need action (disconnecting, sending keepalives, etc), then we walk through
// those lists yielding at our leisure later.
ASSERT_TASK_NOT_PREEMPTED();
@@ -1900,8 +1901,15 @@ namespace graphene { namespace net { namespace detail {
originating_peer->last_known_fork_block_number = user_data["last_known_fork_block_number"].as<uint32_t>(1);
if (user_data.contains("last_known_hardfork_time")){
originating_peer->last_known_hardfork_time = fc::time_point_sec(user_data["last_known_hardfork_time"].as<uint32_t>(1));
}else{
// this state is invalid when node which wants to connect doesn't provide
// last hardfork time. We are setting to 0 which will disconnect the node
// on hello message
originating_peer->last_known_hardfork_time = fc::time_point_sec(0);
if(DISABLE_WITNESS_HF_CHECK) {
originating_peer->last_known_hardfork_time = _delegate->get_last_known_hardfork_time();
}
}
}
void node_impl::on_hello_message( peer_connection* originating_peer, const hello_message& hello_message_received )
@@ -2023,7 +2031,8 @@ namespace graphene { namespace net { namespace detail {
// indetify if peer's are not compatible due the hardforks
if ( originating_peer->last_known_hardfork_time < _delegate->get_last_known_hardfork_time())
{
if (_delegate->get_block_time(_delegate->get_head_block_id()).sec_since_epoch() >= _delegate->get_last_known_hardfork_time().sec_since_epoch())
if ((_delegate->get_block_time(_delegate->get_head_block_id()).sec_since_epoch() >= _delegate->get_last_known_hardfork_time().sec_since_epoch())
|| originating_peer->last_known_hardfork_time.sec_since_epoch() == 0)
{
std::ostringstream rejection_message;
rejection_message << "Your client is outdated -- you can only understand blocks up to #" << originating_peer->last_known_hardfork_time.to_iso_string() << ", but I'm already on block #" << _delegate->get_block_time(_delegate->get_head_block_id()).to_iso_string();
@@ -4042,6 +4051,8 @@ namespace graphene { namespace net { namespace detail {
{
VERIFY_CORRECT_THREAD();
_node_is_shutting_down = true;
try
{
_potential_peer_db.close();
@@ -3,6 +3,7 @@
#include <graphene/peerplays_sidechain/sidechain_net_handler.hpp>
#include <string>
#include <thread>
#include <zmq_addon.hpp>
#include <boost/signals2.hpp>
@@ -90,7 +91,9 @@ private:
class zmq_listener {
public:
zmq_listener(std::string _ip, uint32_t _zmq);
virtual ~zmq_listener();
void start();
boost::signals2::signal<void(const std::string &)> event_received;
private:
@@ -102,6 +105,9 @@ private:
zmq::context_t ctx;
zmq::socket_t socket;
std::atomic_bool stopped;
std::thread thr;
};
// =============================================================================
@@ -1,7 +1,6 @@
#include <graphene/peerplays_sidechain/sidechain_net_handler_bitcoin.hpp>
#include <algorithm>
#include <thread>
#include <boost/algorithm/hex.hpp>
#include <boost/property_tree/json_parser.hpp>
@@ -1060,8 +1059,31 @@ zmq_listener::zmq_listener(std::string _ip, uint32_t _zmq) :
ip(_ip),
zmq_port(_zmq),
ctx(1),
socket(ctx, ZMQ_SUB) {
std::thread(&zmq_listener::handle_zmq, this).detach();
socket(ctx, ZMQ_SUB),
stopped(false) {
}
void zmq_listener::start() {
int linger = 0;
auto rc = zmq_setsockopt(socket, ZMQ_SUBSCRIBE, "hashblock", 9);
FC_ASSERT(0 == rc);
rc = zmq_setsockopt(socket, ZMQ_LINGER, &linger, sizeof(linger));
FC_ASSERT(0 == rc);
int timeout = 100; //millisec
rc = zmq_setsockopt(socket, ZMQ_RCVTIMEO, &timeout, sizeof(timeout));
//socket.setsockopt( ZMQ_SUBSCRIBE, "hashtx", 6 );
//socket.setsockopt( ZMQ_SUBSCRIBE, "rawblock", 8 );
//socket.setsockopt( ZMQ_SUBSCRIBE, "rawtx", 5 );
socket.connect("tcp://" + ip + ":" + std::to_string(zmq_port));
thr = std::thread(&zmq_listener::handle_zmq, this);
ilog("zmq_listener thread started");
}
zmq_listener::~zmq_listener() {
stopped = true;
thr.join();
}
std::vector<zmq::message_t> zmq_listener::receive_multipart() {
@@ -1078,26 +1100,25 @@ std::vector<zmq::message_t> zmq_listener::receive_multipart() {
}
void zmq_listener::handle_zmq() {
int linger = 0;
auto rc = zmq_setsockopt(socket, ZMQ_SUBSCRIBE, "hashblock", 9);
FC_ASSERT(0 == rc);
rc = zmq_setsockopt(socket, ZMQ_LINGER, &linger, sizeof(linger));
FC_ASSERT(0 == rc);
//socket.setsockopt( ZMQ_SUBSCRIBE, "hashtx", 6 );
//socket.setsockopt( ZMQ_SUBSCRIBE, "rawblock", 8 );
//socket.setsockopt( ZMQ_SUBSCRIBE, "rawtx", 5 );
socket.connect("tcp://" + ip + ":" + std::to_string(zmq_port));
while (true) {
while (false == stopped) {
try {
auto msg = receive_multipart();
const auto header = std::string(static_cast<char *>(msg[0].data()), msg[0].size());
const auto block_hash = boost::algorithm::hex(std::string(static_cast<char *>(msg[1].data()), msg[1].size()));
event_received(block_hash);
std::vector<zmq::message_t> msg;
auto res = zmq::recv_multipart(socket, std::back_inserter(msg));
if (res.has_value()) {
if (3 != *res) {
elog("zmq::recv_multipart returned: ${res}", ("res", *res));
throw zmq::error_t();
}
const auto header = std::string(static_cast<char *>(msg[0].data()), msg[0].size());
const auto block_hash = boost::algorithm::hex(std::string(static_cast<char *>(msg[1].data()), msg[1].size()));
event_received(block_hash);
}
} catch (zmq::error_t &e) {
elog("handle_zmq recv_multipart exception ${str}", ("str", e.what()));
}
}
ilog("zmq_listener thread finished");
}
// =============================================================================
@@ -1173,6 +1194,7 @@ sidechain_net_handler_bitcoin::sidechain_net_handler_bitcoin(peerplays_sidechain
ilog("Bitcoin major version is: '${version}'", ("version", bitcoin_major_version));
listener = std::unique_ptr<zmq_listener>(new zmq_listener(ip, zmq_port));
listener->start();
listener->event_received.connect([this](const std::string &event_data) {
std::thread(&sidechain_net_handler_bitcoin::handle_event, this, event_data).detach();
});
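The listener no longer detaches its worker thread from the constructor: `start()` configures the socket (including a 100 ms `ZMQ_RCVTIMEO` so the receive loop wakes up periodically) and launches the thread, while the destructor sets the `stopped` flag and joins it. A minimal stand-alone sketch of that stoppable-worker lifecycle follows, with a sleep standing in for the timed ZeroMQ receive; class and function names are illustrative only.

```
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

// Sketch of the start()/join-in-destructor lifecycle adopted by zmq_listener.
class listener_sketch {
public:
   void start() {
      thr = std::thread([this] { run(); });   // started explicitly, not in the constructor
   }
   ~listener_sketch() {
      stopped = true;                          // loop notices the flag on its next timed wakeup
      if (thr.joinable())
         thr.join();                           // no detached thread left touching a destroyed object
   }

private:
   void run() {
      while (!stopped) {
         // Stand-in for a receive with ZMQ_RCVTIMEO: wake up, check the flag, retry.
         std::this_thread::sleep_for(std::chrono::milliseconds(100));
      }
      std::cout << "listener thread finished\n";
   }
   std::atomic_bool stopped{false};
   std::thread thr;
};

int main() {
   listener_sketch l;
   l.start();
   std::this_thread::sleep_for(std::chrono::milliseconds(300));
}   // destructor stops and joins the worker
```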
@@ -2158,8 +2158,8 @@ public:
return sign_transaction( tx, broadcast );
} FC_CAPTURE_AND_RETHROW( (owner_account)(url)(block_signing_key)(broadcast) ) }
signed_transaction activate_deregistered_son(const string & owner_account,
bool broadcast /* = false */)
signed_transaction activate_deregistered_son(const string & owner_account,
bool broadcast /* = false */)
{ try {
son_object son = get_son(owner_account);
@@ -2408,7 +2408,7 @@ public:
op.sidechain = sidechain;
op.peerplays_uid = peerplays_uid;
op.peerplays_transaction_id = peerplays_transaction_id;
op.peerplays_from = peerplays_from;
op.peerplays_from = peerplays_from;
op.peerplays_asset = asset(asset_val.amount * asset_price.base.amount / asset_price.quote.amount);
op.withdraw_sidechain = withdraw_sidechain;
op.withdraw_address = withdraw_address;
@@ -2875,7 +2875,7 @@ public:
if (!votes_removed)
FC_THROW("Account ${account} is already not voting for SON ${son}", ("account", voting_account)("son", son));
}
voting_account_object.options.num_son = desired_number_of_sons;
voting_account_object.options.extensions.value.num_son = desired_number_of_sons;
account_update_operation account_update_op;
account_update_op.account = voting_account_object.id;
@@ -5410,7 +5410,7 @@ signed_transaction wallet_api::sidechain_withdrawal_transaction(const string &so
const string &withdraw_amount)
{
return my->sidechain_withdrawal_transaction(son_name_or_id,
block_num,
block_num,
sidechain,
peerplays_uid,
peerplays_transaction_id,
@@ -5540,7 +5540,7 @@ signed_transaction wallet_api::sidechain_deposit_transaction( const string &son
const string &peerplays_from_name_or_id,
const string &peerplays_to_name_or_id)
{
return my->sidechain_deposit_transaction(son_name_or_id,
return my->sidechain_deposit_transaction(son_name_or_id,
sidechain,
transaction_id,
operation_index,
@@ -79,18 +79,12 @@ int main(int argc, char** argv) {
node->set_program_options(cli, cfg);
cfg_options.add(cfg);
cfg_options.add_options()
("plugins", bpo::value<std::string>()->default_value("witness account_history market_history accounts_list affiliate_stats bookie"),
"Space-separated list of plugins to activate");
auto witness_plug = node->register_plugin<witness_plugin::witness_plugin>();
auto debug_witness_plug = node->register_plugin<debug_witness_plugin::debug_witness_plugin>();
auto history_plug = node->register_plugin<account_history::account_history_plugin>();
auto elasticsearch_plug = node->register_plugin<elasticsearch::elasticsearch_plugin>();
auto es_objects_plug = node->register_plugin<es_objects::es_objects_plugin>();
auto market_history_plug = node->register_plugin<market_history::market_history_plugin>();
//auto generate_genesis_plug = node->register_plugin<generate_genesis_plugin::generate_genesis_plugin>();
//auto generate_uia_sharedrop_genesis_plug = node->register_plugin<generate_uia_sharedrop_genesis::generate_uia_sharedrop_genesis_plugin>();
auto list_plug = node->register_plugin<accounts_list::accounts_list_plugin>();
auto affiliate_stats_plug = node->register_plugin<affiliate_stats::affiliate_stats_plugin>();
auto bookie_plug = node->register_plugin<bookie::bookie_plugin>();
@@ -337,7 +337,8 @@ BOOST_FIXTURE_TEST_CASE( select_top_fifteen_sons, cli_fixture )
global_property_object gpo;
gpo = con.wallet_api_ptr->get_global_properties();
unsigned int son_number = gpo.parameters.maximum_son_count();
//! Set son number as 5 (as the begining son count)
unsigned int son_number = 5;
flat_map<sidechain_type, string> sidechain_public_keys;
@@ -400,7 +401,7 @@ BOOST_FIXTURE_TEST_CASE( select_top_fifteen_sons, cli_fixture )
BOOST_TEST_MESSAGE("gpo: " << gpo.active_sons.size());
BOOST_CHECK(generate_maintenance_block());
BOOST_CHECK(gpo.active_sons.size() == gpo.parameters.maximum_son_count());
BOOST_CHECK(gpo.active_sons.size() == son_number);
} catch( fc::exception& e ) {
BOOST_TEST_MESSAGE("SON cli wallet tests exception");
@@ -644,7 +645,8 @@ BOOST_FIXTURE_TEST_CASE( cli_list_active_sons, cli_fixture )
global_property_object gpo;
gpo = con.wallet_api_ptr->get_global_properties();
unsigned int son_number = gpo.parameters.maximum_son_count();
//! Set son number as 5 (as the begining son count)
unsigned int son_number = 5;
flat_map<sidechain_type, string> sidechain_public_keys;
@@ -1040,16 +1040,13 @@ BOOST_FIXTURE_TEST_CASE( hardfork_son2_time, database_fixture )
generate_blocks(HARDFORK_SON3_TIME);
// after this hardfork maximum son account should not reset the value
// on 7 after maintenance interval anymore. So change the global parameters
// and check the value after maintenance interval
db.modify(db.get_global_properties(), [](global_property_object& p) {
p.parameters.extensions.value.maximum_son_count = 13;
});
// on 7 after maintenance interval anymore. It must be GRAPHENE_DEFAULT_MAX_SONS
BOOST_CHECK_EQUAL(db.get_global_properties().parameters.maximum_son_count(), GRAPHENE_DEFAULT_MAX_SONS);
generate_blocks(db.get_dynamic_global_properties().next_maintenance_time);
generate_block();
BOOST_CHECK_EQUAL(db.get_global_properties().parameters.maximum_son_count(), 13);
BOOST_CHECK_EQUAL(db.get_global_properties().parameters.maximum_son_count(), 15);
} FC_LOG_AND_RETHROW() }