Merge branch 'develop' into feature/260-voting-info
Commit 027464c36f · 4 changed files with 173 additions and 171 deletions

README.md (94 changed lines)
````diff
@@ -2,95 +2,73 @@ Intro for new developers and witnesses
 This is a quick introduction to get new developers and witnesses up to speed on Peerplays blockchain. It is intended for witnesses planning to join a live, already deployed blockchain.
 
-# Building on Ubuntu 18.04 LTS and Installation Instructions
+# Building and Installation Instructions
 
-The following dependencies were necessary for a clean install of Ubuntu 18.04 LTS:
+Officially supported OS is Ubuntu 20.04.
+
+The following dependencies are needed for a clean install of Ubuntu 20.04:
 
 ```
-sudo apt-get install autoconf bash build-essential ca-certificates cmake \
-doxygen git graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev \
-libreadline-dev libssl-dev libtool libzmq3-dev locales ntp pkg-config \
-wget
-```
-
-## Build Boost 1.67.0
-
-```
-mkdir $HOME/src
-cd $HOME/src
-export BOOST_ROOT=$HOME/src/boost_1_67_0
-sudo apt-get update
-sudo apt-get install -y autotools-dev build-essential libbz2-dev libicu-dev python-dev
-wget -c 'http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.bz2/download'\
-    -O boost_1_67_0.tar.bz2
-tar xjf boost_1_67_0.tar.bz2
-cd boost_1_67_0/
-./bootstrap.sh "--prefix=$BOOST_ROOT"
-./b2 install
+sudo apt-get install \
+    apt-utils autoconf bash build-essential ca-certificates cmake dnsutils \
+    doxygen expect git graphviz libboost1.67-all-dev libbz2-dev libcurl4-openssl-dev \
+    libncurses-dev libreadline-dev libsnappy-dev libssl-dev libtool libzip-dev \
+    libzmq3-dev locales mc nano net-tools ntp openssh-server pkg-config perl \
+    python3 python3-jinja2 sudo wget
 ```
 
 ## Building Peerplays
 
 ```
+mkdir $HOME/src
 cd $HOME/src
-export BOOST_ROOT=$HOME/src/boost_1_67_0
-git clone https://github.com/peerplays-network/peerplays.git
+git clone https://gitlab.com/PBSA/peerplays.git
 cd peerplays
 git submodule update --init --recursive
 # If you want to build Mainnet node
-cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release
+cmake -DCMAKE_BUILD_TYPE=Release
 # If you want to build Testnet node
-cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1
+cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1
 make -j$(nproc)
 
 make install # this can install the executable files under /usr/local
 ```
 
-## Docker image
+## Docker images
+
+Install docker, and add the current user to the docker group.
 
 ```
-# Install docker
 sudo apt install docker.io
 
-# Add current user to docker group
 sudo usermod -a -G docker $USER
 
 # You need to restart your shell session to apply group membership
 # Type 'groups' to verify that you are a member of the docker group
+```
 
-# Build docker image (from the project root, must be a docker group member)
-docker build -t peerplays .
+### Official docker image for Peerplays Mainnet
 
-# Start docker image
-docker start peerplays
+```
+docker pull datasecuritynode/peerplays:latest
+```
 
-# Exposed ports
-# # rpc service:
-# EXPOSE 8090
-# # p2p service:
-# EXPOSE 1776
+### Building the docker image manually
+
+```
+# Build the docker image (from the project root, must be a docker group member)
+docker build -t peerplays .
+```
+
+### Start docker image
+
+```
+docker start peerplays
 ```
 
-Rest of the instructions on starting the chain remains same.
+The rest of the instructions on starting the chain remain the same.
 
 Starting A Peerplays Node
 -----------------
 
-For Ubuntu 14.04 LTS and up users, see
-[this](https://github.com/cryptonomex/graphene/wiki/build-ubuntu) and
-then proceed with:
-
-    git clone https://github.com/peerplays-network/peerplays.git
-    cd peerplays
-    git submodule update --init --recursive
-    cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release .
-    make
-    ./programs/witness_node/witness_node
-
 Launching the witness creates required directories. Next, **stop the witness** and continue.
 
     $ vi witness_node_data_dir/config.ini
````
```diff
@@ -23,6 +23,7 @@
  */
 #pragma once
 
+#include <boost/exception/diagnostic_information.hpp>
 #include <fc/exception/exception.hpp>
 #include <graphene/chain/protocol/protocol.hpp>
 
@@ -65,19 +66,27 @@
       msg \
    )
 
 #define GRAPHENE_TRY_NOTIFY( signal, ... ) \
    try \
    { \
       signal( __VA_ARGS__ ); \
    } \
    catch( const graphene::chain::plugin_exception& e ) \
    { \
       elog( "Caught plugin exception: ${e}", ("e", e.to_detail_string() ) ); \
       throw; \
    } \
-   catch( ... ) \
+   catch( const boost::exception& e ) \
    { \
-      wlog( "Caught unexpected exception in plugin" ); \
+      elog( "Caught plugin boost::exception: ${e}", ("e", boost::diagnostic_information(e) ) ); \
+   } \
+   catch( const std::exception& e ) \
+   { \
+      elog( "Caught plugin std::exception: ${e}", ("e", e.what() ) ); \
+   } \
+   catch( ... ) \
+   { \
+      wlog( "Caught unexpected exception in plugin" ); \
    }
 
 namespace graphene { namespace chain {
```
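The upgraded macro layers its handlers from most to least specific: a plugin_exception is logged and rethrown, boost::exception and std::exception are logged with the richest detail each type offers (boost::diagnostic_information() versus what()), and anything else falls through to the catch-all. Below is a minimal standalone sketch of that same ladder; std::cerr stands in for fc's elog/wlog, and the plugin_exception type and on_block signal are hypothetical stand-ins, so this illustrates the pattern rather than the graphene code itself.

```cpp
#include <boost/exception/diagnostic_information.hpp>
#include <boost/signals2/signal.hpp>
#include <iostream>
#include <stdexcept>

// Hypothetical stand-in for graphene::chain::plugin_exception.
struct plugin_exception : std::runtime_error {
   using std::runtime_error::runtime_error;
};

int main() {
   boost::signals2::signal<void(int)> on_block; // hypothetical signal
   on_block.connect([](int block_num) {
      if (block_num < 0)
         throw std::out_of_range("negative block number");
   });

   // Rough expansion of GRAPHENE_TRY_NOTIFY( on_block, -1 ):
   try {
      on_block(-1);
   } catch (const plugin_exception& e) {
      std::cerr << "Caught plugin exception: " << e.what() << "\n";
      throw; // only plugin exceptions propagate to the caller
   } catch (const boost::exception& e) {
      std::cerr << "Caught plugin boost::exception: "
                << boost::diagnostic_information(e) << "\n";
   } catch (const std::exception& e) {
      std::cerr << "Caught plugin std::exception: " << e.what() << "\n";
   } catch (...) {
      std::cerr << "Caught unexpected exception in plugin\n";
   }
}
```

In this sketch plugin_exception derives from std::runtime_error, so its handler must precede the std::exception clause; swapped around, the std::exception handler would shadow it and the throw; would never run.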
```diff
@@ -79,123 +79,138 @@ account_history_plugin_impl::~account_history_plugin_impl()
 
 void account_history_plugin_impl::update_account_histories( const signed_block& b )
 {
+   try
+   {
    graphene::chain::database& db = database();
    vector<optional< operation_history_object > >& hist = db.get_applied_operations();
    bool is_first = true;
    auto skip_oho_id = [&is_first,&db,this]() {
       if( is_first && db._undo_db.enabled() ) // this ensures that the current id is rolled back on undo
       {
          db.remove( db.create<operation_history_object>( []( operation_history_object& obj) {} ) );
          is_first = false;
       }
       else
          _oho_index->use_next_id();
    };
 
    for( optional< operation_history_object >& o_op : hist )
    {
       optional<operation_history_object> oho;
 
       auto create_oho = [&]() {
          is_first = false;
          operation_history_object result = db.create<operation_history_object>( [&]( operation_history_object& h )
          {
             if( o_op.valid() )
                h = *o_op;
          } );
          o_op->id = result.id;
          return optional<operation_history_object>(result);
       };
 
       if( !o_op.valid() || ( _max_ops_per_account == 0 && _partial_operations ) )
       {
          // Note: the 2nd and 3rd checks above are for better performance, when the db is not clean,
          // they will break consistency of account_stats.total_ops and removed_ops and most_recent_op
          skip_oho_id();
          continue;
       }
       else if( !_partial_operations )
          // add to the operation history index
          oho = create_oho();
 
       const operation_history_object& op = *o_op;
 
       // get the set of accounts this operation applies to
       flat_set<account_id_type> impacted;
       vector<authority> other;
       // fee payer is added here
       operation_get_required_authorities( op.op, impacted, impacted, other,
                                           MUST_IGNORE_CUSTOM_OP_REQD_AUTHS( db.head_block_time() ) );
 
       if( op.op.which() == operation::tag< account_create_operation >::value )
          impacted.insert( op.result.get<object_id_type>() );
       else
          graphene::chain::operation_get_impacted_accounts( op.op, impacted,
                                                            MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(db.head_block_time()) );
       if( op.op.which() == operation::tag< lottery_end_operation >::value )
       {
          auto lop = op.op.get< lottery_end_operation >();
          auto asset_object = lop.lottery( db );
          impacted.insert( asset_object.issuer );
          for( auto benefactor : asset_object.lottery_options->benefactors )
             impacted.insert( benefactor.id );
       }
 
       for( auto& a : other )
          for( auto& item : a.account_auths )
             impacted.insert( item.first );
 
       // be here, either _max_ops_per_account > 0, or _partial_operations == false, or both
       // if _partial_operations == false, oho should have been created above
       // so the only case should be checked here is:
       // whether need to create oho if _max_ops_per_account > 0 and _partial_operations == true
 
       // for each operation this account applies to that is in the config link it into the history
       if( _tracked_accounts.size() == 0 ) // tracking all accounts
       {
          // if tracking all accounts, when impacted is not empty (although it will always be),
          // still need to create oho if _max_ops_per_account > 0 and _partial_operations == true
          // so always need to create oho if not done
          if (!impacted.empty() && !oho.valid()) { oho = create_oho(); }
 
          if( _max_ops_per_account > 0 )
          {
             // Note: the check above is for better performance, when the db is not clean,
             // it breaks consistency of account_stats.total_ops and removed_ops and most_recent_op,
             // but it ensures it's safe to remove old entries in add_account_history(...)
             for( auto& account_id : impacted )
             {
                // we don't do index_account_keys here anymore, because
                // that indexing now happens in observers' post_evaluate()
 
                // add history
                add_account_history( account_id, oho->id );
             }
          }
       }
       else // tracking a subset of accounts
       {
          // whether need to create oho if _max_ops_per_account > 0 and _partial_operations == true ?
          // the answer: only need to create oho if a tracked account is impacted and need to save history
 
          if( _max_ops_per_account > 0 )
          {
             // Note: the check above is for better performance, when the db is not clean,
             // it breaks consistency of account_stats.total_ops and removed_ops and most_recent_op,
             // but it ensures it's safe to remove old entries in add_account_history(...)
             for( auto account_id : _tracked_accounts )
             {
                if( impacted.find( account_id ) != impacted.end() )
                {
                   if (!oho.valid()) { oho = create_oho(); }
                   // add history
                   add_account_history( account_id, oho->id );
                }
             }
          }
       }
       if (_partial_operations && ! oho.valid())
          skip_oho_id();
    }
+   }
+   catch( const boost::exception& e )
+   {
+      elog( "Caught account_history_plugin::update_account_histories(...) boost::exception: ${e}", ("e", boost::diagnostic_information(e) ) );
+   }
+   catch( const std::exception& e )
+   {
+      elog( "Caught account_history_plugin::update_account_histories(...) std::exception: ${e}", ("e", e.what() ) );
+   }
+   catch( ... )
+   {
+      wlog( "Caught unexpected exception in account_history_plugin::update_account_histories(...)" );
+   }
 }
```
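In the tracked-accounts branch of the function above, create_oho() runs lazily: the history object is only materialized once a tracked account actually appears in the impacted set, and skip_oho_id() reclaims the reserved id when nothing was stored. A minimal sketch of that lazy-create pattern, with std::set and int as hypothetical stand-ins for flat_set<account_id_type> and the operation history object:

```cpp
#include <iostream>
#include <optional>
#include <set>

int main() {
   const std::set<int> tracked  = {7, 42}; // accounts this node tracks
   const std::set<int> impacted = {3, 42}; // accounts touched by one operation
   std::optional<int> oho;                 // history entry, created lazily

   for (int account_id : tracked) {
      if (impacted.count(account_id)) {
         if (!oho)
            oho = 1001;                    // stands in for create_oho()
         // stands in for add_account_history( account_id, oho->id )
         std::cout << "add_account_history(" << account_id << ", " << *oho << ")\n";
      }
   }
   if (!oho)
      std::cout << "skip_oho_id(): no tracked account impacted\n"; // reclaim the id
}
```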
```diff
@@ -3,7 +3,7 @@
 import json
 import os
 import re
-import xml.etree.ElementTree as etree
+import defusedxml.ElementTree as etree
 
 def process_node(path, node):
     """
```

defusedxml keeps the standard ElementTree API while rejecting maliciously constructed XML, such as entity-expansion payloads, hardening the parser against untrusted input.