/*
 * Copyright (c) 2015 Cryptonomex, Inc., and contributors.
 *
 * The MIT License
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <graphene/app/api.hpp>
#include <graphene/app/api_access.hpp>
#include <graphene/app/application.hpp>
#include <graphene/app/plugin.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <graphene/chain/protocol/types.hpp>
#include <graphene/egenesis/egenesis.hpp>
#include <graphene/net/core_messages.hpp>
#include <graphene/net/exceptions.hpp>
#include <graphene/chain/worker_evaluator.hpp>

#include <graphene/utilities/key_conversion.hpp>

#include <fc/crypto/base64.hpp>

#include <fc/io/fstream.hpp>

#include <fc/network/resolve.hpp>

#include <fc/rpc/api_connection.hpp>
#include <fc/rpc/websocket_api.hpp>

#include <boost/algorithm/string.hpp>

#include <boost/filesystem/path.hpp>
#include <boost/range/algorithm/reverse.hpp>

#include <boost/signals2.hpp>

#include <atomic>

#include <iostream>
#include <fc/log/file_appender.hpp>
#include <fc/log/logger.hpp>
#include <fc/log/logger_config.hpp>
#include <boost/range/adaptor/reversed.hpp>

namespace graphene { namespace app {

using net::block_message;
using net::item_hash_t;
using net::item_id;
using net::message;
using net::trx_message;
using chain::block_header;
using chain::block_id_type;
using chain::signed_block;
using chain::signed_block_header;
using std::vector;

namespace bpo = boost::program_options;

namespace detail {

genesis_state_type create_example_genesis() {
   auto nathan_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan")));
   dlog("Allocating all stake to ${key}", ("key", utilities::key_to_wif(nathan_key)));
   genesis_state_type initial_state;
   initial_state.initial_parameters.current_fees = std::make_shared<fee_schedule>(fee_schedule::get_default());
   initial_state.initial_active_witnesses = GRAPHENE_DEFAULT_MIN_WITNESS_COUNT;
   initial_state.initial_timestamp = time_point_sec(time_point::now().sec_since_epoch() /
                                                    initial_state.initial_parameters.block_interval *
                                                    initial_state.initial_parameters.block_interval);
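   // Example (illustrative numbers only): with a block_interval of 5 seconds and a current time of
   // 1,700,000,003 seconds since epoch, the integer division above rounds the genesis timestamp down
   // to 1,700,000,000, i.e. the most recent multiple of the block interval.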
   for (uint64_t i = 0; i < initial_state.initial_active_witnesses; ++i) {
      auto name = "init" + fc::to_string(i);
      initial_state.initial_accounts.emplace_back(name,
                                                  nathan_key.get_public_key(),
                                                  nathan_key.get_public_key(),
                                                  true);
      initial_state.initial_committee_candidates.push_back({name});
      initial_state.initial_witness_candidates.push_back({name, nathan_key.get_public_key()});
   }

   initial_state.initial_accounts.emplace_back("nathan", nathan_key.get_public_key());
   initial_state.initial_balances.push_back({nathan_key.get_public_key(),
                                             GRAPHENE_SYMBOL,
                                             GRAPHENE_MAX_SHARE_SUPPLY});
   initial_state.initial_chain_id = fc::sha256::hash("BOGUS");

   return initial_state;
}

class application_impl : public net::node_delegate {
public:
   fc::optional<fc::temp_file> _lock_file;
   bool _is_block_producer = false;
   bool _force_validate = false;

   std::atomic_bool _running{true};

   void reset_p2p_node(const fc::path& data_dir) {
      try {
         _p2p_network = std::make_shared<net::node>("PeerPlays Reference Implementation");
         _p2p_network->load_configuration(data_dir / "p2p");
         _p2p_network->set_node_delegate(this);

         vector<string> all_seeds;
         if (_options->count("seed-node")) {
            auto seeds = _options->at("seed-node").as<vector<string>>();
            all_seeds.insert(all_seeds.end(), seeds.begin(), seeds.end());
         }

         if (_options->count("seed-nodes")) {
            auto seeds_str = _options->at("seed-nodes").as<string>();
            auto seeds = fc::json::from_string(seeds_str).as<vector<string>>(2);
            all_seeds.insert(all_seeds.end(), seeds.begin(), seeds.end());
         }

         for (const string& endpoint_string : all_seeds) {
            try {
               std::vector<fc::ip::endpoint> endpoints = resolve_string_to_ip_endpoints(endpoint_string);
               for (const fc::ip::endpoint& endpoint : endpoints) {
                  ilog("Adding seed node ${endpoint}", ("endpoint", endpoint));
                  _p2p_network->add_node(endpoint);
               }
            } catch (const fc::exception& e) {
               wlog("caught exception ${e} while adding seed node ${endpoint}",
                    ("e", e.to_detail_string())("endpoint", endpoint_string));
            }
         }

         if (_options->count("p2p-endpoint"))
            _p2p_network->listen_on_endpoint(fc::ip::endpoint::from_string(_options->at("p2p-endpoint").as<string>()), true);
         else
            _p2p_network->listen_on_port(0, false);
         _p2p_network->listen_to_p2p_network();
         ilog("Configured p2p node to listen on ${ip}", ("ip", _p2p_network->get_actual_listening_endpoint()));
         _p2p_network->connect_to_p2p_network();
         _p2p_network->sync_from(net::item_id(net::core_message_type_enum::block_message_type,
                                              _chain_db->head_block_id()),
                                 std::vector<uint32_t>());
      }
      FC_CAPTURE_AND_RETHROW()
   }

   std::vector<fc::ip::endpoint> resolve_string_to_ip_endpoints(const std::string& endpoint_string) {
      try {
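         // Expected input is of the form "host:port", e.g. "seed.example.com:9777" or
         // "203.0.113.10:9777" (illustrative values); fc::resolve() may return more than one
         // endpoint when the host name resolves to several addresses.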
         string::size_type colon_pos = endpoint_string.find(':');
         if (colon_pos == std::string::npos)
            FC_THROW("Missing required port number in endpoint string \"${endpoint_string}\"",
                     ("endpoint_string", endpoint_string));
         std::string port_string = endpoint_string.substr(colon_pos + 1);
         try {
            uint16_t port = boost::lexical_cast<uint16_t>(port_string);
            std::string hostname = endpoint_string.substr(0, colon_pos);
            std::vector<fc::ip::endpoint> endpoints = fc::resolve(hostname, port);
            if (endpoints.empty())
               FC_THROW_EXCEPTION(fc::unknown_host_exception, "The host name can not be resolved: ${hostname}",
                                  ("hostname", hostname));
            return endpoints;
         } catch (const boost::bad_lexical_cast&) {
            FC_THROW("Bad port: ${port}", ("port", port_string));
         }
      }
      FC_CAPTURE_AND_RETHROW((endpoint_string))
   }

   void new_connection(const fc::http::websocket_connection_ptr& c) {
      auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(c, GRAPHENE_MAX_NESTED_OBJECTS);
      auto login = std::make_shared<graphene::app::login_api>(std::ref(*_self));
      login->enable_api("database_api");

      wsc->register_api(login->database());
      wsc->register_api(fc::api<graphene::app::login_api>(login));

      c->set_session_data(wsc);

      std::string username = "*";
      std::string password = "*";

      // Try to extract login information from "Authorization" header if present
      std::string auth = c->get_request_header("Authorization");
      if (boost::starts_with(auth, "Basic ")) {
         FC_ASSERT(auth.size() > 6);
         auto user_pass = fc::base64_decode(auth.substr(6));
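         // Example (illustrative credentials only): a header of "Authorization: Basic dXNlcjpwYXNz"
         // decodes to "user:pass"; the split below yields username "user" and password "pass".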

         std::vector<std::string> parts;
         boost::split(parts, user_pass, boost::is_any_of(":"));

         FC_ASSERT(parts.size() == 2);

         username = parts[0];
         password = parts[1];
      }
      login->login(username, password);
   }

   void reset_websocket_server() {
      try {
         if (!_options->count("rpc-endpoint"))
            return;

         _websocket_server = std::make_shared<fc::http::websocket_server>();
         _websocket_server->on_connection(std::bind(&application_impl::new_connection, this, std::placeholders::_1));

         ilog("Configured websocket rpc to listen on ${ip}", ("ip", _options->at("rpc-endpoint").as<string>()));
         _websocket_server->listen(fc::ip::endpoint::from_string(_options->at("rpc-endpoint").as<string>()));
         _websocket_server->start_accept();
      }
      FC_CAPTURE_AND_RETHROW()
   }

   void reset_websocket_tls_server() {
      try {
         if (!_options->count("rpc-tls-endpoint"))
            return;
         if (!_options->count("server-pem")) {
            wlog("Please specify a server-pem to use rpc-tls-endpoint");
            return;
         }
         string password = _options->count("server-pem-password") ? _options->at("server-pem-password").as<string>() : "";
         _websocket_tls_server = std::make_shared<fc::http::websocket_tls_server>(_options->at("server-pem").as<string>(), password);
         _websocket_tls_server->on_connection(std::bind(&application_impl::new_connection, this, std::placeholders::_1));

         ilog("Configured websocket TLS rpc to listen on ${ip}", ("ip", _options->at("rpc-tls-endpoint").as<string>()));
         _websocket_tls_server->listen(fc::ip::endpoint::from_string(_options->at("rpc-tls-endpoint").as<string>()));
         _websocket_tls_server->start_accept();
      }
      FC_CAPTURE_AND_RETHROW()
   }

   explicit application_impl(application* self) :
      _self(self),
      _chain_db(std::make_shared<chain::database>()) {
   }

   ~application_impl() {
   }

   void set_dbg_init_key(genesis_state_type& genesis, const std::string& init_key) {
      flat_set<std::string> initial_witness_names;
      public_key_type init_pubkey(init_key);
      for (uint64_t i = 0; i < genesis.initial_active_witnesses; i++)
         genesis.initial_witness_candidates[i].block_signing_key = init_pubkey;
   }

   void startup() {
      try {
         fc::create_directories(_data_dir / "blockchain");

         auto initial_state = [this] {
            ilog("Initializing database...");
            if (_options->count("genesis-json")) {
               std::string genesis_str;
               fc::read_file_contents(_options->at("genesis-json").as<boost::filesystem::path>(), genesis_str);
               genesis_state_type genesis = fc::json::from_string(genesis_str).as<genesis_state_type>(20);
               bool modified_genesis = false;
               if (_options->count("genesis-timestamp")) {
                  genesis.initial_timestamp = fc::time_point_sec(fc::time_point::now()) +
                                              genesis.initial_parameters.block_interval +
                                              _options->at("genesis-timestamp").as<uint32_t>();
                  genesis.initial_timestamp -= genesis.initial_timestamp.sec_since_epoch() % genesis.initial_parameters.block_interval;
                  modified_genesis = true;
                  std::cerr << "Used genesis timestamp: " << genesis.initial_timestamp.to_iso_string() << " (PLEASE RECORD THIS)\n";
               }
               if (_options->count("dbg-init-key")) {
                  std::string init_key = _options->at("dbg-init-key").as<string>();
                  FC_ASSERT(genesis.initial_witness_candidates.size() >= genesis.initial_active_witnesses);
                  set_dbg_init_key(genesis, init_key);
                  modified_genesis = true;
                  std::cerr << "Set init witness key to " << init_key << "\n";
               }
               if (modified_genesis) {
                  std::cerr << "WARNING: GENESIS WAS MODIFIED, YOUR CHAIN ID MAY BE DIFFERENT\n";
                  genesis_str += "BOGUS";
                  genesis.initial_chain_id = fc::sha256::hash(genesis_str);
               } else
                  genesis.initial_chain_id = fc::sha256::hash(genesis_str);
               return genesis;
            } else {
               std::string egenesis_json;
               graphene::egenesis::compute_egenesis_json(egenesis_json);
               FC_ASSERT(egenesis_json != "");
               FC_ASSERT(graphene::egenesis::get_egenesis_json_hash() == fc::sha256::hash(egenesis_json));
               auto genesis = fc::json::from_string(egenesis_json).as<genesis_state_type>(20);
               genesis.initial_chain_id = fc::sha256::hash(egenesis_json);
               return genesis;
            }
         };

         if (_options->count("resync-blockchain"))
            _chain_db->wipe(_data_dir / "blockchain", true);

         flat_map<uint32_t, block_id_type> loaded_checkpoints;
         if (_options->count("checkpoint")) {
            auto cps = _options->at("checkpoint").as<vector<string>>();
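            // Each checkpoint is supplied as a JSON pair of block number and block id, for example
            //   --checkpoint '[1000,"<40-hex-digit block id>"]'
            // (the id shown is only a placeholder for the actual id of the block at that height).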
            loaded_checkpoints.reserve(cps.size());
            for (auto cp : cps) {
               auto item = fc::json::from_string(cp).as<std::pair<uint32_t, block_id_type>>(2);
               loaded_checkpoints[item.first] = item.second;
            }
         }

         _chain_db->add_checkpoints(loaded_checkpoints);

         if (_options->count("enable-standby-votes-tracking")) {
            _chain_db->enable_standby_votes_tracking(_options->at("enable-standby-votes-tracking").as<bool>());
         }

         std::string replay_reason = "reason not provided";

         if (_options->count("replay-blockchain"))
            _chain_db->wipe(_data_dir / "blockchain", false);

         try {
            _chain_db->open(_data_dir / "blockchain", initial_state, GRAPHENE_CURRENT_DB_VERSION);
         } catch (const fc::exception& e) {
            elog("Caught exception ${e} in open(), you might want to force a replay", ("e", e.to_detail_string()));
            throw;
         }

         if (_options->count("force-validate")) {
            ilog("All transaction signatures will be validated");
            _force_validate = true;
         }

         if (_options->count("api-access")) {
            if (fc::exists(_options->at("api-access").as<boost::filesystem::path>())) {
               _apiaccess = fc::json::from_file(_options->at("api-access").as<boost::filesystem::path>()).as<api_access>(20);
               ilog("Using api access file from ${path}",
                    ("path", _options->at("api-access").as<boost::filesystem::path>().string()));
            } else {
               elog("Failed to load file from ${path}",
                    ("path", _options->at("api-access").as<boost::filesystem::path>().string()));
               std::exit(EXIT_FAILURE);
            }
         } else {
            // TODO: Remove this generous default access policy
            // when the UI logs in properly
            _apiaccess = api_access();
            api_access_info wild_access;
            wild_access.password_hash_b64 = "*";
            wild_access.password_salt_b64 = "*";
            wild_access.allowed_apis.push_back("database_api");
            wild_access.allowed_apis.push_back("network_broadcast_api");
            wild_access.allowed_apis.push_back("history_api");
            wild_access.allowed_apis.push_back("bookie_api");
            wild_access.allowed_apis.push_back("affiliate_stats_api");
            wild_access.allowed_apis.push_back("sidechain_api");
            _apiaccess.permission_map["*"] = wild_access;
         }
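         // The "*" hash and salt act as a wildcard entry, so any username/password is accepted and
         // granted the APIs listed above. An api-access file (the "api-access" option) can replace
         // this with per-user entries; a rough sketch of a single entry, using the api_access_info
         // fields seen here, maps a username to
         //   { "password_hash_b64": "<base64 hash>", "password_salt_b64": "<base64 salt>",
         //     "allowed_apis": ["database_api"] }
         // (layout shown for illustration only; the exact JSON follows fc's reflection of api_access).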

         reset_p2p_node(_data_dir);
         reset_websocket_server();
         reset_websocket_tls_server();
      }
      FC_LOG_AND_RETHROW()
   }

   optional<api_access_info> get_api_access_info(const string& username) const {
      optional<api_access_info> result;
      auto it = _apiaccess.permission_map.find(username);
      if (it == _apiaccess.permission_map.end()) {
         it = _apiaccess.permission_map.find("*");
         if (it == _apiaccess.permission_map.end())
            return result;
      }
      return it->second;
   }

   void set_api_access_info(const string& username, api_access_info&& permissions) {
      _apiaccess.permission_map.insert(std::make_pair(username, std::move(permissions)));
   }

   /**
    * If delegate has the item, the network has no need to fetch it.
    */
   virtual bool has_item(const net::item_id& id) override {
      try {
         if (id.item_type == graphene::net::block_message_type)
            return _chain_db->is_known_block(id.item_hash);
         else
            return _chain_db->is_known_transaction(id.item_hash);
      }
      FC_CAPTURE_AND_RETHROW((id))
   }

   /**
    * @brief allows the application to validate an item prior to broadcasting to peers.
    *
    * @param sync_mode true if the message was fetched through the sync process, false during normal operation
    * @returns true if this message caused the blockchain to switch forks, false if it did not
    *
    * @throws exception if there is an error validating the item; otherwise the item is safe to broadcast on.
    */
   virtual bool handle_block(const graphene::net::block_message& blk_msg, bool sync_mode,
                             std::vector<fc::uint160_t>& contained_transaction_message_ids) override {
      // checkpoint for threads that may be cancelled on application shutdown
      if (!_running.load()) {
         return true;
      }
      try {
         auto latency = fc::time_point::now() - blk_msg.block.timestamp;
         FC_ASSERT((latency.count() / 1000) > -5000, "Rejecting block with timestamp in the future");
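         // latency is an fc::microseconds value; count()/1000 converts it to milliseconds, so the
         // assertion above rejects blocks time-stamped more than 5 seconds in the future.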
         if (!sync_mode || blk_msg.block.block_num() % 10000 == 0) {
            const auto& witness = blk_msg.block.witness(*_chain_db);
            const auto& witness_account = witness.witness_account(*_chain_db);
            auto last_irr = _chain_db->get_dynamic_global_properties().last_irreversible_block_num;
            ilog("Got block: #${n} time: ${t} latency: ${l} ms from: ${w} irreversible: ${i} (-${d})",
                 ("t", blk_msg.block.timestamp)("n", blk_msg.block.block_num())("l", (latency.count() / 1000))
                 ("w", witness_account.name)("i", last_irr)("d", blk_msg.block.block_num() - last_irr));
         }
         try {
            // TODO: in the case where this block is valid but on a fork that's too old for us to switch to,
            // you can help the network code out by throwing a block_older_than_undo_history exception.
            // when the net code sees that, it will stop trying to push blocks from that chain, but
            // leave that peer connected so that they can get sync blocks from us
            bool result = _chain_db->push_block(blk_msg.block,
                                                (_is_block_producer | _force_validate) ? database::skip_nothing
                                                                                       : database::skip_transaction_signatures);
            // the block was accepted, so we now know all of the transactions contained in the block
            if (!sync_mode) {
               // if we're not in sync mode, there's a chance we will be seeing some transactions
               // included in blocks before we see the free-floating transaction itself. If that
               // happens, there's no reason to fetch the transactions, so construct a list of the
               // transaction message ids we no longer need.
               // during sync, it is unlikely that we'll see any old
               for (const processed_transaction& transaction : blk_msg.block.transactions) {
                  graphene::net::trx_message transaction_message(transaction);
                  contained_transaction_message_ids.push_back(graphene::net::message(transaction_message).id());
               }
            }
            return result;
         } catch (const graphene::chain::unlinkable_block_exception& e) {
            // translate to a graphene::net exception
            elog("Error when pushing block:\n${e}", ("e", e.to_detail_string()));
            FC_THROW_EXCEPTION(graphene::net::unlinkable_block_exception, "Error when pushing block:\n${e}",
                               ("e", e.to_detail_string()));
         } catch (const fc::exception& e) {
            elog("Error when pushing block:\n${e}", ("e", e.to_detail_string()));
            throw;
         }

         if (!_is_finished_syncing && !sync_mode) {
            _is_finished_syncing = true;
            _self->syncing_finished();
         }
      }
      FC_CAPTURE_AND_RETHROW((blk_msg)(sync_mode))
   }

   virtual void handle_transaction(const graphene::net::trx_message& transaction_message) override {
      try {
         static fc::time_point last_call;
         static int trx_count = 0;
         ++trx_count;
         auto now = fc::time_point::now();
         if (now - last_call > fc::seconds(1)) {
            ilog("Got ${c} transactions from network", ("c", trx_count));
            last_call = now;
            trx_count = 0;
         }
         _chain_db->push_transaction(transaction_message.trx);
      }
      FC_CAPTURE_AND_RETHROW((transaction_message))
   }

   virtual void handle_message(const message& message_to_process) override {
      // not a transaction, not a block
      FC_THROW("Invalid Message Type");
   }

   bool is_included_block(const block_id_type& block_id) {
      uint32_t block_num = block_header::num_from_id(block_id);
      block_id_type block_id_in_preferred_chain = _chain_db->get_block_id_for_num(block_num);
      return block_id == block_id_in_preferred_chain;
   }

   /**
    * Assuming all data elements are ordered in some way, this method should
    * return up to limit ids that occur *after* the last ID in synopsis that
    * we recognize.
    *
    * On return, remaining_item_count will be set to the number of items
    * in our blockchain after the last item returned in the result,
    * or 0 if the result contains the last item in the blockchain.
    */
   virtual std::vector<item_hash_t> get_block_ids(const std::vector<item_hash_t>& blockchain_synopsis,
                                                  uint32_t& remaining_item_count,
                                                  uint32_t limit) override {
      try {
         vector<block_id_type> result;
         remaining_item_count = 0;
         if (_chain_db->head_block_num() == 0)
            return result;
         result.reserve(limit);
         block_id_type last_known_block_id;

         if (blockchain_synopsis.empty() ||
             (blockchain_synopsis.size() == 1 && blockchain_synopsis[0] == block_id_type())) {
            // peer has sent us an empty synopsis meaning they have no blocks.
            // A bug in old versions would cause them to send a synopsis containing block 000000000
            // when they had an empty blockchain, so pretend they sent the right thing here.
            // do nothing, leave last_known_block_id set to zero
         } else {
            bool found_a_block_in_synopsis = false;
            for (const item_hash_t& block_id_in_synopsis : boost::adaptors::reverse(blockchain_synopsis))
               if (block_id_in_synopsis == block_id_type() ||
                   (_chain_db->is_known_block(block_id_in_synopsis) && is_included_block(block_id_in_synopsis))) {
                  last_known_block_id = block_id_in_synopsis;
                  found_a_block_in_synopsis = true;
                  break;
               }
            if (!found_a_block_in_synopsis)
               FC_THROW_EXCEPTION(graphene::net::peer_is_on_an_unreachable_fork,
                                  "Unable to provide a list of blocks starting at any of the blocks in peer's synopsis");
         }
         for (uint32_t num = block_header::num_from_id(last_known_block_id);
              num <= _chain_db->head_block_num() && result.size() < limit;
              ++num)
            if (num > 0)
               result.push_back(_chain_db->get_block_id_for_num(num));
         if (!result.empty() && block_header::num_from_id(result.back()) < _chain_db->head_block_num())
            remaining_item_count = _chain_db->head_block_num() - block_header::num_from_id(result.back());
         return result;
      }
      FC_CAPTURE_AND_RETHROW((blockchain_synopsis)(remaining_item_count)(limit))
   }

   /**
    * Given the hash of the requested data, fetch the body.
    */
   virtual message get_item(const item_id& id) override {
      try {
         // ilog("Request for item ${id}", ("id", id));
         if (id.item_type == graphene::net::block_message_type) {
            auto opt_block = _chain_db->fetch_block_by_id(id.item_hash);
            if (!opt_block)
               elog("Couldn't find block ${id} -- corresponding ID in our chain is ${id2}",
                    ("id", id.item_hash)("id2", _chain_db->get_block_id_for_num(block_header::num_from_id(id.item_hash))));
            FC_ASSERT(opt_block.valid());
            // ilog("Serving up block #${num}", ("num", opt_block->block_num()));
            return block_message(std::move(*opt_block));
         }
         return trx_message(_chain_db->get_recent_transaction(id.item_hash));
      }
      FC_CAPTURE_AND_RETHROW((id))
   }

   virtual chain_id_type get_chain_id() const override {
      return _chain_db->get_chain_id();
   }

   /**
    * Returns a synopsis of the blockchain used for syncing. This consists of a list of
    * block hashes at intervals exponentially increasing towards the genesis block.
    * When syncing to a peer, the peer uses this data to determine if we're on the same
    * fork as they are, and if not, what blocks they need to send us to get us on their
    * fork.
    *
    * In the over-simplified case, this is a straightforward synopsis of our current
    * preferred blockchain; when we first connect up to a peer, this is what we will be sending.
    * It looks like this:
    *   If the blockchain is empty, it will return the empty list.
    *   If the blockchain has one block, it will return a list containing just that block.
    *   If it contains more than one block:
    *     the first element in the list will be the hash of the highest numbered block that
    *         we cannot undo
    *     the second element will be the hash of an item at the half way point in the undoable
    *         segment of the blockchain
    *     the third will be ~3/4 of the way through the undoable segment of the block chain
    *     the fourth will be at ~7/8...
    *       &c.
    *     the last item in the list will be the hash of the most recent block on our preferred chain
    * so if the blockchain had 26 blocks labeled a - z, the synopsis would be:
    *    a n u x z
    * the idea being that by sending a small (<30) number of block ids, we can summarize a huge
    * blockchain. The block ids are more dense near the end of the chain because we are
    * more likely to be almost in sync when we first connect, and forks are likely to be short.
    * If the peer we're syncing with in our example is on a fork that started at block 'v',
    * then they will reply to our synopsis with a list of all blocks starting from block 'u',
    * the last block they know that we had in common.
    *
    * In the real code, there are several complications.
    *
    * First, as an optimization, we don't usually send a synopsis of the entire blockchain, we
    * send a synopsis of only the segment of the blockchain that we have undo data for. If their
    * fork doesn't build off of something in our undo history, we would be unable to switch, so there's
    * no reason to fetch the blocks.
    *
    * Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think
    * we are missing, they only send a chunk of a few thousand blocks at once. After we get those
    * block ids, we need to request more blocks by sending another synopsis (we can't just say "send me
    * the next 2000 ids" because they may have switched forks themselves and they don't track what
    * they've sent us). For faster performance, we want to get a fairly long list of block ids first,
    * then start downloading the blocks.
    * The peer doesn't handle these follow-up block id requests any different from the initial request;
    * it treats the synopsis we send as our blockchain and bases its response entirely off that. So to
    * get the response we want (the next chunk of block ids following the last one they sent us, or,
    * failing that, the shortest fork off of the last list of block ids they sent), we need to construct
    * a synopsis as if our blockchain was made up of:
    *    1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork)
    *    2. the blocks we've already pushed from their fork (if there's a fork)
    *    3. the block ids they've previously sent us
    * Segment 3 is handled in the p2p code, it just tells us the number of blocks it has (in
    * number_of_blocks_after_reference_point) so we can leave space in the synopsis for them.
    * We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and
    * fork database. The reference_point parameter is the last block from that peer that has been
    * successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on
    * the main chain.
    */
   virtual std::vector<item_hash_t> get_blockchain_synopsis(const item_hash_t& reference_point,
                                                            uint32_t number_of_blocks_after_reference_point) override {
      try {
         std::vector<item_hash_t> synopsis;
         synopsis.reserve(30);
         uint32_t high_block_num;
         uint32_t non_fork_high_block_num;
         uint32_t low_block_num = _chain_db->last_non_undoable_block_num();
         std::vector<block_id_type> fork_history;

         if (reference_point != item_hash_t()) {
            // the node is asking for a summary of the block chain up to a specified
            // block, which may or may not be on a fork
            // for now, assume it's not on a fork
            if (is_included_block(reference_point)) {
               // reference_point is a block we know about and is on the main chain
               uint32_t reference_point_block_num = block_header::num_from_id(reference_point);
               assert(reference_point_block_num > 0);
               high_block_num = reference_point_block_num;
               non_fork_high_block_num = high_block_num;
               if (reference_point_block_num < low_block_num) {
                  // we're on the same fork (at least as far as reference_point) but we've passed
                  // reference point and could no longer undo that far if we diverged after that
                  // block. This should probably only happen due to a race condition where
                  // the network thread calls this function, and then immediately pushes a bunch of blocks,
                  // then the main thread finally processes this function.
                  // with the current framework, there's not much we can do to tell the network
                  // thread what our current head block is, so we'll just pretend that
                  // our head is actually the reference point.
                  // this *may* enable us to fetch blocks that we're unable to push, but that should
                  // be a rare case (and correctly handled)
                  low_block_num = reference_point_block_num;
               }
            } else {
               // block is a block we know about, but it is on a fork
               try {
                  fork_history = _chain_db->get_block_ids_on_fork(reference_point);
                  // returns a vector where the last element is the common ancestor with the preferred chain,
                  // and the first element is the reference point you passed in
                  assert(fork_history.size() >= 2);
                  if (fork_history.front() != reference_point) {
                     edump((fork_history)(reference_point));
                     assert(fork_history.front() == reference_point);
                  }
                  block_id_type last_non_fork_block = fork_history.back();
                  fork_history.pop_back(); // remove the common ancestor
                  boost::reverse(fork_history);
                  if (last_non_fork_block == block_id_type()) // if the fork goes all the way back to genesis (does graphene's fork db allow this?)
                     non_fork_high_block_num = 0;
                  else
                     non_fork_high_block_num = block_header::num_from_id(last_non_fork_block);
                  high_block_num = non_fork_high_block_num + fork_history.size();
                  assert(high_block_num == block_header::num_from_id(fork_history.back()));
               } catch (const fc::exception& e) {
                  // unable to get fork history for some reason. maybe not linked?
                  // we can't return a synopsis of its chain
                  elog("Unable to construct a blockchain synopsis for reference hash ${hash}: ${exception}",
                       ("hash", reference_point)("exception", e));
                  throw;
               }
               if (non_fork_high_block_num < low_block_num) {
                  wlog("Unable to generate a usable synopsis because the peer we're generating it for forked too long ago "
                       "(our chains diverge after block #${non_fork_high_block_num} but only undoable to block #${low_block_num})",
                       ("low_block_num", low_block_num)("non_fork_high_block_num", non_fork_high_block_num));
                  FC_THROW_EXCEPTION(graphene::net::block_older_than_undo_history, "Peer is on a fork I'm unable to switch to");
               }
            }
         } else {
            // no reference point specified, summarize the whole block chain
            high_block_num = _chain_db->head_block_num();
            non_fork_high_block_num = high_block_num;
            if (high_block_num == 0)
               return synopsis; // we have no blocks
         }

         if (low_block_num == 0)
            low_block_num = 1;

         // at this point:
         // low_block_num is the block before the first block we can undo,
         // non_fork_high_block_num is the block before the fork (if the peer is on a fork, or otherwise it is the same as high_block_num)
         // high_block_num is the block number of the reference block, or the end of the chain if no reference provided
         // true_high_block_num is the ending block number after the network code appends any item ids it
         // knows about that we don't
         uint32_t true_high_block_num = high_block_num + number_of_blocks_after_reference_point;
         do {
            // for each block in the synopsis, figure out where to pull the block id from.
            // if it's <= non_fork_high_block_num, we grab it from the main blockchain;
            // if it's not, we pull it from the fork history
            if (low_block_num <= non_fork_high_block_num)
               synopsis.push_back(_chain_db->get_block_id_for_num(low_block_num));
            else
               synopsis.push_back(fork_history[low_block_num - non_fork_high_block_num - 1]);
            low_block_num += (true_high_block_num - low_block_num + 2) / 2;
         } while (low_block_num <= high_block_num);
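         // Worked example (illustration only): with low_block_num = 1 and
         // high_block_num = true_high_block_num = 100 (i.e. no peer-supplied ids to leave room for),
         // the loop selects blocks 1, 51, 76, 89, 95, 98 and 100 -- sparse near the start of the
         // range and increasingly dense toward the head of the chain.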

         // idump((synopsis));
         return synopsis;
      }
      FC_CAPTURE_AND_RETHROW()
   }

   /**
    * Call this after the call to handle_message succeeds.
    *
    * @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call
    * @param item_count the number of items known to the node that haven't been sent to handle_item() yet.
    *                   After `item_count` more calls to handle_item(), the node will be in sync
    */
   virtual void sync_status(uint32_t item_type, uint32_t item_count) override {
      // any status reports to GUI go here
   }

   /**
    * Call any time the number of connected peers changes.
    */
   virtual void connection_count_changed(uint32_t c) override {
      // any status reports to GUI go here
   }

   virtual uint32_t get_block_number(const item_hash_t& block_id) override {
      try {
         return block_header::num_from_id(block_id);
      }
      FC_CAPTURE_AND_RETHROW((block_id))
   }

   virtual fc::time_point_sec get_last_known_hardfork_time() override {
      return _chain_db->_hardfork_times[_chain_db->_hardfork_times.size() - 1];
   }

   /**
    * Returns the time a block was produced (if block_id = 0, returns genesis time).
    * If we don't know about the block, returns time_point_sec::min()
    */
   virtual fc::time_point_sec get_block_time(const item_hash_t& block_id) override {
      try {
         auto opt_block = _chain_db->fetch_block_by_id(block_id);
         if (opt_block.valid())
            return opt_block->timestamp;
         return fc::time_point_sec::min();
      }
      FC_CAPTURE_AND_RETHROW((block_id))
   }

   virtual item_hash_t get_head_block_id() const override {
      return _chain_db->head_block_id();
   }

   virtual uint32_t estimate_last_known_fork_from_git_revision_timestamp(uint32_t unix_timestamp) const override {
      return 0; // there are no forks in graphene
   }

   virtual void error_encountered(const std::string& message, const fc::oexception& error) override {
      // notify GUI or something cool
   }

   uint8_t get_current_block_interval_in_seconds() const override {
      return _chain_db->get_global_properties().parameters.block_interval;
   }

   application* _self;

   fc::path _data_dir;
   const bpo::variables_map* _options = nullptr;
   api_access _apiaccess;

   std::shared_ptr<graphene::chain::database> _chain_db;
   std::shared_ptr<graphene::net::node> _p2p_network;
   std::shared_ptr<fc::http::websocket_server> _websocket_server;
   std::shared_ptr<fc::http::websocket_tls_server> _websocket_tls_server;

   std::map<string, std::shared_ptr<abstract_plugin>> _active_plugins;
   std::map<string, std::shared_ptr<abstract_plugin>> _available_plugins;

   bool _is_finished_syncing = false;
};

} // namespace detail

application::application() :
   my(new detail::application_impl(this)) {
}

application::~application() {
   if (my->_p2p_network) {
      my->_p2p_network->close();
      my->_p2p_network.reset();
   }
   if (my->_chain_db) {
      my->_chain_db->close();
   }
}

void application::set_program_options(boost::program_options::options_description& cli,
                                      boost::program_options::options_description& cfg) const {
   std::vector<string> seed_nodes = {
#ifdef BUILD_PEERPLAYS_TESTNET
#else
      "51.222.110.110:9777",
      "95.216.90.243:9777",
      "ca.peerplays.info:9777",
      "de.peerplays.xyz:9777",
      "pl.peerplays.org:9777",
      "seed.i9networks.net.br:9777",
      "witness.serverpit.com:9777"
#endif
   };
   std::string seed_nodes_str = fc::json::to_string(seed_nodes);
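   // seed_nodes_str is a JSON array of "host:port" strings, e.g.
   //   ["51.222.110.110:9777","ca.peerplays.info:9777",...]
   // A user-supplied --seed-nodes value must use the same JSON array form; it is parsed in
   // application_impl::reset_p2p_node() via fc::json::from_string(...).as<vector<string>>().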

   cfg.add_options()("p2p-endpoint", bpo::value<string>()->default_value("0.0.0.0:9777"), "Endpoint for P2P node to listen on");
   cfg.add_options()("seed-node,s", bpo::value<vector<string>>()->composing(), "P2P nodes to connect to on startup (may specify multiple times)");
   cfg.add_options()("seed-nodes", bpo::value<string>()->composing()->default_value(seed_nodes_str), "JSON array of P2P nodes to connect to on startup");
   cfg.add_options()("checkpoint,c", bpo::value<vector<string>>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.");
   cfg.add_options()("rpc-endpoint", bpo::value<string>()->default_value("127.0.0.1:8090"), "Endpoint for websocket RPC to listen on");
   cfg.add_options()("rpc-tls-endpoint", bpo::value<string>()->implicit_value("127.0.0.1:8089"), "Endpoint for TLS websocket RPC to listen on");
   cfg.add_options()("server-pem,p", bpo::value<string>()->implicit_value("server.pem"), "The TLS certificate file for this server");
   cfg.add_options()("server-pem-password,P", bpo::value<string>()->implicit_value(""), "Password for this certificate");
   cfg.add_options()("genesis-json", bpo::value<boost::filesystem::path>(), "File to read Genesis State from");
   cfg.add_options()("dbg-init-key", bpo::value<string>(), "Block signing key to use for init witnesses, overrides genesis file");
   cfg.add_options()("api-access", bpo::value<boost::filesystem::path>(), "JSON file specifying API permissions");
   cfg.add_options()("enable-standby-votes-tracking", bpo::value<bool>()->implicit_value(true),
                     "Whether to enable tracking of votes of standby witnesses and committee members. "
                     "Set it to true to provide accurate data to API clients, set to false for slightly better performance.");
   cfg.add_options()("plugins", bpo::value<string>()->default_value("account_history accounts_list affiliate_stats bookie market_history witness"),
                     "Space-separated list of plugins to activate");
   cli.add(cfg);
   cli.add_options()("create-genesis-json", bpo::value<boost::filesystem::path>(),
                     "Path to create a Genesis State at. If a well-formed JSON file exists at the path, it will be parsed and any "
                     "missing fields in a Genesis State will be added, and any unknown fields will be removed. If no file or an "
                     "invalid file is found, it will be replaced with an example Genesis State.");
   cli.add_options()("replay-blockchain", "Rebuild object graph by replaying all blocks");
   cli.add_options()("resync-blockchain", "Delete all blocks and re-sync with network from scratch");
   cli.add_options()("force-validate", "Force validation of all transactions");
   cli.add_options()("genesis-timestamp", bpo::value<uint32_t>(), "Replace timestamp from genesis.json with current time plus this many seconds (experts only!)");
   cli.add(_cli_options);
   cfg.add(_cfg_options);
}

void application::initialize(const fc::path& data_dir, const boost::program_options::variables_map& options) {
   my->_data_dir = data_dir;
   my->_options = &options;

   if (options.count("create-genesis-json")) {
      fc::path genesis_out = options.at("create-genesis-json").as<boost::filesystem::path>();
      genesis_state_type genesis_state = detail::create_example_genesis();
      if (fc::exists(genesis_out)) {
         try {
            genesis_state = fc::json::from_file(genesis_out).as<genesis_state_type>(20);
         } catch (const fc::exception& e) {
            std::cerr << "Unable to parse existing genesis file:\n"
                      << e.to_string()
                      << "\nWould you like to replace it? [y/N] ";
            char response = std::cin.get();
            if (toupper(response) != 'Y')
               return;
         }
         std::cerr << "Updating genesis state in file " << genesis_out.generic_string() << "\n";
      } else {
         std::cerr << "Creating example genesis state in file " << genesis_out.generic_string() << "\n";
      }
      fc::json::save_to_file(genesis_state, genesis_out);
      std::exit(EXIT_SUCCESS);
   }

   std::set<string> wanted;
   if (options.count("plugins")) {
      boost::split(wanted, options.at("plugins").as<std::string>(), [](char c) {
         return c == ' ';
      });
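      // The option is a plain space-separated list, e.g. --plugins "witness account_history market_history"
      // (names shown are taken from the default list above).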
   } else {
      wanted.insert("account_history");
      wanted.insert("market_history");
      wanted.insert("accounts_list");
      wanted.insert("affiliate_stats");
   }

   // explicitly requested delayed_node or debug_witness functionality suppresses witness functions
   if (!wanted.count("delayed_node") && !wanted.count("debug_witness") && !wanted.count("witness"))
      wanted.insert("witness");

   wanted.insert("bookie");

   int es_ah_conflict_counter = 0;
   for (auto& it : wanted) {
      if (it == "account_history")
         ++es_ah_conflict_counter;
      if (it == "elasticsearch")
         ++es_ah_conflict_counter;
      if (es_ah_conflict_counter > 1) {
         elog("Can't start program with elasticsearch and account_history plugin at the same time");
         std::exit(EXIT_FAILURE);
      }
      if (!it.empty())
         enable_plugin(it);
   }
}

void application::startup() {
   try {
      my->startup();
   } catch (const fc::exception& e) {
      elog("${e}", ("e", e.to_detail_string()));
      throw;
   } catch (...) {
      elog("unexpected exception");
      throw;
   }
}

std::shared_ptr<abstract_plugin> application::get_plugin(const string& name) const {
   return is_plugin_enabled(name) ? my->_active_plugins[name] : nullptr;
}

bool application::is_plugin_enabled(const string& name) const {
   return !(my->_active_plugins.find(name) == my->_active_plugins.end());
}

net::node_ptr application::p2p_node() {
   return my->_p2p_network;
}

std::shared_ptr<chain::database> application::chain_database() const {
   return my->_chain_db;
}

void application::set_block_production(bool producing_blocks) {
   my->_is_block_producer = producing_blocks;
}

optional<api_access_info> application::get_api_access_info(const string& username) const {
   return my->get_api_access_info(username);
}

void application::set_api_access_info(const string& username, api_access_info&& permissions) {
   my->set_api_access_info(username, std::move(permissions));
}

bool application::is_finished_syncing() const {
   return my->_is_finished_syncing;
}

void graphene::app::application::enable_plugin(const string& name) {
   FC_ASSERT(my->_available_plugins[name], "Unknown plugin '" + name + "'");
   my->_active_plugins[name] = my->_available_plugins[name];
   my->_active_plugins[name]->plugin_set_app(this);
}

void graphene::app::application::add_available_plugin(std::shared_ptr<graphene::app::abstract_plugin> p) {
   my->_available_plugins[p->plugin_name()] = p;
}

void application::shutdown_plugins() {
   for (auto& entry : my->_active_plugins)
      entry.second->plugin_shutdown();
   return;
}

void application::shutdown() {
   my->_running.store(false);
   if (my->_p2p_network)
      my->_p2p_network->close();
   if (my->_chain_db)
      my->_chain_db->close();
}

void application::initialize_plugins(const boost::program_options::variables_map& options) {
   for (auto& entry : my->_active_plugins)
      entry.second->plugin_initialize(options);
   return;
}

void application::startup_plugins() {
   for (auto& entry : my->_active_plugins)
      entry.second->plugin_startup();
   return;
}

} } // namespace graphene::app