Merge commit 'd8e7c0afee573d82647018a9efd530a03274a2aa' into betting

Eric Frias 2017-06-21 16:58:50 -04:00
commit 9c9af76e12
14 changed files with 216 additions and 117 deletions


@ -32,13 +32,16 @@ if (USE_PCH)
include (cotire)
endif(USE_PCH)
list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules" )
IF( NOT WIN32 )
list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules" )
ENDIF( NOT WIN32 )
list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/GitVersionGen" )
include( GetGitRevisionDescription )
get_git_head_revision( GIT_REFSPEC GIT_SHA2 )
SET(BOOST_COMPONENTS)
LIST(APPEND BOOST_COMPONENTS thread
iostreams
date_time
system
filesystem


@ -41,6 +41,8 @@ To build after all dependencies are installed:
cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo .
make
**NOTE:** BitShares requires an [OpenSSL](https://www.openssl.org/) version in the 1.0.x series. OpenSSL 1.1.0 and newer are NOT supported. If your system OpenSSL version is newer, then you will need to manually provide an older version of OpenSSL and specify it to CMake using `-DOPENSSL_INCLUDE_DIR`, `-DOPENSSL_SSL_LIBRARY`, and `-DOPENSSL_CRYPTO_LIBRARY`.
**NOTE:** BitShares requires a [Boost](http://www.boost.org/) version in the range [1.57, 1.60]. Versions earlier than
1.57 or newer than 1.60 are NOT supported. If your system Boost version is newer, then you will need to manually build
an older version of Boost and specify it to CMake using `-DBOOST_ROOT`.

docs

@ -1 +1 @@
Subproject commit d99948280c6ae98a337be8ae93ab7182896d525c
Subproject commit bd792d02c70e7686da2b27197eba4fd6df30477c


@ -39,6 +39,7 @@
#include <fc/crypto/hex.hpp>
#include <fc/smart_ref_impl.hpp>
#include <fc/thread/future.hpp>
namespace graphene { namespace app {
@ -159,6 +160,16 @@ namespace graphene { namespace app {
_app.p2p_node()->broadcast_transaction(trx);
}
fc::variant network_broadcast_api::broadcast_transaction_synchronous(const signed_transaction& trx)
{
fc::promise<fc::variant>::ptr prom( new fc::promise<fc::variant>() );
broadcast_transaction_with_callback( [=]( const fc::variant& v ){
prom->set_value(v);
}, trx );
return fc::future<fc::variant>(prom).wait();
}
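For reference, the same callback-to-blocking pattern in a minimal standalone sketch (hypothetical names, std::promise standing in for fc::promise): the callback fulfils a promise and the caller blocks on the matching future until the confirmation arrives.
#include <functional>
#include <future>
#include <memory>
#include <string>
// stand-in for an async API that reports the confirmation through a callback
void broadcast_with_callback( std::function<void(const std::string&)> cb, const std::string& trx )
{
   cb( "confirmation for " + trx );   // in the real node this fires once the trx is in a block
}
// blocking wrapper: the callback fulfils a promise, the caller waits on its future
std::string broadcast_synchronous( const std::string& trx )
{
   auto prom = std::make_shared<std::promise<std::string>>();
   broadcast_with_callback( [prom]( const std::string& v ){ prom->set_value( v ); }, trx );
   return prom->get_future().get();
}
int main()
{
   return broadcast_synchronous( "trx" ).empty() ? 1 : 0;
}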
void network_broadcast_api::broadcast_block( const signed_block& b )
{
_app.chain_database()->push_block(b);
@ -444,13 +455,13 @@ namespace graphene { namespace app {
return result;
}
vector<operation_history_object> history_api::get_account_history( account_id_type account,
operation_history_id_type stop,
unsigned limit,
vector<operation_history_object> history_api::get_account_history( account_id_type account,
operation_history_id_type stop,
unsigned limit,
operation_history_id_type start ) const
{
FC_ASSERT( _app.chain_database() );
const auto& db = *_app.chain_database();
const auto& db = *_app.chain_database();
FC_ASSERT( limit <= 100 );
vector<operation_history_object> result;
const auto& stats = account(db).statistics(db);
@ -458,7 +469,7 @@ namespace graphene { namespace app {
const account_transaction_history_object* node = &stats.most_recent_op(db);
if( start == operation_history_id_type() )
start = node->operation_id;
while(node && node->operation_id.instance.value > stop.instance.value && result.size() < limit)
{
if( node->operation_id.instance.value <= start.instance.value )
@ -467,13 +478,13 @@ namespace graphene { namespace app {
node = nullptr;
else node = &node->next(db);
}
return result;
}
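Schematically, the loop above walks a per-account singly linked list from the newest entry toward older ones; a rough standalone model with hypothetical types:
#include <cstdint>
#include <vector>
struct hist_node { uint64_t op_id; const hist_node* next; };   // newest-to-oldest chain
std::vector<uint64_t> walk_history( const hist_node* most_recent,
                                    uint64_t start, uint64_t stop, unsigned limit )
{
   std::vector<uint64_t> out;
   const hist_node* node = most_recent;
   while( node && node->op_id > stop && out.size() < limit )
   {
      if( node->op_id <= start )   // entries newer than 'start' are skipped
         out.push_back( node->op_id );
      node = node->next;           // older entry, or nullptr at the tail
   }
   return out;
}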
vector<operation_history_object> history_api::get_account_history_operations( account_id_type account,
vector<operation_history_object> history_api::get_account_history_operations( account_id_type account,
int operation_id,
operation_history_id_type start,
operation_history_id_type start,
operation_history_id_type stop,
unsigned limit) const
{
@ -502,29 +513,30 @@ namespace graphene { namespace app {
}
vector<operation_history_object> history_api::get_relative_account_history( account_id_type account,
uint32_t stop,
unsigned limit,
vector<operation_history_object> history_api::get_relative_account_history( account_id_type account,
uint32_t stop,
unsigned limit,
uint32_t start) const
{
FC_ASSERT( _app.chain_database() );
const auto& db = *_app.chain_database();
FC_ASSERT(limit <= 100);
vector<operation_history_object> result;
const auto& stats = account(db).statistics(db);
if( start == 0 )
start = account(db).statistics(db).total_ops;
start = stats.total_ops;
else
start = min( account(db).statistics(db).total_ops, start );
start = min( stats.total_ops, start );
if( start >= stop && start > 0 && limit > 0 )
if( start >= stop && start > stats.removed_ops && limit > 0 )
{
const auto& hist_idx = db.get_index_type<account_transaction_history_index>();
const auto& by_seq_idx = hist_idx.indices().get<by_seq>();
auto itr = by_seq_idx.upper_bound( boost::make_tuple( account, start ) );
auto itr_stop = by_seq_idx.lower_bound( boost::make_tuple( account, stop ) );
do
{
--itr;
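A worked example of the window check introduced above, with hypothetical numbers: if total_ops is 1000 and removed_ops is 900, only sequence numbers 901..1000 still exist, so a request is only served while the (clamped) start lies above removed_ops.
#include <algorithm>
#include <cstdint>
bool serves_relative_history( uint32_t total_ops, uint32_t removed_ops,
                              uint32_t stop, uint32_t start, unsigned limit )
{
   if( start == 0 )
      start = total_ops;                     // default to the newest entry
   else
      start = std::min( total_ops, start );  // clamp to what has ever existed
   return start >= stop && start > removed_ops && limit > 0;
}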
@ -567,14 +579,14 @@ namespace graphene { namespace app {
}
return result;
} FC_CAPTURE_AND_RETHROW( (a)(b)(bucket_seconds)(start)(end) ) }
crypto_api::crypto_api(){};
blind_signature crypto_api::blind_sign( const extended_private_key_type& key, const blinded_hash& hash, int i )
{
return fc::ecc::extended_private_key( key ).blind_sign( hash, i );
}
signature_type crypto_api::unblind_signature( const extended_private_key_type& key,
const extended_public_key_type& bob,
const blind_signature& sig,
@ -583,32 +595,32 @@ namespace graphene { namespace app {
{
return fc::ecc::extended_private_key( key ).unblind_signature( extended_public_key( bob ), sig, hash, i );
}
commitment_type crypto_api::blind( const blind_factor_type& blind, uint64_t value )
{
return fc::ecc::blind( blind, value );
}
blind_factor_type crypto_api::blind_sum( const std::vector<blind_factor_type>& blinds_in, uint32_t non_neg )
{
return fc::ecc::blind_sum( blinds_in, non_neg );
}
bool crypto_api::verify_sum( const std::vector<commitment_type>& commits_in, const std::vector<commitment_type>& neg_commits_in, int64_t excess )
{
return fc::ecc::verify_sum( commits_in, neg_commits_in, excess );
}
verify_range_result crypto_api::verify_range( const commitment_type& commit, const std::vector<char>& proof )
{
verify_range_result result;
result.success = fc::ecc::verify_range( result.min_val, result.max_val, commit, proof );
return result;
}
std::vector<char> crypto_api::range_proof_sign( uint64_t min_value,
const commitment_type& commit,
const blind_factor_type& commit_blind,
std::vector<char> crypto_api::range_proof_sign( uint64_t min_value,
const commitment_type& commit,
const blind_factor_type& commit_blind,
const blind_factor_type& nonce,
int8_t base10_exp,
uint8_t min_bits,
@ -616,23 +628,23 @@ namespace graphene { namespace app {
{
return fc::ecc::range_proof_sign( min_value, commit, commit_blind, nonce, base10_exp, min_bits, actual_value );
}
verify_range_proof_rewind_result crypto_api::verify_range_proof_rewind( const blind_factor_type& nonce,
const commitment_type& commit,
const commitment_type& commit,
const std::vector<char>& proof )
{
verify_range_proof_rewind_result result;
result.success = fc::ecc::verify_range_proof_rewind( result.blind_out,
result.value_out,
result.message_out,
nonce,
result.min_val,
result.max_val,
const_cast< commitment_type& >( commit ),
result.success = fc::ecc::verify_range_proof_rewind( result.blind_out,
result.value_out,
result.message_out,
nonce,
result.min_val,
result.max_val,
const_cast< commitment_type& >( commit ),
proof );
return result;
}
range_proof_info crypto_api::range_get_info( const std::vector<char>& proof )
{
return fc::ecc::range_get_info( proof );
@ -642,18 +654,27 @@ namespace graphene { namespace app {
asset_api::asset_api(graphene::chain::database& db) : _db(db) { }
asset_api::~asset_api() { }
vector<account_asset_balance> asset_api::get_asset_holders( asset_id_type asset_id ) const {
vector<account_asset_balance> asset_api::get_asset_holders( asset_id_type asset_id, uint32_t start, uint32_t limit ) const {
FC_ASSERT(limit <= 100);
const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >();
auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) );
vector<account_asset_balance> result;
uint32_t index = 0;
for( const account_balance_object& bal : boost::make_iterator_range( range.first, range.second ) )
{
if( bal.balance.value == 0 ) continue;
if( result.size() >= limit )
break;
auto account = _db.find(bal.owner);
if( bal.balance.value == 0 )
continue;
if( index++ < start )
continue;
const auto account = _db.find(bal.owner);
account_asset_balance aab;
aab.name = account->name;
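The paging added above follows a plain skip/take pattern; a generic sketch with hypothetical data:
#include <cstdint>
#include <vector>
std::vector<int64_t> page_balances( const std::vector<int64_t>& balances,
                                    uint32_t start, uint32_t limit )
{
   std::vector<int64_t> result;
   uint32_t index = 0;
   for( int64_t bal : balances )
   {
      if( result.size() >= limit ) break;   // page is full
      if( bal == 0 ) continue;              // zero balances never count
      if( index++ < start ) continue;       // skip the first 'start' qualifying holders
      result.push_back( bal );
   }
   return result;
}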
@ -670,16 +691,16 @@ namespace graphene { namespace app {
const auto& bal_idx = _db.get_index_type< account_balance_index >().indices().get< by_asset_balance >();
auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) );
int count = boost::distance(range) - 1;
return count;
}
// function to get vector of system assets with holders count.
vector<asset_holders> asset_api::get_all_asset_holders() const {
vector<asset_holders> result;
vector<asset_id_type> total_assets;
for( const asset_object& asset_obj : _db.get_index_type<asset_index>().indices() )
{
@ -692,7 +713,7 @@ namespace graphene { namespace app {
auto range = bal_idx.equal_range( boost::make_tuple( asset_id ) );
int count = boost::distance(range) - 1;
asset_holders ah;
ah.asset_id = asset_id;
ah.count = count;


@ -525,7 +525,6 @@ namespace detail {
{ try {
auto latency = fc::time_point::now() - blk_msg.block.timestamp;
FC_ASSERT( (latency.count()/1000) > -5000, "Rejecting block with timestamp in the future" );
if (!sync_mode || blk_msg.block.block_num() % 10000 == 0)
{
const auto& witness = blk_msg.block.witness(*_chain_db);
@ -538,6 +537,7 @@ namespace detail {
("w",witness_account.name)
("i",last_irr)("d",blk_msg.block.block_num()-last_irr) );
}
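// note: the latency is an fc::microseconds value, so count()/1000 is milliseconds;
// the assert therefore rejects blocks whose timestamp is more than 5 seconds in the future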
FC_ASSERT( (latency.count()/1000) > -5000, "Rejecting block with timestamp in the future" );
try {
// TODO: in the case where this block is valid but on a fork that's too old for us to switch to,


@ -636,7 +636,7 @@ std::map<std::string, full_account> database_api_impl::get_full_accounts( const
if( subscribe )
{
FC_ASSERT( std::distance(_subscribed_accounts.begin(), _subscribed_accounts.end()) < 100 );
FC_ASSERT( std::distance(_subscribed_accounts.begin(), _subscribed_accounts.end()) <= 100 );
_subscribed_accounts.insert( account->get_id() );
subscribe_to_item( account->id );
}
@ -1290,7 +1290,7 @@ order_book database_api_impl::get_order_book( const string& base, const string&
order ord;
ord.price = price_to_real( o.sell_price );
ord.quote = asset_to_real( o.for_sale, assets[1]->precision );
ord.base = asset_to_real( share_type( ( uint64_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ), assets[0]->precision );
ord.base = asset_to_real( share_type( ( uint128_t( o.for_sale.value ) * o.sell_price.quote.amount.value ) / o.sell_price.base.amount.value ), assets[0]->precision );
result.asks.push_back( ord );
}
}
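The switch to a 128-bit intermediate matters because the two 64-bit factors can overflow before the division; a small illustration with hypothetical magnitudes (GCC/Clang __int128 standing in for the uint128_t used above):
#include <cstdint>
#include <iostream>
int main()
{
   const uint64_t for_sale = 1ULL << 40;   // hypothetical order size
   const uint64_t quote    = 1ULL << 40;   // hypothetical quote amount
   const uint64_t base     = 1ULL << 20;   // hypothetical base amount
   const uint64_t narrow = ( for_sale * quote ) / base;   // product wraps modulo 2^64
   const uint64_t wide   = (uint64_t)( ( (unsigned __int128)for_sale * quote ) / base );
   std::cout << narrow << " vs " << wide << "\n";   // 0 vs 1152921504606846976
   return 0;
}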


@ -195,6 +195,12 @@ namespace graphene { namespace app {
*/
void broadcast_transaction_with_callback( confirmation_callback cb, const signed_transaction& trx);
/** This version of broadcast_transaction blocks until the transaction has been included in a block,
 * then returns the resulting confirmation, which includes the transaction id, block number, and
 * transaction number within the block.
 */
fc::variant broadcast_transaction_synchronous(const signed_transaction& trx);
void broadcast_block( const signed_block& block );
/**
@ -305,7 +311,7 @@ namespace graphene { namespace app {
asset_api(graphene::chain::database& db);
~asset_api();
vector<account_asset_balance> get_asset_holders( asset_id_type asset_id )const;
vector<account_asset_balance> get_asset_holders( asset_id_type asset_id, uint32_t start, uint32_t limit )const;
int get_asset_holders_count( asset_id_type asset_id )const;
vector<asset_holders> get_all_asset_holders() const;
@ -394,6 +400,7 @@ FC_API(graphene::app::block_api,
FC_API(graphene::app::network_broadcast_api,
(broadcast_transaction)
(broadcast_transaction_with_callback)
(broadcast_transaction_synchronous)
(broadcast_block)
)
FC_API(graphene::app::network_node_api,


@ -398,6 +398,6 @@ void database::notify_changed_objects()
removed_objects(removed_ids, removed, removed_accounts_impacted);
}
}
} FC_CAPTURE_AND_LOG( () ) }
} FC_CAPTURE_AND_LOG( (0) ) }
} }


@ -50,7 +50,10 @@ namespace graphene { namespace chain {
* Keep the most recent operation as a root pointer to a linked list of the transaction history.
*/
account_transaction_history_id_type most_recent_op;
/** Total operations related to this account. */
uint32_t total_ops = 0;
/** Total operations related to this account that have been removed from the database. */
uint32_t removed_ops = 0;
/**
* When calculating votes it is necessary to know how much is stored in orders (and thus unavailable for
@ -386,7 +389,7 @@ FC_REFLECT_DERIVED( graphene::chain::account_statistics_object,
(graphene::chain::object),
(owner)
(most_recent_op)
(total_ops)
(total_ops)(removed_ops)
(total_core_in_orders)
(lifetime_fees_paid)
(pending_fees)(pending_vested_fees)


@ -102,6 +102,7 @@ namespace graphene { namespace chain {
struct by_id;
struct by_seq;
struct by_op;
struct by_opid;
typedef multi_index_container<
account_transaction_history_object,
indexed_by<
@ -117,6 +118,9 @@ typedef multi_index_container<
member< account_transaction_history_object, account_id_type, &account_transaction_history_object::account>,
member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id>
>
>,
ordered_non_unique< tag<by_opid>,
member< account_transaction_history_object, operation_history_id_type, &account_transaction_history_object::operation_id>
>
>
> account_transaction_history_multi_index_type;
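The new non-unique by_opid index exists so the account history plugin can ask whether any history entry still references a given operation before deleting it (see the add_account_history change below); a minimal standalone boost::multi_index sketch of the same lookup, with hypothetical types:
#include <cstdint>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/tag.hpp>
struct hist_entry { uint64_t sequence; uint64_t operation_id; };
struct by_opid_tag {};
typedef boost::multi_index_container<
   hist_entry,
   boost::multi_index::indexed_by<
      boost::multi_index::ordered_non_unique<
         boost::multi_index::tag<by_opid_tag>,
         boost::multi_index::member< hist_entry, uint64_t, &hist_entry::operation_id >
      >
   >
> hist_container;
bool operation_still_referenced( const hist_container& c, uint64_t op_id )
{
   const auto& idx = c.get<by_opid_tag>();
   return idx.find( op_id ) != idx.end();
}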

@ -1 +1 @@
Subproject commit 7c88a95df11c16970412786e06d9b94b1aafddc4
Subproject commit 4d8ac59b0b42fcd4e229b44fbbbe15173e09d0b5


@ -49,6 +49,9 @@
#define GRAPHENE_NET_PEER_DISCONNECT_TIMEOUT 20
/* uncomment next line to use testnet seed ip and port */
//#define GRAPHENE_TEST_NETWORK 1
#define GRAPHENE_NET_TEST_SEED_IP "104.236.44.210" // autogenerated
#define GRAPHENE_NET_TEST_P2P_PORT 1700
#define GRAPHENE_NET_DEFAULT_P2P_PORT 1776


@ -974,11 +974,7 @@ namespace graphene { namespace net { namespace detail {
{
throw;
}
catch (const fc::exception& e)
{
elog("${e}", ("e", e));
}
FC_CAPTURE_AND_LOG( () )
FC_CAPTURE_AND_LOG( (0) )
}// while(!canceled)
}
@ -4193,7 +4189,7 @@ namespace graphene { namespace net { namespace detail {
// limit the rate at which we accept connections to mitigate DOS attacks
fc::usleep( fc::milliseconds(10) );
} FC_CAPTURE_AND_LOG( () )
} FC_CAPTURE_AND_LOG( (0) )
}
} // accept_loop()
@ -4394,7 +4390,7 @@ namespace graphene { namespace net { namespace detail {
_node_configuration = detail::node_configuration();
#ifdef GRAPHENE_TEST_NETWORK
uint32_t port = GRAPHENE_NET_TEST_P2P_PORT + GRAPHENE_TEST_NETWORK_VERSION;
uint32_t port = GRAPHENE_NET_TEST_P2P_PORT;
#else
uint32_t port = GRAPHENE_NET_DEFAULT_P2P_PORT;
#endif


@ -66,6 +66,11 @@ class account_history_plugin_impl
flat_set<account_id_type> _tracked_accounts;
bool _partial_operations = false;
primary_index< simple_index< operation_history_object > >* _oho_index;
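// note: initialising a uint32_t from -1 wraps to UINT32_MAX, i.e. effectively unlimited unless max-ops-per-account is set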
uint32_t _max_ops_per_account = -1;
private:
/** add one history record, then remove the earliest record if the account now exceeds the configured limit */
void add_account_history( const account_id_type account_id, const operation_history_id_type op_id );
};
account_history_plugin_impl::~account_history_plugin_impl()
@ -89,39 +94,26 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
} ) );
};
if (_partial_operations)
if( !o_op.valid() || ( _max_ops_per_account == 0 && _partial_operations ) )
{
if( !o_op.valid() )
{
_oho_index->use_next_id();
continue;
}
// Note: the 2nd and 3rd checks above are purely for performance; when the db is not clean,
// they can break consistency of account_stats.total_ops, removed_ops and most_recent_op
_oho_index->use_next_id();
continue;
}
else
{
else if( !_partial_operations )
// add to the operation history index
oho = create_oho();
if( !o_op.valid() )
{
ilog( "removing failed operation with ID: ${id}", ("id", oho->id) );
db.remove( *oho );
continue;
}
}
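// summary of the new flow: an invalid operation result (or partial operations with
// max-ops-per-account set to 0) just consumes the next id and skips the entry; with full
// operation history the object is created immediately; otherwise create_oho() is deferred
// until a tracked or impacted account actually records the operation below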
const operation_history_object& op = *o_op;
// get the set of accounts this operation applies to
flat_set<account_id_type> impacted;
vector<authority> other;
operation_get_required_authorities( op.op, impacted, impacted, other );
operation_get_required_authorities( op.op, impacted, impacted, other ); // fee_payer is added here
if( op.op.which() == operation::tag< account_create_operation >::value )
{
if (!oho.valid()) { oho = create_oho(); }
impacted.insert( oho->result.get<object_id_type>() );
}
impacted.insert( op.result.get<object_id_type>() );
else
graphene::app::operation_get_impacted_accounts( op.op, impacted );
@ -129,48 +121,52 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
for( auto& item : a.account_auths )
impacted.insert( item.first );
// for each operation this account applies to that is in the config, link it into the history
if( _tracked_accounts.size() == 0 )
{
if (!impacted.empty() && !oho.valid()) { oho = create_oho(); }
for( auto& account_id : impacted )
{
// we don't do index_account_keys here anymore, because
// that indexing now happens in observers' post_evaluate()
// to reach here, either _max_ops_per_account > 0, or _partial_operations == false, or both
// if _partial_operations == false, oho has already been created above,
// so the only case left to check here is:
// whether oho needs to be created when _max_ops_per_account > 0 and _partial_operations == true
// add history
const auto& stats_obj = account_id(db).statistics(db);
const auto& ath = db.create<account_transaction_history_object>( [&]( account_transaction_history_object& obj ){
obj.operation_id = oho->id;
obj.account = account_id;
obj.sequence = stats_obj.total_ops+1;
obj.next = stats_obj.most_recent_op;
});
db.modify( stats_obj, [&]( account_statistics_object& obj ){
obj.most_recent_op = ath.id;
obj.total_ops = ath.sequence;
});
// for each operation this account applies to that is in the config, link it into the history
if( _tracked_accounts.size() == 0 ) // tracking all accounts
{
// if tracking all accounts, and impacted is not empty (which it always will be),
// oho is still needed when _max_ops_per_account > 0 and _partial_operations == true,
// so create it here if it has not been created yet
if (!impacted.empty() && !oho.valid()) { oho = create_oho(); }
if( _max_ops_per_account > 0 )
{
// Note: the check above is purely for performance; when the db is not clean it can break
// consistency of account_stats.total_ops, removed_ops and most_recent_op,
// but it ensures it is safe to remove old entries in add_account_history(...)
for( auto& account_id : impacted )
{
// we don't do index_account_keys here anymore, because
// that indexing now happens in observers' post_evaluate()
// add history
add_account_history( account_id, oho->id );
}
}
}
else
else // tracking a subset of accounts
{
for( auto account_id : _tracked_accounts )
// does oho need to be created when _max_ops_per_account > 0 and _partial_operations == true?
// only if a tracked account is impacted and its history needs to be saved
if( _max_ops_per_account > 0 )
{
if( impacted.find( account_id ) != impacted.end() )
// Note: the check above is purely for performance; when the db is not clean it can break
// consistency of account_stats.total_ops, removed_ops and most_recent_op,
// but it ensures it is safe to remove old entries in add_account_history(...)
for( auto account_id : _tracked_accounts )
{
if (!oho.valid()) { oho = create_oho(); }
// add history
const auto& stats_obj = account_id(db).statistics(db);
const auto& ath = db.create<account_transaction_history_object>( [&]( account_transaction_history_object& obj ){
obj.operation_id = oho->id;
obj.account = account_id;
obj.sequence = stats_obj.total_ops+1;
obj.next = stats_obj.most_recent_op;
});
db.modify( stats_obj, [&]( account_statistics_object& obj ){
obj.most_recent_op = ath.id;
obj.total_ops = ath.sequence;
});
if( impacted.find( account_id ) != impacted.end() )
{
if (!oho.valid()) { oho = create_oho(); }
// add history
add_account_history( account_id, oho->id );
}
}
}
}
@ -178,6 +174,66 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
_oho_index->use_next_id();
}
}
void account_history_plugin_impl::add_account_history( const account_id_type account_id, const operation_history_id_type op_id )
{
graphene::chain::database& db = database();
const auto& stats_obj = account_id(db).statistics(db);
// add new entry
const auto& ath = db.create<account_transaction_history_object>( [&]( account_transaction_history_object& obj ){
obj.operation_id = op_id;
obj.account = account_id;
obj.sequence = stats_obj.total_ops + 1;
obj.next = stats_obj.most_recent_op;
});
db.modify( stats_obj, [&]( account_statistics_object& obj ){
obj.most_recent_op = ath.id;
obj.total_ops = ath.sequence;
});
// remove the earliest account history entry if there are now too many
// _max_ops_per_account is guaranteed to be non-zero by the caller
if( stats_obj.total_ops - stats_obj.removed_ops > _max_ops_per_account )
{
// look for the earliest entry
const auto& his_idx = db.get_index_type<account_transaction_history_index>();
const auto& by_seq_idx = his_idx.indices().get<by_seq>();
auto itr = by_seq_idx.lower_bound( boost::make_tuple( account_id, 0 ) );
// make sure we don't remove the entry just added
if( itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id )
{
// if found, remove the entry, and adjust account stats object
const auto remove_op_id = itr->operation_id;
const auto itr_remove = itr;
++itr;
db.remove( *itr_remove );
db.modify( stats_obj, [&]( account_statistics_object& obj ){
obj.removed_ops = obj.removed_ops + 1;
});
// modify previous node's next pointer
// this should always be true, but check just in case
if( itr != by_seq_idx.end() && itr->account == account_id )
{
db.modify( *itr, [&]( account_transaction_history_object& obj ){
obj.next = account_transaction_history_id_type();
});
}
// otherwise the head pointer would need to be updated, but that case should not occur
// remove the operation history entry (1.11.x) if configured and no reference left
if( _partial_operations )
{
// check for references
const auto& by_opid_idx = his_idx.indices().get<by_opid>();
if( by_opid_idx.find( remove_op_id ) == by_opid_idx.end() )
{
// if no reference, remove
db.remove( remove_op_id(db) );
}
}
}
}
}
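A simplified standalone model of the trimming policy implemented above, with a plain deque standing in for the linked history and the two counters mirroring account_statistics_object:
#include <cstdint>
#include <deque>
struct capped_history
{
   uint32_t total_ops   = 0;   // entries ever added (account_statistics_object::total_ops)
   uint32_t removed_ops = 0;   // entries trimmed away (account_statistics_object::removed_ops)
   std::deque<uint64_t> ops;   // live operation ids, oldest first
   void add( uint64_t op_id, uint32_t max_ops )
   {
      ops.push_back( op_id );
      ++total_ops;
      if( total_ops - removed_ops > max_ops )   // same check as add_account_history
      {
         ops.pop_front();                       // drop the earliest entry
         ++removed_ops;
      }
   }
};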
} // end namespace detail
@ -207,6 +263,7 @@ void account_history_plugin::plugin_set_program_options(
cli.add_options()
("track-account", boost::program_options::value<std::vector<std::string>>()->composing()->multitoken(), "Account ID to track history for (may specify multiple times)")
("partial-operations", boost::program_options::value<bool>(), "Keep only those operations in memory that are related to account history tracking")
("max-ops-per-account", boost::program_options::value<uint32_t>(), "Maximum number of operations per account will be kept in memory")
;
cfg.add(cli);
}
@ -221,6 +278,9 @@ void account_history_plugin::plugin_initialize(const boost::program_options::var
if (options.count("partial-operations")) {
my->_partial_operations = options["partial-operations"].as<bool>();
}
if (options.count("max-ops-per-account")) {
my->_max_ops_per_account = options["max-ops-per-account"].as<uint32_t>();
}
}
void account_history_plugin::plugin_startup()