Merge branch 'develop' into GRPH-50-network_broadcast_api-fix-v2

Miha Čančula 2019-09-18 16:36:42 +02:00
commit 499a5fc33e
39 changed files with 1761 additions and 501 deletions

View file

@ -1,36 +1,28 @@
stages:
- pull
- build
- test
before_script:
- cd /var/www/Projects/595.peerplays/blockchain
pulljob:
stage: pull
script:
- git pull origin master
only:
- master
tags:
- pp-dev
buildjob:
build:
stage: build
script:
- git submodule update --init --recursive
- cmake .
- make
only:
- master
tags:
- pp-dev
testjob:
- make -j$(nproc)
artifacts:
untracked: true
paths:
- libraries/
- programs/
- tests/
tags:
- builder
test:
stage: test
dependencies:
- build
script:
- ./tests/betting_test
- ./tests/chain_test
- ./tests/tournament_test
only:
- master
tags:
- pp-dev
tags:
- builder

View file

@ -556,26 +556,32 @@ namespace graphene { namespace app {
unsigned limit,
operation_history_id_type start ) const
{
FC_ASSERT( _app.chain_database() );
const auto& db = *_app.chain_database();
FC_ASSERT( limit <= 100 );
vector<operation_history_object> result;
const auto& stats = account(db).statistics(db);
if( stats.most_recent_op == account_transaction_history_id_type() ) return result;
const account_transaction_history_object* node = &stats.most_recent_op(db);
if( start == operation_history_id_type() )
start = node->operation_id;
FC_ASSERT( _app.chain_database() );
const auto& db = *_app.chain_database();
FC_ASSERT( limit <= 100 );
vector<operation_history_object> result;
try {
const account_transaction_history_object& node = account(db).statistics(db).most_recent_op(db);
if(start == operation_history_id_type() || start.instance.value > node.operation_id.instance.value)
start = node.operation_id;
} catch(...) { return result; }
while(node && node->operation_id.instance.value > stop.instance.value && result.size() < limit)
{
if( node->operation_id.instance.value <= start.instance.value )
result.push_back( node->operation_id(db) );
if( node->next == account_transaction_history_id_type() )
node = nullptr;
else node = &node->next(db);
}
const auto& hist_idx = db.get_index_type<account_transaction_history_index>();
const auto& by_op_idx = hist_idx.indices().get<by_op>();
auto index_start = by_op_idx.begin();
auto itr = by_op_idx.lower_bound(boost::make_tuple(account, start));
return result;
while(itr != index_start && itr->account == account && itr->operation_id.instance.value > stop.instance.value && result.size() < limit)
{
if(itr->operation_id.instance.value <= start.instance.value)
result.push_back(itr->operation_id(db));
--itr;
}
if(stop.instance.value == 0 && result.size() < limit && itr->account == account) {
result.push_back(itr->operation_id(db));
}
return result;
}
vector<operation_history_object> history_api::get_account_history_operations( account_id_type account,
@ -600,11 +606,16 @@ namespace graphene { namespace app {
if(node->operation_id(db).op.which() == operation_id)
result.push_back( node->operation_id(db) );
}
}
if( node->next == account_transaction_history_id_type() )
node = nullptr;
else node = &node->next(db);
}
if( stop.instance.value == 0 && result.size() < limit ) {
auto head = db.find(account_transaction_history_id_type());
if (head != nullptr && head->account == account && head->operation_id(db).op.which() == operation_id)
result.push_back(head->operation_id(db));
}
return result;
}
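
The rewritten get_account_history above walks the by_op index backwards from lower_bound instead of following the per-account linked list of history nodes. A minimal standalone sketch of that reverse-walk pattern, assuming a plain std::map keyed by operation id instead of the actual Graphene multi-index types:

#include <cstddef>
#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Collect up to `limit` operation ids in the range (stop, start], newest first,
// the same shape as the loop over the by_op index above.
std::vector<uint64_t> collect_range_desc( const std::map<uint64_t, std::string>& by_op,
                                          uint64_t start, uint64_t stop, std::size_t limit )
{
   std::vector<uint64_t> result;
   // upper_bound(start) is the first key greater than start; stepping back from it
   // visits the newest entry whose key is <= start.
   auto itr = by_op.upper_bound( start );
   while( itr != by_op.begin() && result.size() < limit )
   {
      --itr;                         // move to the next older entry
      if( itr->first <= stop )       // stop is exclusive on the lower end
         break;
      result.push_back( itr->first );
   }
   return result;
}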

View file

@ -226,7 +226,7 @@ namespace detail {
void new_connection( const fc::http::websocket_connection_ptr& c )
{
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(*c, GRAPHENE_NET_MAX_NESTED_OBJECTS);
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(c, GRAPHENE_MAX_NESTED_OBJECTS);
auto login = std::make_shared<graphene::app::login_api>( std::ref(*_self) );
login->enable_api("database_api");
@ -300,7 +300,6 @@ namespace detail {
~application_impl()
{
fc::remove_all(_data_dir / "blockchain/dblock");
}
void set_dbg_init_key( genesis_state_type& genesis, const std::string& init_key )
@ -313,8 +312,7 @@ namespace detail {
void startup()
{ try {
bool clean = !fc::exists(_data_dir / "blockchain/dblock");
fc::create_directories(_data_dir / "blockchain/dblock");
fc::create_directories(_data_dir / "blockchain");
auto initial_state = [this] {
ilog("Initializing database...");
@ -380,64 +378,17 @@ namespace detail {
bool replay = false;
std::string replay_reason = "reason not provided";
// never replay if data dir is empty
if( fc::exists( _data_dir ) && fc::directory_iterator( _data_dir ) != fc::directory_iterator() )
{
if( _options->count("replay-blockchain") )
{
replay = true;
replay_reason = "replay-blockchain argument specified";
}
else if( !clean )
{
replay = true;
replay_reason = "unclean shutdown detected";
}
else if( !fc::exists( _data_dir / "db_version" ) )
{
replay = true;
replay_reason = "db_version file not found";
}
else
{
std::string version_string;
fc::read_file_contents( _data_dir / "db_version", version_string );
if( _options->count("replay-blockchain") )
_chain_db->wipe( _data_dir / "blockchain", false );
if( version_string != GRAPHENE_CURRENT_DB_VERSION )
{
replay = true;
replay_reason = "db_version file content mismatch";
}
}
try
{
_chain_db->open( _data_dir / "blockchain", initial_state, GRAPHENE_CURRENT_DB_VERSION );
}
if( !replay )
catch( const fc::exception& e )
{
try
{
_chain_db->open( _data_dir / "blockchain", initial_state );
}
catch( const fc::exception& e )
{
ilog( "Caught exception ${e} in open()", ("e", e.to_detail_string()) );
replay = true;
replay_reason = "exception in open()";
}
}
if( replay )
{
ilog( "Replaying blockchain due to: ${reason}", ("reason", replay_reason) );
fc::remove_all( _data_dir / "db_version" );
_chain_db->reindex( _data_dir / "blockchain", initial_state() );
const auto mode = std::ios::out | std::ios::binary | std::ios::trunc;
std::ofstream db_version( (_data_dir / "db_version").generic_string().c_str(), mode );
std::string version_string = GRAPHENE_CURRENT_DB_VERSION;
db_version.write( version_string.c_str(), version_string.size() );
db_version.close();
elog( "Caught exception ${e} in open(), you might want to force a replay", ("e", e.to_detail_string()) );
throw;
}
if( _options->count("force-validate") )

View file

@ -551,7 +551,7 @@ vector<vector<account_id_type>> database_api_impl::get_key_references( vector<pu
auto itr = refs.account_to_address_memberships.find(a);
if( itr != refs.account_to_address_memberships.end() )
{
result.reserve( itr->second.size() );
result.reserve( result.size() + itr->second.size() );
for( auto item : itr->second )
{
wdump((a)(item)(item(_db).name));
@ -562,7 +562,7 @@ vector<vector<account_id_type>> database_api_impl::get_key_references( vector<pu
if( itr != refs.account_to_key_memberships.end() )
{
result.reserve( itr->second.size() );
result.reserve( result.size() + itr->second.size() );
for( auto item : itr->second ) result.push_back(item);
}
final_result.emplace_back( std::move(result) );

View file

@ -119,9 +119,9 @@ set<account_id_type> account_member_index::get_account_members(const account_obj
result.insert(auth.first);
return result;
}
set<public_key_type> account_member_index::get_key_members(const account_object& a)const
set<public_key_type, account_member_index::key_compare> account_member_index::get_key_members(const account_object& a)const
{
set<public_key_type> result;
set<public_key_type, key_compare> result;
for( auto auth : a.owner.key_auths )
result.insert(auth.first);
for( auto auth : a.active.key_auths )
@ -213,7 +213,7 @@ void account_member_index::object_modified(const object& after)
{
set<public_key_type> after_key_members = get_key_members(a);
set<public_key_type, key_compare> after_key_members = get_key_members(a);
vector<public_key_type> removed; removed.reserve(before_key_members.size());
std::set_difference(before_key_members.begin(), before_key_members.end(),

View file

@ -45,14 +45,15 @@ void block_database::open( const fc::path& dbdir )
_block_num_to_pos.exceptions(std::ios_base::failbit | std::ios_base::badbit);
_blocks.exceptions(std::ios_base::failbit | std::ios_base::badbit);
if( !fc::exists( dbdir/"index" ) )
_index_filename = dbdir / "index";
if( !fc::exists( _index_filename ) )
{
_block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
_block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
_blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
}
else
{
_block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
_block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
_blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
}
} FC_CAPTURE_AND_RETHROW( (dbdir) ) }
@ -121,7 +122,7 @@ bool block_database::contains( const block_id_type& id )const
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
if ( _block_num_to_pos.tellg() <= index_pos )
if ( _block_num_to_pos.tellg() < index_pos + sizeof(e) )
return false;
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
@ -206,34 +207,47 @@ optional<signed_block> block_database::fetch_by_number( uint32_t block_num )cons
return optional<signed_block>();
}
optional<signed_block> block_database::last()const
{
optional<index_entry> block_database::last_index_entry()const {
try
{
index_entry e;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos pos = _block_num_to_pos.tellg();
if( pos < sizeof(index_entry) )
return optional<index_entry>();
if( _block_num_to_pos.tellp() < sizeof(index_entry) )
return optional<signed_block>();
pos -= pos % sizeof(index_entry);
_block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
uint64_t pos = _block_num_to_pos.tellg();
while( e.block_size == 0 && pos > 0 )
_blocks.seekg( 0, _block_num_to_pos.end );
const std::streampos blocks_size = _blocks.tellg();
while( pos > 0 )
{
pos -= sizeof(index_entry);
_block_num_to_pos.seekg( pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0
&& e.block_pos + e.block_size <= blocks_size )
try
{
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
if( _blocks.gcount() == e.block_size )
{
const signed_block block = fc::raw::unpack<signed_block>(data);
if( block.id() == e.block_id )
return e;
}
}
catch (const fc::exception&)
{
}
catch (const std::exception&)
{
}
fc::resize_file( _index_filename, pos );
}
if( e.block_size == 0 )
return optional<signed_block>();
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
auto result = fc::raw::unpack<signed_block>(data);
return result;
}
catch (const fc::exception&)
{
@ -241,42 +255,21 @@ optional<signed_block> block_database::last()const
catch (const std::exception&)
{
}
return optional<index_entry>();
}
optional<signed_block> block_database::last()const
{
optional<index_entry> entry = last_index_entry();
if( entry.valid() ) return fetch_by_number( block_header::num_from_id(entry->block_id) );
return optional<signed_block>();
}
optional<block_id_type> block_database::last_id()const
{
try
{
index_entry e;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
if( _block_num_to_pos.tellp() < sizeof(index_entry) )
return optional<block_id_type>();
_block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
uint64_t pos = _block_num_to_pos.tellg();
while( e.block_size == 0 && pos > 0 )
{
pos -= sizeof(index_entry);
_block_num_to_pos.seekg( pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
}
if( e.block_size == 0 )
return optional<block_id_type>();
return e.block_id;
}
catch (const fc::exception&)
{
}
catch (const std::exception&)
{
}
optional<index_entry> entry = last_index_entry();
if( entry.valid() ) return entry->block_id;
return optional<block_id_type>();
}
} }
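
last_index_entry() above scans the index file from the end, discards trailing entries whose block data does not validate, and truncates the file as it goes. A simplified sketch of the same idea, assuming an in-memory vector in place of the index file and a caller-supplied predicate in place of the unpack-and-compare-block-id check:

#include <functional>
#include <vector>

struct index_entry_sketch { unsigned block_size = 0; /* block_id, block_pos, ... */ };

// Walk the index from the back towards the front, dropping trailing entries that
// fail validation, and return a pointer to the last entry that checks out
// (nullptr when nothing valid remains).
const index_entry_sketch* last_valid_entry( std::vector<index_entry_sketch>& index,
                                            const std::function<bool(const index_entry_sketch&)>& is_valid )
{
   while( !index.empty() )
   {
      if( is_valid( index.back() ) )
         return &index.back();
      index.pop_back();              // analogous to fc::resize_file() shrinking the index
   }
   return nullptr;
}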

View file

@ -215,12 +215,15 @@ bool database::_push_block(const signed_block& new_block)
// pop blocks until we hit the forked block
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}
// push all blocks on the new fork
for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
{
ilog( "pushing blocks from fork ${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->data.id()) );
ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
optional<fc::exception> except;
try {
undo_database::session session = _undo_db.start_undo_session();
@ -235,21 +238,27 @@ bool database::_push_block(const signed_block& new_block)
// remove the rest of branches.first from the fork_db, those blocks are invalid
while( ritr != branches.first.rend() )
{
_fork_db.remove( (*ritr)->data.id() );
ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
_fork_db.remove( (*ritr)->id );
++ritr;
}
_fork_db.set_head( branches.second.front() );
// pop all blocks from the bad fork
while( head_block_id() != branches.second.back()->data.previous )
pop_block();
// restore all blocks from the good fork
for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}
ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) );
// restore all blocks from the good fork
for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 )
{
ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) );
auto session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
_block_id_to_block.store( new_block.id(), (*ritr)->data );
apply_block( (*ritr2)->data, skip );
_block_id_to_block.store( (*ritr2)->id, (*ritr2)->data );
session.commit();
}
throw *except;
@ -506,7 +515,6 @@ void database::pop_block()
GRAPHENE_ASSERT( head_block.valid(), pop_empty_chain, "there are no blocks to pop" );
_fork_db.pop_block();
_block_id_to_block.remove( head_id );
pop_undo();
_popped_tx.insert( _popped_tx.begin(), head_block->transactions.begin(), head_block->transactions.end() );
@ -612,7 +620,8 @@ void database::_apply_block( const signed_block& next_block )
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM)
update_witness_schedule(next_block);
update_global_dynamic_data(next_block);
const uint32_t missed = update_witness_missed_blocks( next_block );
update_global_dynamic_data( next_block, missed );
update_signing_witness(signing_witness, next_block);
update_last_irreversible_block();
@ -671,9 +680,14 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
auto& trx_idx = get_mutable_index_type<transaction_index>();
const chain_id_type& chain_id = get_chain_id();
auto trx_id = trx.id();
FC_ASSERT( (skip & skip_transaction_dupe_check) ||
trx_idx.indices().get<by_trx_id>().find(trx_id) == trx_idx.indices().get<by_trx_id>().end() );
transaction_id_type trx_id;
if( !(skip & skip_transaction_dupe_check) )
{
trx_id = trx.id();
FC_ASSERT( trx_idx.indices().get<by_trx_id>().find(trx_id) == trx_idx.indices().get<by_trx_id>().end() );
}
transaction_evaluation_state eval_state(this);
const chain_parameters& chain_parameters = get_global_properties().parameters;
eval_state._trx = &trx;
@ -707,7 +721,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
//Insert transaction into unique transactions database.
if( !(skip & skip_transaction_dupe_check) )
{
create<transaction_object>([&](transaction_object& transaction) {
create<transaction_object>([&trx_id,&trx](transaction_object& transaction) {
transaction.trx_id = trx_id;
transaction.trx = trx;
});

View file

@ -47,33 +47,86 @@ database::~database()
clear_pending();
}
void database::reindex(fc::path data_dir, const genesis_state_type& initial_allocation)
{ try {
ilog( "reindexing blockchain" );
wipe(data_dir, false);
open(data_dir, [&initial_allocation]{return initial_allocation;});
// Right now, we leave undo_db enabled when replaying when the bookie plugin is
// enabled. It depends on new/changed/removed object notifications, and those are
// only fired when the undo_db is enabled.
// So we use this helper object to disable undo_db only if it is not forbidden
// with _slow_replays flag.
class auto_undo_enabler
{
const bool _slow_replays;
undo_database& _undo_db;
bool _disabled;
public:
auto_undo_enabler(bool slow_replays, undo_database& undo_db) :
_slow_replays(slow_replays),
_undo_db(undo_db),
_disabled(false)
{
}
auto start = fc::time_point::now();
~auto_undo_enabler()
{
try{
enable();
} FC_CAPTURE_AND_LOG(("undo_db enabling crash"))
}
void enable()
{
if(!_disabled)
return;
_undo_db.enable();
_disabled = false;
}
void disable()
{
if(_disabled)
return;
if(_slow_replays)
return;
_undo_db.disable();
_disabled = true;
}
};
void database::reindex( fc::path data_dir )
{ try {
auto last_block = _block_id_to_block.last();
if( !last_block ) {
elog( "!no last block" );
edump((last_block));
return;
}
if( last_block->block_num() <= head_block_num()) return;
ilog( "reindexing blockchain" );
auto start = fc::time_point::now();
const auto last_block_num = last_block->block_num();
uint32_t flush_point = last_block_num < 10000 ? 0 : last_block_num - 10000;
uint32_t undo_point = last_block_num < 50 ? 0 : last_block_num - 50;
ilog( "Replaying blocks..." );
// Right now, we leave undo_db enabled when replaying when the bookie plugin is
// enabled. It depends on new/changed/removed object notifications, and those are
// only fired when the undo_db is enabled
if (!_slow_replays)
_undo_db.disable();
for( uint32_t i = 1; i <= last_block_num; ++i )
ilog( "Replaying blocks, starting at ${next}...", ("next",head_block_num() + 1) );
auto_undo_enabler undo(_slow_replays, _undo_db);
if( head_block_num() >= undo_point )
{
if( i == 1 ||
i % 10000 == 0 )
std::cerr << " " << double(i*100)/last_block_num << "% "<< i << " of " <<last_block_num<<" \n";
if( head_block_num() > 0 )
_fork_db.start_block( *fetch_block_by_number( head_block_num() ) );
}
else
{
undo.disable();
}
for( uint32_t i = head_block_num() + 1; i <= last_block_num; ++i )
{
if( i % 10000 == 0 ) std::cerr << " " << double(i*100)/last_block_num << "% "<<i << " of " <<last_block_num<<" \n";
if( i == flush_point )
{
ilog( "Writing database to disk at block ${i}", ("i",i) );
flush();
ilog( "Done" );
}
fc::optional< signed_block > block = _block_id_to_block.fetch_by_number(i);
if( !block.valid() )
{
@ -94,24 +147,27 @@ void database::reindex(fc::path data_dir, const genesis_state_type& initial_allo
wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) );
break;
}
if (_slow_replays)
push_block(*block, skip_fork_db |
skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
else
if( i < undo_point && !_slow_replays)
{
apply_block(*block, skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
}
else
{
undo.enable();
push_block(*block, skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
}
}
if (!_slow_replays)
_undo_db.enable();
undo.enable();
auto end = fc::time_point::now();
ilog( "Done reindexing, elapsed time: ${t} sec", ("t",double((end-start).count())/1000000.0 ) );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
@ -119,7 +175,9 @@ void database::reindex(fc::path data_dir, const genesis_state_type& initial_allo
void database::wipe(const fc::path& data_dir, bool include_blocks)
{
ilog("Wiping database", ("include_blocks", include_blocks));
close();
if (_opened) {
close();
}
object_database::wipe(data_dir);
if( include_blocks )
fc::remove_all( data_dir / "database" );
@ -127,10 +185,29 @@ void database::wipe(const fc::path& data_dir, bool include_blocks)
void database::open(
const fc::path& data_dir,
std::function<genesis_state_type()> genesis_loader)
std::function<genesis_state_type()> genesis_loader,
const std::string& db_version)
{
try
{
bool wipe_object_db = false;
if( !fc::exists( data_dir / "db_version" ) )
wipe_object_db = true;
else
{
std::string version_string;
fc::read_file_contents( data_dir / "db_version", version_string );
wipe_object_db = ( version_string != db_version );
}
if( wipe_object_db ) {
ilog("Wiping object_database due to missing or wrong version");
object_database::wipe( data_dir );
std::ofstream version_file( (data_dir / "db_version").generic_string().c_str(),
std::ios::out | std::ios::binary | std::ios::trunc );
version_file.write( db_version.c_str(), db_version.size() );
version_file.close();
}
object_database::open(data_dir);
_block_id_to_block.open(data_dir / "database" / "block_num_to_block");
@ -138,22 +215,24 @@ void database::open(
if( !find(global_property_id_type()) )
init_genesis(genesis_loader());
fc::optional<signed_block> last_block = _block_id_to_block.last();
fc::optional<block_id_type> last_block = _block_id_to_block.last_id();
if( last_block.valid() )
{
_fork_db.start_block( *last_block );
if( last_block->id() != head_block_id() )
{
FC_ASSERT( head_block_num() == 0, "last block ID does not match current chain state",
("last_block->id", last_block->id())("head_block_num",head_block_num()) );
}
FC_ASSERT( *last_block >= head_block_id(),
"last block ID does not match current chain state",
("last_block->id", last_block)("head_block_id",head_block_num()) );
reindex( data_dir );
}
_opened = true;
}
FC_CAPTURE_LOG_AND_RETHROW( (data_dir) )
}
void database::close(bool rewind)
{
if (!_opened)
return;
// TODO: Save pending tx's on close()
clear_pending();
@ -167,17 +246,9 @@ void database::close(bool rewind)
while( head_block_num() > cutoff )
{
// elog("pop");
block_id_type popped_block_id = head_block_id();
pop_block();
_fork_db.remove(popped_block_id); // doesn't throw on missing
try
{
_block_id_to_block.remove(popped_block_id);
}
catch (const fc::key_not_found_exception&)
{
}
}
}
catch ( const fc::exception& e )
@ -198,6 +269,8 @@ void database::close(bool rewind)
_block_id_to_block.close();
_fork_db.reset();
_opened = false;
}
void database::force_slow_replays()
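
The db_version handling added to database::open() above amounts to: read a stamp file, wipe the object database when the stamp is missing or different, and write the current stamp. A minimal sketch of that pattern with std::filesystem; the helper names are illustrative, not the fc calls used above:

#include <filesystem>
#include <fstream>
#include <sstream>
#include <string>

namespace fs = std::filesystem;

// True when the stored stamp is absent or differs from the current version,
// i.e. the caller should wipe its on-disk object state before reopening.
bool needs_wipe( const fs::path& data_dir, const std::string& current_version )
{
   const fs::path stamp = data_dir / "db_version";
   if( !fs::exists( stamp ) )
      return true;
   std::ifstream in( stamp );
   std::stringstream buf;
   buf << in.rdbuf();                // read the whole stamp file
   return buf.str() != current_version;
}

void write_stamp( const fs::path& data_dir, const std::string& current_version )
{
   std::ofstream out( data_dir / "db_version", std::ios::binary | std::ios::trunc );
   out << current_version;           // record the format that is about to be written
}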

View file

@ -43,43 +43,13 @@
namespace graphene { namespace chain {
void database::update_global_dynamic_data( const signed_block& b )
void database::update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks )
{
const dynamic_global_property_object& _dgp = dynamic_global_property_id_type(0)(*this);
const global_property_object& gpo = get_global_properties();
uint32_t missed_blocks = get_slot_at_time( b.timestamp );
//#define DIRTY_TRICK // problem with missed_blocks can occur when "maintenance_interval" set to few minutes
#ifdef DIRTY_TRICK
if (missed_blocks != 0) {
#else
assert( missed_blocks != 0 );
#endif
// bad if-condition, this code needs to execute for both shuffled and rng algorithms
// if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
// {
missed_blocks--;
for( uint32_t i = 0; i < missed_blocks; ++i ) {
const auto& witness_missed = get_scheduled_witness( i+1 )(*this);
if( witness_missed.id != b.witness ) {
/*
const auto& witness_account = witness_missed.witness_account(*this);
if( (fc::time_point::now() - b.timestamp) < fc::seconds(30) )
wlog( "Witness ${name} missed block ${n} around ${t}", ("name",witness_account.name)("n",b.block_num())("t",b.timestamp) );
*/
modify( witness_missed, [&]( witness_object& w ) {
w.total_missed++;
});
}
}
// }
#ifdef DIRTY_TRICK
}
#endif
// dynamic global properties updating
modify( _dgp, [&]( dynamic_global_property_object& dgp ){
modify( _dgp, [&b,this,missed_blocks]( dynamic_global_property_object& dgp ){
secret_hash_type::encoder enc;
fc::raw::pack( enc, dgp.random );
fc::raw::pack( enc, b.previous_secret );
@ -87,9 +57,10 @@ void database::update_global_dynamic_data( const signed_block& b )
_random_number_generator = fc::hash_ctr_rng<secret_hash_type, 20>(dgp.random.data());
if( BOOST_UNLIKELY( b.block_num() == 1 ) )
const uint32_t block_num = b.block_num();
if( BOOST_UNLIKELY( block_num == 1 ) )
dgp.recently_missed_count = 0;
else if( _checkpoints.size() && _checkpoints.rbegin()->first >= b.block_num() )
else if( _checkpoints.size() && _checkpoints.rbegin()->first >= block_num )
dgp.recently_missed_count = 0;
else if( missed_blocks )
dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks;
@ -98,7 +69,7 @@ void database::update_global_dynamic_data( const signed_block& b )
else if( dgp.recently_missed_count > 0 )
dgp.recently_missed_count--;
dgp.head_block_number = b.block_num();
dgp.head_block_number = block_num;
dgp.head_block_id = b.id();
dgp.time = b.timestamp;
dgp.current_witness = b.witness;

View file

@ -226,6 +226,22 @@ void database::update_witness_schedule(const signed_block& next_block)
idump( ( double(total_time/1000000.0)/calls) );
}
uint32_t database::update_witness_missed_blocks( const signed_block& b )
{
uint32_t missed_blocks = get_slot_at_time( b.timestamp );
FC_ASSERT( missed_blocks != 0, "Trying to push double-produced block onto current block?!" );
missed_blocks--;
const auto& witnesses = witness_schedule_id_type()(*this).current_shuffled_witnesses;
if( missed_blocks < witnesses.size() )
for( uint32_t i = 0; i < missed_blocks; ++i ) {
const auto& witness_missed = get_scheduled_witness( i+1 )(*this);
modify( witness_missed, []( witness_object& w ) {
w.total_missed++;
});
}
return missed_blocks;
}
uint32_t database::witness_participation_rate()const
{
const global_property_object& gpo = get_global_properties();

View file

@ -278,6 +278,25 @@ namespace graphene { namespace chain {
*/
class account_member_index : public secondary_index
{
/* std::less::operator() is less efficient, so key_compare is used here.
* Assume the container holds two keys, key1 and key2, and we want to insert key3.
* The insert function first has to call std::less::operator() with (key1, key3).
* Assume std::less::operator()(key1, key3) returns false.
* It then has to call std::less::operator() again with the keys swapped,
* std::less::operator()(key3, key1), to decide whether key1 is equal to key3 or
* key3 is greater than key1. That makes two calls to std::less::operator()
* whenever the first call returns false.
* When std::map::insert and std::set use key_compare instead,
* one call provides sufficient information to make the right decision.
*/
class key_compare {
public:
inline bool operator()( const public_key_type& a, const public_key_type& b )const
{
return a.key_data < b.key_data;
}
};
public:
virtual void object_inserted( const object& obj ) override;
virtual void object_removed( const object& obj ) override;
@ -287,18 +306,18 @@ namespace graphene { namespace chain {
/** given an account or key, map it to the set of accounts that reference it in an active or owner authority */
map< account_id_type, set<account_id_type> > account_to_account_memberships;
map< public_key_type, set<account_id_type> > account_to_key_memberships;
map< public_key_type, set<account_id_type>, key_compare > account_to_key_memberships;
/** some accounts use address authorities in the genesis block */
map< address, set<account_id_type> > account_to_address_memberships;
protected:
set<account_id_type> get_account_members( const account_object& a )const;
set<public_key_type> get_key_members( const account_object& a )const;
set<public_key_type, key_compare> get_key_members( const account_object& a )const;
set<address> get_address_members( const account_object& a )const;
set<account_id_type> before_account_members;
set<public_key_type> before_key_members;
set<public_key_type, key_compare> before_key_members;
set<address> before_address_members;
};
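
For reference, a minimal, self-contained example of a std::set ordered by a custom comparator over raw key bytes, the same shape as account_to_key_memberships above; the pubkey struct is a simplified stand-in, not the actual public_key_type:

#include <array>
#include <set>

struct pubkey { std::array<unsigned char, 33> key_data{}; };

// Orders keys by their raw key_data bytes, mirroring key_compare above.
struct raw_key_compare
{
   bool operator()( const pubkey& a, const pubkey& b ) const
   {
      return a.key_data < b.key_data;
   }
};

int main()
{
   std::set<pubkey, raw_key_compare> keys;   // uniqueness and ordering use raw_key_compare
   keys.insert( pubkey{} );
   return keys.size() == 1 ? 0 : 1;
}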

View file

@ -230,8 +230,16 @@ namespace graphene { namespace chain {
share_type settlement_fund;
///@}
/// The time when @ref current_feed would expire
time_point_sec feed_expiration_time()const
{ return current_feed_publication_time + options.feed_lifetime_sec; }
{
uint32_t current_feed_seconds = current_feed_publication_time.sec_since_epoch();
if( std::numeric_limits<uint32_t>::max() - current_feed_seconds <= options.feed_lifetime_sec )
return time_point_sec::maximum();
else
return current_feed_publication_time + options.feed_lifetime_sec;
}
bool feed_is_expired_before_hardfork_615(time_point_sec current_time)const
{ return feed_expiration_time() >= current_time; }
bool feed_is_expired(time_point_sec current_time)const
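
The guard added to feed_expiration_time() above prevents 32-bit wrap-around when the feed lifetime is added to the publication time. The same check in isolation, on plain integers:

#include <cstdint>
#include <limits>

// Saturating addition of a lifetime (in seconds) to a 32-bit epoch timestamp.
uint32_t expiration_sec( uint32_t publication_sec, uint32_t lifetime_sec )
{
   if( std::numeric_limits<uint32_t>::max() - publication_sec <= lifetime_sec )
      return std::numeric_limits<uint32_t>::max();   // addition would wrap, clamp instead
   return publication_sec + lifetime_sec;
}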

View file

@ -26,6 +26,8 @@
#include <graphene/chain/protocol/block.hpp>
namespace graphene { namespace chain {
class index_entry;
class block_database
{
public:
@ -44,6 +46,8 @@ namespace graphene { namespace chain {
optional<signed_block> last()const;
optional<block_id_type> last_id()const;
private:
optional<index_entry> last_index_entry()const;
fc::path _index_filename;
mutable std::fstream _blocks;
mutable std::fstream _block_num_to_pos;
};

View file

@ -91,10 +91,12 @@ namespace graphene { namespace chain {
*
* @param data_dir Path to open or create database in
* @param genesis_loader A callable object which returns the genesis state to initialize new databases on
* @param db_version a version string that changes when the internal database format and/or logic is modified
*/
void open(
const fc::path& data_dir,
std::function<genesis_state_type()> genesis_loader );
std::function<genesis_state_type()> genesis_loader,
const std::string& db_version );
/**
* @brief Rebuild object graph from block history and open database
@ -102,7 +104,7 @@ namespace graphene { namespace chain {
* This method may be called after or instead of @ref database::open, and will rebuild the object graph by
* replaying blockchain history. When this method exits successfully, the database will be open.
*/
void reindex(fc::path data_dir, const genesis_state_type& initial_allocation = genesis_state_type());
void reindex(fc::path data_dir);
/**
* @brief wipe Delete database from disk, and potentially the raw chain as well.
@ -488,8 +490,11 @@ namespace graphene { namespace chain {
const witness_object& _validate_block_header( const signed_block& next_block )const;
void create_block_summary(const signed_block& next_block);
//////////////////// db_witness_schedule.cpp ////////////////////
uint32_t update_witness_missed_blocks( const signed_block& b );
//////////////////// db_update.cpp ////////////////////
void update_global_dynamic_data( const signed_block& b );
void update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks );
void update_signing_witness(const witness_object& signing_witness, const signed_block& new_block);
void update_last_irreversible_block();
void clear_expired_transactions();
@ -558,6 +563,15 @@ namespace graphene { namespace chain {
node_property_object _node_property_object;
fc::hash_ctr_rng<secret_hash_type, 20> _random_number_generator;
bool _slow_replays = false;
/**
* Whether database is successfully opened or not.
*
* The database is considered open when there's no exception
* or assertion fail during database::open() method, and
* database::close() has not been called, or failed during execution.
*/
bool _opened = false;
};
namespace detail

View file

@ -51,8 +51,10 @@ class proposal_object : public abstract_object<proposal_object>
flat_set<account_id_type> available_owner_approvals;
flat_set<public_key_type> available_key_approvals;
account_id_type proposer;
std::string fail_reason;
bool is_authorized_to_execute(database& db) const;
bool is_authorized_to_execute(database& db)const;
};
/**

View file

@ -244,20 +244,6 @@ void_result proposal_update_evaluator::do_evaluate(const proposal_update_operati
"", ("id", id)("available", _proposal->available_owner_approvals) );
}
/* All authority checks happen outside of evaluators
if( (d.get_node_properties().skip_flags & database::skip_authority_check) == 0 )
{
for( const auto& id : o.key_approvals_to_add )
{
FC_ASSERT( trx_state->signed_by(id) );
}
for( const auto& id : o.key_approvals_to_remove )
{
FC_ASSERT( trx_state->signed_by(id) );
}
}
*/
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
@ -293,6 +279,9 @@ void_result proposal_update_evaluator::do_apply(const proposal_update_operation&
try {
_processed_transaction = d.push_proposal(*_proposal);
} catch(fc::exception& e) {
d.modify(*_proposal, [&e](proposal_object& p) {
p.fail_reason = e.to_string(fc::log_level(fc::log_level::all));
});
wlog("Proposed transaction ${id} failed to apply once approved with exception:\n----\n${reason}\n----\nWill try again when it expires.",
("id", o.proposal)("reason", e.to_detail_string()));
_proposal_failed = true;

View file

@ -43,14 +43,11 @@ bool proposal_object::is_authorized_to_execute(database& db) const
}
catch ( const fc::exception& e )
{
//idump((available_active_approvals));
//wlog((e.to_detail_string()));
return false;
}
return true;
}
void required_approval_index::object_inserted( const object& obj )
{
assert( dynamic_cast<const proposal_object*>(&obj) );

View file

@ -234,14 +234,12 @@ namespace graphene { namespace db {
fc::raw::unpack(ds, _next_id);
fc::raw::unpack(ds, open_ver);
FC_ASSERT( open_ver == get_object_version(), "Incompatible Version, the serialization of objects in this index has changed" );
try {
vector<char> tmp;
while( true )
{
fc::raw::unpack( ds, tmp );
load( tmp );
}
} catch ( const fc::exception& ){}
vector<char> tmp;
while( ds.remaining() > 0 )
{
fc::raw::unpack( ds, tmp );
load( tmp );
}
}
virtual void save( const path& db ) override

View file

@ -71,14 +71,20 @@ index& object_database::get_mutable_index(uint8_t space_id, uint8_t type_id)
void object_database::flush()
{
// ilog("Save object_database in ${d}", ("d", _data_dir));
fc::create_directories( _data_dir / "object_database.tmp" / "lock" );
for( uint32_t space = 0; space < _index.size(); ++space )
{
fc::create_directories( _data_dir / "object_database" / fc::to_string(space) );
fc::create_directories( _data_dir / "object_database.tmp" / fc::to_string(space) );
const auto types = _index[space].size();
for( uint32_t type = 0; type < types; ++type )
if( _index[space][type] )
_index[space][type]->save( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) );
_index[space][type]->save( _data_dir / "object_database.tmp" / fc::to_string(space)/fc::to_string(type) );
}
fc::remove_all( _data_dir / "object_database.tmp" / "lock" );
if( fc::exists( _data_dir / "object_database" ) )
fc::rename( _data_dir / "object_database", _data_dir / "object_database.old" );
fc::rename( _data_dir / "object_database.tmp", _data_dir / "object_database" );
fc::remove_all( _data_dir / "object_database.old" );
}
void object_database::wipe(const fc::path& data_dir)
@ -91,8 +97,13 @@ void object_database::wipe(const fc::path& data_dir)
void object_database::open(const fc::path& data_dir)
{ try {
ilog("Opening object database from ${d} ...", ("d", data_dir));
_data_dir = data_dir;
if( fc::exists( _data_dir / "object_database" / "lock" ) )
{
wlog("Ignoring locked object_database");
return;
}
ilog("Opening object database from ${d} ...", ("d", data_dir));
for( uint32_t space = 0; space < _index.size(); ++space )
for( uint32_t type = 0; type < _index[space].size(); ++type )
if( _index[space][type] )
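
flush() above now writes everything into object_database.tmp behind a lock marker and only then swaps directories, so a crash in the middle of a save leaves either the previous state or a detectable partial one that open() can ignore. A rough sketch of that save-to-temp-then-rename pattern using std::filesystem; illustrative only, not the fc API used above:

#include <filesystem>
#include <fstream>
#include <string>

namespace fs = std::filesystem;

void save_atomically( const fs::path& dir, const std::string& payload )
{
   const fs::path tmp = dir.string() + ".tmp";
   const fs::path old = dir.string() + ".old";

   fs::create_directories( tmp / "lock" );   // lock marker: tmp is still incomplete
   std::ofstream( tmp / "data" ) << payload; // write the new state
   fs::remove_all( tmp / "lock" );           // marker removed: tmp is complete

   if( fs::exists( dir ) )
      fs::rename( dir, old );                // keep the previous copy until the swap is done
   fs::rename( tmp, dir );                   // publish the new state
   fs::remove_all( old );                    // drop the previous copy
}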

View file

@ -118,8 +118,6 @@ void undo_database::undo()
_db.insert( std::move(*item.second) );
_stack.pop_back();
if( _stack.empty() )
_stack.emplace_back();
enable();
--_active_sessions;
} FC_CAPTURE_AND_RETHROW() }
@ -127,6 +125,12 @@ void undo_database::undo()
void undo_database::merge()
{
FC_ASSERT( _active_sessions > 0 );
if( _active_sessions == 1 && _stack.size() == 1 )
{
_stack.pop_back();
--_active_sessions;
return;
}
FC_ASSERT( _stack.size() >=2 );
auto& state = _stack.back();
auto& prev_state = _stack[_stack.size()-2];

@ -1 +1 @@
Subproject commit 1f3735e3624dbf14013420bf721acfeac6f49581
Subproject commit 50932bb5ff97388a973b3c233987e57202d79912

View file

@ -1260,13 +1260,11 @@ namespace graphene { namespace net { namespace detail {
wdump((inventory_to_advertise));
for (const item_id& item_to_advertise : inventory_to_advertise)
{
if (peer->inventory_advertised_to_peer.find(item_to_advertise) != peer->inventory_advertised_to_peer.end() )
wdump((*peer->inventory_advertised_to_peer.find(item_to_advertise)));
if (peer->inventory_peer_advertised_to_us.find(item_to_advertise) != peer->inventory_peer_advertised_to_us.end() )
wdump((*peer->inventory_peer_advertised_to_us.find(item_to_advertise)));
auto adv_to_peer = peer->inventory_advertised_to_peer.find(item_to_advertise);
auto adv_to_us = peer->inventory_peer_advertised_to_us.find(item_to_advertise);
if (peer->inventory_advertised_to_peer.find(item_to_advertise) == peer->inventory_advertised_to_peer.end() &&
peer->inventory_peer_advertised_to_us.find(item_to_advertise) == peer->inventory_peer_advertised_to_us.end())
if (adv_to_peer == peer->inventory_advertised_to_peer.end() &&
adv_to_us == peer->inventory_peer_advertised_to_us.end())
{
items_to_advertise_by_type[item_to_advertise.item_type].push_back(item_to_advertise.item_hash);
peer->inventory_advertised_to_peer.insert(peer_connection::timestamped_item_id(item_to_advertise, fc::time_point::now()));
@ -1275,6 +1273,13 @@ namespace graphene { namespace net { namespace detail {
testnetlog("advertising transaction ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", peer->get_remote_endpoint()));
dlog("advertising item ${id} to peer ${endpoint}", ("id", item_to_advertise.item_hash)("endpoint", peer->get_remote_endpoint()));
}
else
{
if (adv_to_peer != peer->inventory_advertised_to_peer.end() )
wdump( (*adv_to_peer) );
if (adv_to_us != peer->inventory_peer_advertised_to_us.end() )
wdump( (*adv_to_us) );
}
}
dlog("advertising ${count} new item(s) of ${types} type(s) to peer ${endpoint}",
("count", total_items_to_send_to_this_peer)

View file

@ -81,11 +81,23 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
{
graphene::chain::database& db = database();
vector<optional< operation_history_object > >& hist = db.get_applied_operations();
bool is_first = true;
auto skip_oho_id = [&is_first,&db,this]() {
if( is_first && db._undo_db.enabled() ) // this ensures that the current id is rolled back on undo
{
db.remove( db.create<operation_history_object>( []( operation_history_object& obj) {} ) );
is_first = false;
}
else
_oho_index->use_next_id();
};
for( optional< operation_history_object >& o_op : hist )
{
optional<operation_history_object> oho;
auto create_oho = [&]() {
is_first = false;
operation_history_object result = db.create<operation_history_object>( [&]( operation_history_object& h )
{
if( o_op.valid() )
@ -99,7 +111,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
{
// Note: the 2nd and 3rd checks above are for better performance, when the db is not clean,
// they will break consistency of account_stats.total_ops and removed_ops and most_recent_op
_oho_index->use_next_id();
skip_oho_id();
continue;
}
else if( !_partial_operations )
@ -179,7 +191,7 @@ void account_history_plugin_impl::update_account_histories( const signed_block&
}
}
if (_partial_operations && ! oho.valid())
_oho_index->use_next_id();
skip_oho_id();
}
}

View file

@ -65,7 +65,7 @@ void delayed_node_plugin::plugin_set_program_options(bpo::options_description& c
void delayed_node_plugin::connect()
{
my->client_connection = std::make_shared<fc::rpc::websocket_api_connection>(*my->client.connect(my->remote_endpoint), GRAPHENE_NET_MAX_NESTED_OBJECTS);
my->client_connection = std::make_shared<fc::rpc::websocket_api_connection>(my->client.connect(my->remote_endpoint), GRAPHENE_MAX_NESTED_OBJECTS);
my->database_api = my->client_connection->get_remote_api<graphene::app::database_api>(0);
my->client_connection_closed = my->client_connection->closed.connect([this] {
connection_failed();

View file

@ -206,7 +206,7 @@ block_production_condition::block_production_condition_enum witness_plugin::bloc
("n", capture["n"])("t", capture["t"])("c", capture["c"]));
break;
case block_production_condition::lag:
elog("Not producing block because node didn't wake up within 500ms of the slot time.");
elog("Not producing block because node didn't wake up within 2500ms of the slot time.");
break;
case block_production_condition::consecutive:
elog("Not producing block because the last block was generated by the same witness.\nThis node is probably disconnected from the network so block production has been disabled.\nDisable this check with --allow-consecutive option.");
@ -286,7 +286,7 @@ block_production_condition::block_production_condition_enum witness_plugin::mayb
// return block_production_condition::local_clock; //Not producing block because head block is less than a second old.
//}
if( llabs((scheduled_time - now).count()) > fc::milliseconds( 500 ).count() )
if( llabs((scheduled_time - now).count()) > fc::milliseconds( 2500 ).count() )
{
capture("scheduled_time", scheduled_time)("now", now);
return block_production_condition::lag;

View file

@ -718,8 +718,6 @@ public:
}
account_object get_account(account_id_type id) const
{
if( _wallet.my_accounts.get<by_id>().count(id) )
return *_wallet.my_accounts.get<by_id>().find(id);
auto rec = _remote_db->get_accounts({id}).front();
FC_ASSERT(rec);
return *rec;
@ -733,19 +731,6 @@ public:
// It's an ID
return get_account(*id);
} else {
// It's a name
if( _wallet.my_accounts.get<by_name>().count(account_name_or_id) )
{
auto local_account = *_wallet.my_accounts.get<by_name>().find(account_name_or_id);
auto blockchain_account = _remote_db->lookup_account_names({account_name_or_id}).front();
FC_ASSERT( blockchain_account );
if (local_account.id != blockchain_account->id)
elog("my account id ${id} different from blockchain id ${id2}", ("id", local_account.id)("id2", blockchain_account->id));
if (local_account.name != blockchain_account->name)
elog("my account name ${id} different from blockchain name ${id2}", ("id", local_account.name)("id2", blockchain_account->name));
return *_wallet.my_accounts.get<by_name>().find(account_name_or_id);
}
auto rec = _remote_db->lookup_account_names({account_name_or_id}).front();
FC_ASSERT( rec && rec->name == account_name_or_id );
return *rec;
@ -2204,77 +2189,15 @@ public:
signed_transaction sign_transaction(signed_transaction tx, bool broadcast = false)
{
flat_set<account_id_type> req_active_approvals;
flat_set<account_id_type> req_owner_approvals;
vector<authority> other_auths;
tx.get_required_authorities( req_active_approvals, req_owner_approvals, other_auths );
for( const auto& auth : other_auths )
for( const auto& a : auth.account_auths )
req_active_approvals.insert(a.first);
// std::merge lets us de-duplicate account_id's that occur in both
// sets, and dump them into a vector (as required by remote_db api)
// at the same time
vector<account_id_type> v_approving_account_ids;
std::merge(req_active_approvals.begin(), req_active_approvals.end(),
req_owner_approvals.begin() , req_owner_approvals.end(),
std::back_inserter(v_approving_account_ids));
/// TODO: fetch the accounts specified via other_auths as well.
vector< optional<account_object> > approving_account_objects =
_remote_db->get_accounts( v_approving_account_ids );
/// TODO: recursively check one layer deeper in the authority tree for keys
FC_ASSERT( approving_account_objects.size() == v_approving_account_ids.size() );
flat_map<account_id_type, account_object*> approving_account_lut;
size_t i = 0;
for( optional<account_object>& approving_acct : approving_account_objects )
{
if( !approving_acct.valid() )
{
wlog( "operation_get_required_auths said approval of non-existing account ${id} was needed",
("id", v_approving_account_ids[i]) );
i++;
continue;
}
approving_account_lut[ approving_acct->id ] = &(*approving_acct);
i++;
}
flat_set<public_key_type> approving_key_set;
for( account_id_type& acct_id : req_active_approvals )
{
const auto it = approving_account_lut.find( acct_id );
if( it == approving_account_lut.end() )
continue;
const account_object* acct = it->second;
vector<public_key_type> v_approving_keys = acct->active.get_keys();
for( const public_key_type& approving_key : v_approving_keys )
approving_key_set.insert( approving_key );
}
for( account_id_type& acct_id : req_owner_approvals )
{
const auto it = approving_account_lut.find( acct_id );
if( it == approving_account_lut.end() )
continue;
const account_object* acct = it->second;
vector<public_key_type> v_approving_keys = acct->owner.get_keys();
for( const public_key_type& approving_key : v_approving_keys )
approving_key_set.insert( approving_key );
}
for( const authority& a : other_auths )
{
for( const auto& k : a.key_auths )
approving_key_set.insert( k.first );
}
set<public_key_type> pks = _remote_db->get_potential_signatures(tx);
flat_set<public_key_type> owned_keys;
owned_keys.reserve(pks.size());
std::copy_if(pks.begin(), pks.end(), std::inserter(owned_keys, owned_keys.end()),
[this](const public_key_type &pk) { return _keys.find(pk) != _keys.end(); });
set<public_key_type> approving_key_set = _remote_db->get_required_signatures(tx, owned_keys);
auto dyn_props = get_dynamic_global_properties();
tx.set_reference_block( dyn_props.head_block_id );
tx.set_reference_block(dyn_props.head_block_id);
// first, some bookkeeping, expire old items from _recently_generated_transactions
// since transactions include the head block id, we just need the index for keeping transactions unique
@ -2288,23 +2211,11 @@ public:
uint32_t expiration_time_offset = 0;
for (;;)
{
tx.set_expiration( dyn_props.time + fc::seconds(30 + expiration_time_offset) );
tx.set_expiration(dyn_props.time + fc::seconds(30 + expiration_time_offset));
tx.signatures.clear();
for( public_key_type& key : approving_key_set )
{
auto it = _keys.find(key);
if( it != _keys.end() )
{
fc::optional<fc::ecc::private_key> privkey = wif_to_key( it->second );
FC_ASSERT( privkey.valid(), "Malformed private key in _keys" );
tx.sign( *privkey, _chain_id );
}
/// TODO: if transaction has enough signatures to be "valid" don't add any more,
/// there are cases where the wallet may have more keys than strictly necessary and
/// the transaction will be rejected if the transaction validates without requiring
/// all signatures provided
}
for (const public_key_type &key : approving_key_set)
tx.sign(get_private_key(key), _chain_id);
graphene::chain::transaction_id_type this_transaction_id = tx.id();
auto iter = _recently_generated_transactions.find(this_transaction_id);
@ -2326,11 +2237,11 @@ public:
{
try
{
_remote_net_broadcast->broadcast_transaction( tx );
_remote_net_broadcast->broadcast_transaction(tx);
}
catch (const fc::exception& e)
{
elog("Caught exception while broadcasting tx ${id}: ${e}", ("id", tx.id().str())("e", e.to_detail_string()) );
elog("Caught exception while broadcasting tx ${id}: ${e}", ("id", tx.id().str())("e", e.to_detail_string()));
throw;
}
}

View file

@ -174,7 +174,7 @@ int main( int argc, char** argv )
fc::http::websocket_client client;
idump((wdata.ws_server));
auto con = client.connect( wdata.ws_server );
auto apic = std::make_shared<fc::rpc::websocket_api_connection>(*con, GRAPHENE_MAX_NESTED_OBJECTS);
auto apic = std::make_shared<fc::rpc::websocket_api_connection>(con, GRAPHENE_MAX_NESTED_OBJECTS);
auto remote_api = apic->get_remote_api< login_api >(1);
edump((wdata.ws_user)(wdata.ws_password) );
@ -213,7 +213,7 @@ int main( int argc, char** argv )
_websocket_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){
std::cout << "here... \n";
wlog("." );
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(*c, GRAPHENE_MAX_NESTED_OBJECTS);
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(c, GRAPHENE_MAX_NESTED_OBJECTS);
wsc->register_api(wapi);
c->set_session_data( wsc );
});
@ -229,8 +229,8 @@ int main( int argc, char** argv )
auto _websocket_tls_server = std::make_shared<fc::http::websocket_tls_server>(cert_pem);
if( options.count("rpc-tls-endpoint") )
{
_websocket_tls_server->on_connection([&wapi]( const fc::http::websocket_connection_ptr& c ){
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(*c, GRAPHENE_MAX_NESTED_OBJECTS);
_websocket_tls_server->on_connection([&]( const fc::http::websocket_connection_ptr& c ){
auto wsc = std::make_shared<fc::rpc::websocket_api_connection>(c, GRAPHENE_MAX_NESTED_OBJECTS);
wsc->register_api(wapi);
c->set_session_data( wsc );
});

View file

@ -41,4 +41,14 @@ file(GLOB RANDOM_SOURCES "random/*.cpp")
add_executable( random_test ${RANDOM_SOURCES} ${COMMON_SOURCES} )
target_link_libraries( random_test graphene_chain graphene_app graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} )
file(GLOB CLI_SOURCES "cli/*.cpp")
add_executable( cli_test ${CLI_SOURCES} )
if(WIN32)
list(APPEND PLATFORM_SPECIFIC_LIBS ws2_32)
endif()
target_link_libraries( cli_test graphene_chain graphene_app graphene_witness graphene_wallet graphene_egenesis_none fc ${PLATFORM_SPECIFIC_LIBS} )
if(MSVC)
set_source_files_properties( cli/main.cpp PROPERTIES COMPILE_FLAGS "/bigobj" )
endif(MSVC)
add_subdirectory( generate_empty_blocks )

View file

@ -74,13 +74,12 @@ BOOST_AUTO_TEST_CASE( two_node_network )
cfg.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false));
cfg2.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app2_dir), false));
BOOST_TEST_MESSAGE( "Starting app1 and waiting 500 ms" );
BOOST_TEST_MESSAGE( "Starting app1 and waiting 1500 ms" );
app1.startup();
fc::usleep(fc::milliseconds(500));
BOOST_TEST_MESSAGE( "Starting app2 and waiting 500 ms" );
fc::usleep(fc::milliseconds(1500));
BOOST_TEST_MESSAGE( "Starting app2 and waiting 1500 ms" );
app2.startup();
fc::usleep(fc::milliseconds(500));
fc::usleep(fc::milliseconds(1500));
BOOST_REQUIRE_EQUAL(app1.p2p_node()->get_connection_count(), 1);
BOOST_CHECK_EQUAL(std::string(app1.p2p_node()->get_connected_peers().front().host.get_address()), "127.0.0.1");

View file

@ -68,7 +68,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench )
{
database db;
db.open(data_dir.path(), [&]{return genesis_state;});
db.open(data_dir.path(), [&]{return genesis_state;}, "test");
for( int i = 11; i < account_count + 11; ++i)
BOOST_CHECK(db.get_balance(account_id_type(i), asset_id_type()).amount == GRAPHENE_MAX_SHARE_SUPPLY / account_count);
@ -81,7 +81,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench )
database db;
fc::time_point start_time = fc::time_point::now();
db.open(data_dir.path(), [&]{return genesis_state;});
db.open(data_dir.path(), [&]{return genesis_state;}, "test");
ilog("Opened database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000));
for( int i = 11; i < account_count + 11; ++i)
@ -116,7 +116,7 @@ BOOST_AUTO_TEST_CASE( genesis_and_persistence_bench )
auto start_time = fc::time_point::now();
wlog( "about to start reindex..." );
db.reindex(data_dir.path(), genesis_state);
db.open(data_dir.path(), [&]{return genesis_state;}, "force_wipe");
ilog("Replayed database in ${t} milliseconds.", ("t", (fc::time_point::now() - start_time).count() / 1000));
for( int i = 0; i < blocks_to_produce; ++i )

tests/cli/main.cpp (new file, 441 lines)
View file

@ -0,0 +1,441 @@
/*
* Copyright (c) 2019 PBSA, and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <graphene/app/application.hpp>
#include <graphene/app/plugin.hpp>
#include <graphene/app/api.hpp>
#include <graphene/utilities/tempdir.hpp>
#include <graphene/bookie/bookie_plugin.hpp>
#include <graphene/egenesis/egenesis.hpp>
#include <graphene/wallet/wallet.hpp>
#include <graphene/chain/config.hpp>
#include <fc/thread/thread.hpp>
#include <fc/network/http/websocket.hpp>
#include <fc/rpc/websocket_api.hpp>
#include <fc/rpc/cli.hpp>
#include <fc/crypto/base58.hpp>
#include <fc/crypto/aes.hpp>
#include <fc/smart_ref_impl.hpp>
#ifdef _WIN32
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0501
#endif
#include <winsock2.h>
#include <WS2tcpip.h>
#else
#include <sys/socket.h>
#include <netinet/ip.h>
#include <sys/types.h>
#endif
#include <thread>
#include <boost/filesystem/path.hpp>
#define BOOST_TEST_MODULE Test Application
#include <boost/test/included/unit_test.hpp>
/*****
* Global Initialization for Windows
* ( sets up Winsock stuff )
*/
#ifdef _WIN32
int sockInit(void)
{
WSADATA wsa_data;
return WSAStartup(MAKEWORD(1,1), &wsa_data);
}
int sockQuit(void)
{
return WSACleanup();
}
#endif
/*********************
* Helper Methods
*********************/
#include "../common/genesis_file_util.hpp"
#define INVOKE(test) ((struct test*)this)->test_method();
//////
/// @brief attempt to find an available port on localhost
/// @returns an available port number, or -1 on error
/////
int get_available_port()
{
struct sockaddr_in sin;
int socket_fd = socket(AF_INET, SOCK_STREAM, 0);
if (socket_fd == -1)
return -1;
sin.sin_family = AF_INET;
sin.sin_port = 0;
sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
if (::bind(socket_fd, (struct sockaddr*)&sin, sizeof(struct sockaddr_in)) == -1)
return -1;
socklen_t len = sizeof(sin);
if (getsockname(socket_fd, (struct sockaddr *)&sin, &len) == -1)
return -1;
#ifdef _WIN32
closesocket(socket_fd);
#else
close(socket_fd);
#endif
return ntohs(sin.sin_port);
}
///////////
/// @brief Start the application
/// @param app_dir the temporary directory to use
/// @param server_port_number to be filled with the rpc endpoint port number
/// @returns the application object
//////////
std::shared_ptr<graphene::app::application> start_application(fc::temp_directory& app_dir, int& server_port_number) {
std::shared_ptr<graphene::app::application> app1(new graphene::app::application{});
app1->register_plugin< graphene::bookie::bookie_plugin>();
app1->startup_plugins();
boost::program_options::variables_map cfg;
#ifdef _WIN32
sockInit();
#endif
server_port_number = get_available_port();
cfg.emplace(
"rpc-endpoint",
boost::program_options::variable_value(string("127.0.0.1:" + std::to_string(server_port_number)), false)
);
cfg.emplace("genesis-json", boost::program_options::variable_value(create_genesis_file(app_dir), false));
cfg.emplace("seed-nodes", boost::program_options::variable_value(string("[]"), false));
app1->initialize(app_dir.path(), cfg);
app1->initialize_plugins(cfg);
app1->startup_plugins();
app1->startup();
fc::usleep(fc::milliseconds(500));
return app1;
}
///////////
/// Send a block to the db
/// @param app the application
/// @param returned_block the signed block
/// @returns true on success
///////////
bool generate_block(std::shared_ptr<graphene::app::application> app, graphene::chain::signed_block& returned_block)
{
try {
fc::ecc::private_key committee_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan")));
auto db = app->chain_database();
returned_block = db->generate_block( db->get_slot_time(1),
db->get_scheduled_witness(1),
committee_key,
database::skip_nothing );
return true;
} catch (exception &e) {
return false;
}
}
bool generate_block(std::shared_ptr<graphene::app::application> app)
{
graphene::chain::signed_block returned_block;
return generate_block(app, returned_block);
}
///////////
/// @brief Skip intermediate blocks, and generate a maintenance block
/// @param app the application
/// @returns true on success
///////////
bool generate_maintenance_block(std::shared_ptr<graphene::app::application> app) {
try {
fc::ecc::private_key committee_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("nathan")));
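// ~0 enables every skip flag so the generated maintenance block bypasses all validation checks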
uint32_t skip = ~0;
auto db = app->chain_database();
auto maint_time = db->get_dynamic_global_properties().next_maintenance_time;
auto slots_to_miss = db->get_slot_at_time(maint_time);
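// producing directly at the maintenance slot skips every intermediate block slot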
db->generate_block(db->get_slot_time(slots_to_miss),
db->get_scheduled_witness(slots_to_miss),
committee_key,
skip);
return true;
} catch (exception& e)
{
return false;
}
}
///////////
/// @brief a class to make connecting to the application server easier
///////////
class client_connection
{
public:
/////////
// constructor
/////////
client_connection(
std::shared_ptr<graphene::app::application> app,
const fc::temp_directory& data_dir,
const int server_port_number
)
{
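// configure the wallet to talk to the in-process node started by the fixture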
wallet_data.chain_id = app->chain_database()->get_chain_id();
wallet_data.ws_server = "ws://127.0.0.1:" + std::to_string(server_port_number);
wallet_data.ws_user = "";
wallet_data.ws_password = "";
websocket_connection = websocket_client.connect( wallet_data.ws_server );
api_connection = std::make_shared<fc::rpc::websocket_api_connection>(websocket_connection, GRAPHENE_MAX_NESTED_OBJECTS);
remote_login_api = api_connection->get_remote_api< graphene::app::login_api >(1);
BOOST_CHECK(remote_login_api->login( wallet_data.ws_user, wallet_data.ws_password ) );
wallet_api_ptr = std::make_shared<graphene::wallet::wallet_api>(wallet_data, remote_login_api);
wallet_filename = data_dir.path().generic_string() + "/wallet.json";
wallet_api_ptr->set_wallet_filename(wallet_filename);
wallet_api = fc::api<graphene::wallet::wallet_api>(wallet_api_ptr);
wallet_cli = std::make_shared<fc::rpc::cli>(GRAPHENE_MAX_NESTED_OBJECTS);
for( auto& name_formatter : wallet_api_ptr->get_result_formatters() )
wallet_cli->format_result( name_formatter.first, name_formatter.second );
boost::signals2::scoped_connection closed_connection(websocket_connection->closed.connect([=]{
cerr << "Server has disconnected us.\n";
wallet_cli->stop();
}));
(void)(closed_connection);
}
~client_connection()
{
// wait for everything to finish up
fc::usleep(fc::milliseconds(500));
}
public:
fc::http::websocket_client websocket_client;
graphene::wallet::wallet_data wallet_data;
fc::http::websocket_connection_ptr websocket_connection;
std::shared_ptr<fc::rpc::websocket_api_connection> api_connection;
fc::api<login_api> remote_login_api;
std::shared_ptr<graphene::wallet::wallet_api> wallet_api_ptr;
fc::api<graphene::wallet::wallet_api> wallet_api;
std::shared_ptr<fc::rpc::cli> wallet_cli;
std::string wallet_filename;
};
///////////////////////////////
// Cli Wallet Fixture
///////////////////////////////
struct cli_fixture
{
class dummy
{
public:
~dummy()
{
// wait for everything to finish up
fc::usleep(fc::milliseconds(500));
}
};
dummy dmy;
int server_port_number;
fc::temp_directory app_dir;
std::shared_ptr<graphene::app::application> app1;
client_connection con;
std::vector<std::string> nathan_keys;
cli_fixture() :
server_port_number(0),
app_dir( graphene::utilities::temp_directory_path() ),
app1( start_application(app_dir, server_port_number) ),
con( app1, app_dir, server_port_number ),
nathan_keys( {"5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"} )
{
BOOST_TEST_MESSAGE("Setup cli_wallet::boost_fixture_test_case");
using namespace graphene::chain;
using namespace graphene::app;
try
{
BOOST_TEST_MESSAGE("Setting wallet password");
con.wallet_api_ptr->set_password("supersecret");
con.wallet_api_ptr->unlock("supersecret");
// import Nathan account
BOOST_TEST_MESSAGE("Importing nathan key");
BOOST_CHECK_EQUAL(nathan_keys[0], "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3");
BOOST_CHECK(con.wallet_api_ptr->import_key("nathan", nathan_keys[0]));
} catch( fc::exception& e ) {
edump((e.to_detail_string()));
throw;
}
}
~cli_fixture()
{
BOOST_TEST_MESSAGE("Cleanup cli_wallet::boost_fixture_test_case");
// wait for everything to finish up
fc::usleep(fc::seconds(1));
app1->shutdown();
#ifdef _WIN32
sockQuit();
#endif
}
};
///////////////////////////////
// Tests
///////////////////////////////
////////////////
// Start a server and connect using the same calls as the CLI
////////////////
BOOST_FIXTURE_TEST_CASE( cli_connect, cli_fixture )
{
BOOST_TEST_MESSAGE("Testing wallet connection.");
}
BOOST_FIXTURE_TEST_CASE( upgrade_nathan_account, cli_fixture )
{
try
{
BOOST_TEST_MESSAGE("Upgrade Nathan's account");
account_object nathan_acct_before_upgrade, nathan_acct_after_upgrade;
std::vector<signed_transaction> import_txs;
signed_transaction upgrade_tx;
BOOST_TEST_MESSAGE("Importing nathan's balance");
import_txs = con.wallet_api_ptr->import_balance("nathan", nathan_keys, true);
nathan_acct_before_upgrade = con.wallet_api_ptr->get_account("nathan");
BOOST_CHECK(generate_block(app1));
// upgrade nathan
BOOST_TEST_MESSAGE("Upgrading Nathan to LTM");
upgrade_tx = con.wallet_api_ptr->upgrade_account("nathan", true);
nathan_acct_after_upgrade = con.wallet_api_ptr->get_account("nathan");
// verify that the upgrade was successful
BOOST_CHECK_PREDICATE(
std::not_equal_to<uint32_t>(),
(nathan_acct_before_upgrade.membership_expiration_date.sec_since_epoch())
(nathan_acct_after_upgrade.membership_expiration_date.sec_since_epoch())
);
BOOST_CHECK(nathan_acct_after_upgrade.is_lifetime_member());
} catch( fc::exception& e ) {
edump((e.to_detail_string()));
throw;
}
}
BOOST_FIXTURE_TEST_CASE( create_new_account, cli_fixture )
{
try
{
INVOKE(upgrade_nathan_account);
// create a new account
graphene::wallet::brain_key_info bki = con.wallet_api_ptr->suggest_brain_key();
BOOST_CHECK(!bki.brain_priv_key.empty());
signed_transaction create_acct_tx = con.wallet_api_ptr->create_account_with_brain_key(
bki.brain_priv_key, "jmjatlanta", "nathan", "nathan", true
);
// save the private key for this new account in the wallet file
BOOST_CHECK(con.wallet_api_ptr->import_key("jmjatlanta", bki.wif_priv_key));
con.wallet_api_ptr->save_wallet_file(con.wallet_filename);
// attempt to give jmjatlanta some CORE
BOOST_TEST_MESSAGE("Transferring CORE from Nathan to jmjatlanta");
signed_transaction transfer_tx = con.wallet_api_ptr->transfer(
"nathan", "jmjatlanta", "10000", "1.3.0", "Here are some CORE token for your new account", true
);
} catch( fc::exception& e ) {
edump((e.to_detail_string()));
throw;
}
}
///////////////////////
// Start a server and connect using the same calls as the CLI
// Vote for two witnesses, and make sure they both stay there
// after a maintenance block
///////////////////////
BOOST_FIXTURE_TEST_CASE( cli_vote_for_2_witnesses, cli_fixture )
{
try
{
BOOST_TEST_MESSAGE("Cli Vote Test for 2 Witnesses");
INVOKE(upgrade_nathan_account); // just to fund nathan
// get the details for init1
witness_object init1_obj = con.wallet_api_ptr->get_witness("init1");
int init1_start_votes = init1_obj.total_votes;
// Vote for a witness
signed_transaction vote_witness1_tx = con.wallet_api_ptr->vote_for_witness("nathan", "init1", true, true);
// generate a block to get things started
BOOST_CHECK(generate_block(app1));
// wait for a maintenance interval
BOOST_CHECK(generate_maintenance_block(app1));
// Verify that the vote is there
init1_obj = con.wallet_api_ptr->get_witness("init1");
witness_object init2_obj = con.wallet_api_ptr->get_witness("init2");
int init1_middle_votes = init1_obj.total_votes;
BOOST_CHECK(init1_middle_votes > init1_start_votes);
// Vote for a 2nd witness
int init2_start_votes = init2_obj.total_votes;
signed_transaction vote_witness2_tx = con.wallet_api_ptr->vote_for_witness("nathan", "init2", true, true);
// send another block to trigger maintenance interval
BOOST_CHECK(generate_maintenance_block(app1));
// Verify that both the first vote and the 2nd are there
init2_obj = con.wallet_api_ptr->get_witness("init2");
init1_obj = con.wallet_api_ptr->get_witness("init1");
int init2_middle_votes = init2_obj.total_votes;
BOOST_CHECK(init2_middle_votes > init2_start_votes);
int init1_last_votes = init1_obj.total_votes;
BOOST_CHECK(init1_last_votes > init1_start_votes);
} catch( fc::exception& e ) {
edump((e.to_detail_string()));
throw;
}
}

View file

@ -109,6 +109,24 @@ database_fixture::database_fixture()
genesis_state.initial_parameters.current_fees->zero_all_fees();
open_database();
// enable account tracking in ahplugin for the special track_account test case
if( !options.count("track-account") && boost::unit_test::framework::current_test_case().p_name.value == "track_account") {
std::vector<std::string> track_account;
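// note the escaped quotes: each id is passed as a JSON-style quoted string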
std::string track = "\"1.2.18\"";
track_account.push_back(track);
options.insert(std::make_pair("track-account", boost::program_options::variable_value(track_account, false)));
options.insert(std::make_pair("partial-operations", boost::program_options::variable_value(true, false)));
}
// track two accounts for the track_account2 test case
if( !options.count("track-account") && boost::unit_test::framework::current_test_case().p_name.value == "track_account2") {
std::vector<std::string> track_account;
std::string track = "\"1.2.0\"";
track_account.push_back(track);
track = "\"1.2.17\"";
track_account.push_back(track);
options.insert(std::make_pair("track-account", boost::program_options::variable_value(track_account, false)));
}
// app.initialize();
ahplugin->plugin_set_app(&app);
ahplugin->plugin_initialize(options);
@ -355,7 +373,7 @@ void database_fixture::open_database()
{
if( !data_dir ) {
data_dir = fc::temp_directory( graphene::utilities::temp_directory_path() );
db.open(data_dir->path(), [this]{return genesis_state;});
db.open(data_dir->path(), [this]{return genesis_state;}, "test");
}
}

View file

@ -123,7 +123,7 @@ int main( int argc, char** argv )
database db;
fc::path db_path = data_dir / "db";
db.open(db_path, [&]() { return genesis; } );
db.open(db_path, [&]() { return genesis; }, "TEST" );
uint32_t slot = 1;
uint32_t missed = 0;

View file

@ -59,7 +59,7 @@ BOOST_AUTO_TEST_CASE( simple_single_signature )
sign(trx, nathan_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 500);
BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast<int64_t>(old_balance - 500));
} catch (fc::exception& e) {
edump((e.to_detail_string()));
throw;
@ -97,25 +97,25 @@ BOOST_AUTO_TEST_CASE( any_two_of_three )
GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception);
sign(trx, nathan_key2);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 500);
BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast<int64_t>(old_balance - 500));
trx.signatures.clear();
sign(trx, nathan_key2);
sign(trx, nathan_key3);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1000);
BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast<int64_t>(old_balance - 1000));
trx.signatures.clear();
sign(trx, nathan_key1);
sign(trx, nathan_key3);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1500);
BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast<int64_t>(old_balance - 1500));
trx.signatures.clear();
//sign(trx, fc::ecc::private_key::generate());
sign(trx,nathan_key3);
GRAPHENE_CHECK_THROW(PUSH_TX( db, trx, database::skip_transaction_dupe_check ), fc::exception);
BOOST_CHECK_EQUAL(get_balance(nathan, core), old_balance - 1500);
BOOST_CHECK_EQUAL(get_balance(nathan, core), static_cast<int64_t>(old_balance - 1500));
} catch (fc::exception& e) {
edump((e.to_detail_string()));
throw;
@ -165,7 +165,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts )
BOOST_TEST_MESSAGE( "Attempting to transfer with parent1 and parent2 signature, should succeed" );
sign(trx,parent1_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 500);
BOOST_CHECK_EQUAL(get_balance(child, core), static_cast<int64_t>(old_balance - 500));
trx.operations.clear();
trx.signatures.clear();
@ -180,7 +180,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts )
sign(trx,parent1_key);
sign(trx,parent2_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_REQUIRE_EQUAL(child.active.num_auths(), 3);
BOOST_REQUIRE_EQUAL(child.active.num_auths(), 3u);
trx.operations.clear();
trx.signatures.clear();
}
@ -203,13 +203,13 @@ BOOST_AUTO_TEST_CASE( recursive_accounts )
BOOST_TEST_MESSAGE( "Attempting transfer both parents, should succeed" );
sign(trx, parent1_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 1000);
BOOST_CHECK_EQUAL(get_balance(child, core), static_cast<int64_t>(old_balance - 1000));
trx.signatures.clear();
BOOST_TEST_MESSAGE( "Attempting transfer with just child key, should succeed" );
sign(trx, child_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 1500);
BOOST_CHECK_EQUAL(get_balance(child, core), static_cast<int64_t>(old_balance - 1500));
trx.operations.clear();
trx.signatures.clear();
@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts )
BOOST_TEST_MESSAGE( "Attempt to transfer using parent2_key and grandparent_key" );
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 2000);
BOOST_CHECK_EQUAL(get_balance(child, core), static_cast<int64_t>(old_balance - 2000));
trx.clear();
BOOST_TEST_MESSAGE( "Update grandparent account authority to be committee account" );
@ -268,7 +268,7 @@ BOOST_AUTO_TEST_CASE( recursive_accounts )
trx.signatures.clear();
sign(trx, child_key);
PUSH_TX( db, trx, database::skip_transaction_dupe_check );
BOOST_CHECK_EQUAL(get_balance(child, core), old_balance - 2500);
BOOST_CHECK_EQUAL(get_balance(child, core), static_cast<int64_t>(old_balance - 2500));
trx.operations.clear();
trx.signatures.clear();
@ -329,17 +329,17 @@ BOOST_AUTO_TEST_CASE( proposed_single_account )
vector<authority> other;
flat_set<account_id_type> active_set, owner_set;
operation_get_required_authorities(op,active_set,owner_set,other);
BOOST_CHECK_EQUAL(active_set.size(), 1);
BOOST_CHECK_EQUAL(owner_set.size(), 0);
BOOST_CHECK_EQUAL(other.size(), 0);
BOOST_CHECK_EQUAL(active_set.size(), 1lu);
BOOST_CHECK_EQUAL(owner_set.size(), 0lu);
BOOST_CHECK_EQUAL(other.size(), 0lu);
BOOST_CHECK(*active_set.begin() == moneyman.get_id());
active_set.clear();
other.clear();
operation_get_required_authorities(op.proposed_ops.front().op,active_set,owner_set,other);
BOOST_CHECK_EQUAL(active_set.size(), 1);
BOOST_CHECK_EQUAL(owner_set.size(), 0);
BOOST_CHECK_EQUAL(other.size(), 0);
BOOST_CHECK_EQUAL(active_set.size(), 1lu);
BOOST_CHECK_EQUAL(owner_set.size(), 0lu);
BOOST_CHECK_EQUAL(other.size(), 0lu);
BOOST_CHECK(*active_set.begin() == nathan.id);
}
@ -349,10 +349,10 @@ BOOST_AUTO_TEST_CASE( proposed_single_account )
sign( trx, init_account_priv_key );
const proposal_object& proposal = db.get<proposal_object>(PUSH_TX( db, trx ).operation_results.front().get<object_id_type>());
BOOST_CHECK_EQUAL(proposal.required_active_approvals.size(), 1);
BOOST_CHECK_EQUAL(proposal.available_active_approvals.size(), 0);
BOOST_CHECK_EQUAL(proposal.required_owner_approvals.size(), 0);
BOOST_CHECK_EQUAL(proposal.available_owner_approvals.size(), 0);
BOOST_CHECK_EQUAL(proposal.required_active_approvals.size(), 1lu);
BOOST_CHECK_EQUAL(proposal.available_active_approvals.size(), 0lu);
BOOST_CHECK_EQUAL(proposal.required_owner_approvals.size(), 0lu);
BOOST_CHECK_EQUAL(proposal.available_owner_approvals.size(), 0lu);
BOOST_CHECK(*proposal.required_active_approvals.begin() == nathan.id);
proposal_update_operation pup;
@ -389,6 +389,49 @@ BOOST_AUTO_TEST_CASE( proposed_single_account )
}
}
BOOST_AUTO_TEST_CASE( proposal_failure )
{
try
{
ACTORS( (bob) (alice) );
fund( bob, asset(1000000) );
fund( alice, asset(1000000) );
// create proposal that will eventually fail due to lack of funds
transfer_operation top;
top.to = alice_id;
top.from = bob_id;
top.amount = asset(2000000);
proposal_create_operation pop;
pop.proposed_ops.push_back( { top } );
pop.expiration_time = db.head_block_time() + fc::days(1);
pop.fee_paying_account = bob_id;
trx.operations.push_back( pop );
trx.signatures.clear();
sign( trx, bob_private_key );
processed_transaction processed = PUSH_TX( db, trx );
proposal_object prop = db.get<proposal_object>(processed.operation_results.front().get<object_id_type>());
trx.clear();
generate_block();
// add signature
proposal_update_operation up_op;
up_op.proposal = prop.id;
up_op.fee_paying_account = bob_id;
up_op.active_approvals_to_add.emplace( bob_id );
trx.operations.push_back( up_op );
sign( trx, bob_private_key );
PUSH_TX( db, trx );
trx.clear();
// check fail reason
const proposal_object& result = db.get<proposal_object>(prop.id);
BOOST_CHECK(!result.fail_reason.empty());
BOOST_CHECK_EQUAL( result.fail_reason.substr(0, 16), "Assert Exception");
}
FC_LOG_AND_RETHROW()
}
/// Verify that committee authority cannot be invoked in a normal transaction
BOOST_AUTO_TEST_CASE( committee_authority )
{ try {
@ -478,9 +521,10 @@ BOOST_AUTO_TEST_CASE( committee_authority )
// Should throw because the transaction is now in review.
GRAPHENE_CHECK_THROW(PUSH_TX( db, trx ), fc::exception);
// generate_blocks(prop.expiration_time);
// fails
// BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 100000);
generate_blocks(prop.expiration_time);
BOOST_CHECK_EQUAL(get_balance(nathan, asset_id_type()(db)), 100000);
// proposal deleted
BOOST_CHECK_THROW( db.get<proposal_object>(prop.id), fc::exception );
} FC_LOG_AND_RETHROW() }
BOOST_FIXTURE_TEST_CASE( fired_committee_members, database_fixture )
@ -696,7 +740,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_delete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 1lu);
std::swap(uop.active_approvals_to_add, uop.active_approvals_to_remove);
trx.operations.push_back(uop);
@ -704,7 +748,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_delete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 0);
BOOST_CHECK_EQUAL(prop.available_active_approvals.size(), 0lu);
}
{
@ -758,8 +802,8 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture )
}
const proposal_object& prop = *db.get_index_type<proposal_index>().indices().begin();
BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1lu);
BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1lu);
BOOST_CHECK(!prop.is_authorized_to_execute(db));
{
@ -772,7 +816,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 1lu);
std::swap(uop.owner_approvals_to_add, uop.owner_approvals_to_remove);
trx.operations.push_back(uop);
@ -780,7 +824,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_delete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 0);
BOOST_CHECK_EQUAL(prop.available_owner_approvals.size(), 0lu);
}
{
@ -835,8 +879,8 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture )
}
const proposal_object& prop = *db.get_index_type<proposal_index>().indices().begin();
BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.required_active_approvals.size(), 1lu);
BOOST_CHECK_EQUAL(prop.required_owner_approvals.size(), 1lu);
BOOST_CHECK(!prop.is_authorized_to_execute(db));
{
@ -852,7 +896,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1lu);
std::swap(uop.key_approvals_to_add, uop.key_approvals_to_remove);
trx.operations.push_back(uop);
@ -862,7 +906,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 0);
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 0lu);
std::swap(uop.key_approvals_to_add, uop.key_approvals_to_remove);
trx.operations.push_back(uop);
@ -872,7 +916,7 @@ BOOST_FIXTURE_TEST_CASE( proposal_owner_authority_complete, database_fixture )
PUSH_TX( db, trx );
trx.clear();
BOOST_CHECK(!prop.is_authorized_to_execute(db));
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1);
BOOST_CHECK_EQUAL(prop.available_key_approvals.size(), 1lu);
uop.key_approvals_to_add.clear();
uop.owner_approvals_to_add.insert(nathan.get_id());

View file

@ -540,4 +540,19 @@ BOOST_AUTO_TEST_CASE( merkle_root )
BOOST_CHECK( block.calculate_merkle_root() == c(dO) );
}
/**
* Reproduces https://github.com/bitshares/bitshares-core/issues/888 and tests the fix for it.
*/
BOOST_AUTO_TEST_CASE( bitasset_feed_expiration_test )
{
time_point_sec now = fc::time_point::now();
asset_bitasset_data_object o;
o.current_feed_publication_time = now - fc::hours(1);
o.options.feed_lifetime_sec = std::numeric_limits<uint32_t>::max() - 1;
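// lifetime is near uint32 max, so a naive publication_time + lifetime sum would overflow (issue #888); the feed must still not be reported as expired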
BOOST_CHECK( !o.feed_is_expired( now ) );
}
BOOST_AUTO_TEST_SUITE_END()

View file

@ -33,6 +33,7 @@
#include <graphene/chain/proposal_object.hpp>
#include <graphene/chain/market_object.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/utilities/tempdir.hpp>
@ -136,9 +137,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks )
// TODO: Don't generate this here
auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) );
signed_block cutoff_block;
uint32_t last_block = 0;
{
database db;
db.open(data_dir.path(), make_genesis );
db.open(data_dir.path(), make_genesis, "TEST" );
b = db.generate_block(db.get_slot_time(1), db.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
// TODO: Change this test when we correct #406
@ -155,6 +157,7 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks )
if( cutoff_height >= 200 )
{
cutoff_block = *(db.fetch_block_by_number( cutoff_height ));
last_block = db.head_block_num();
break;
}
}
@ -162,8 +165,10 @@ BOOST_AUTO_TEST_CASE( generate_empty_blocks )
}
{
database db;
db.open(data_dir.path(), []{return genesis_state_type();});
BOOST_CHECK_EQUAL( db.head_block_num(), cutoff_block.block_num() );
db.open(data_dir.path(), []{return genesis_state_type();}, "TEST");
BOOST_CHECK_EQUAL( db.head_block_num(), last_block );
while( db.head_block_num() > cutoff_block.block_num() )
db.pop_block();
b = cutoff_block;
for( uint32_t i = 0; i < 200; ++i )
{
@ -187,7 +192,7 @@ BOOST_AUTO_TEST_CASE( undo_block )
fc::temp_directory data_dir( graphene::utilities::temp_directory_path() );
{
database db;
db.open(data_dir.path(), make_genesis);
db.open(data_dir.path(), make_genesis, "TEST");
fc::time_point_sec now( GRAPHENE_TESTING_GENESIS_TIMESTAMP );
std::vector< time_point_sec > time_stack;
@ -236,57 +241,112 @@ BOOST_AUTO_TEST_CASE( fork_blocks )
fc::temp_directory data_dir2( graphene::utilities::temp_directory_path() );
database db1;
db1.open(data_dir1.path(), make_genesis);
db1.open(data_dir1.path(), make_genesis, "TEST");
database db2;
db2.open(data_dir2.path(), make_genesis);
db2.open(data_dir2.path(), make_genesis, "TEST");
BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() );
auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) );
for( uint32_t i = 0; i < 10; ++i )
BOOST_TEST_MESSAGE( "Adding blocks 1 through 10" );
for( uint32_t i = 1; i <= 10; ++i )
{
auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
try {
PUSH_BLOCK( db2, b );
} FC_CAPTURE_AND_RETHROW( ("db2") );
}
for( uint32_t i = 10; i < 13; ++i )
for( uint32_t j = 0; j <= 4; j += 4 )
{
auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
}
string db1_tip = db1.head_block_id().str();
uint32_t next_slot = 3;
for( uint32_t i = 13; i < 16; ++i )
{
auto b = db2.generate_block(db2.get_slot_time(next_slot), db2.get_scheduled_witness(next_slot), init_account_priv_key, database::skip_nothing);
next_slot = 1;
// notify both databases of the new block.
// only db2 should switch to the new fork, db1 should not
PUSH_BLOCK( db1, b );
// add three blocks (11+j through 13+j) to db1 only
BOOST_TEST_MESSAGE( "Adding 3 blocks to db1 only" );
for( uint32_t i = 11 + j; i <= 13 + j; ++i )
{
BOOST_TEST_MESSAGE( i );
auto b = db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
}
string db1_tip = db1.head_block_id().str();
// add three different blocks (11+j through 13+j) to db2 only
BOOST_TEST_MESSAGE( "Add 3 different blocks to db2 only" );
uint32_t next_slot = 3;
for( uint32_t i = 11 + j; i <= 13 + j; ++i )
{
BOOST_TEST_MESSAGE( i );
auto b = db2.generate_block(db2.get_slot_time(next_slot), db2.get_scheduled_witness(next_slot), init_account_priv_key, database::skip_nothing);
next_slot = 1;
// notify both databases of the new block.
// only db2 should switch to the new fork, db1 should not
PUSH_BLOCK( db1, b );
BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip);
BOOST_CHECK_EQUAL(db2.head_block_id().str(), b.id().str());
}
//The two databases are on distinct forks now, but at the same height.
BOOST_CHECK_EQUAL(db1.head_block_num(), 13u + j);
BOOST_CHECK_EQUAL(db2.head_block_num(), 13u + j);
BOOST_CHECK( db1.head_block_id() != db2.head_block_id() );
//Make a block on db2, make it invalid, then
//pass it to db1 and assert that db1 doesn't switch to the new fork.
signed_block good_block;
{
auto b = db2.generate_block(db2.get_slot_time(1), db2.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
good_block = b;
b.transactions.emplace_back(signed_transaction());
b.transactions.back().operations.emplace_back(transfer_operation());
b.sign( init_account_priv_key );
BOOST_CHECK_EQUAL(b.block_num(), 14u + j);
GRAPHENE_CHECK_THROW(PUSH_BLOCK( db1, b ), fc::exception);
// At this point, `fetch_block_by_number` will fetch the block from fork_db,
// so the issue fixed in PR #938 cannot be reproduced here
// https://github.com/bitshares/bitshares-core/pull/938
// https://github.com/bitshares/bitshares-core/pull/938
fc::optional<signed_block> previous_block = db1.fetch_block_by_number(1);
BOOST_CHECK ( previous_block.valid() );
uint32_t db1_blocks = db1.head_block_num();
for( uint32_t curr_block_num = 2; curr_block_num <= db1_blocks; ++curr_block_num )
{
fc::optional<signed_block> curr_block = db1.fetch_block_by_number( curr_block_num );
BOOST_CHECK( curr_block.valid() );
BOOST_CHECK_EQUAL( curr_block->previous.str(), previous_block->id().str() );
previous_block = curr_block;
}
}
BOOST_CHECK_EQUAL(db1.head_block_num(), 13u + j);
BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip);
BOOST_CHECK_EQUAL(db2.head_block_id().str(), b.id().str());
if( j == 0 )
{
// assert that db1 switches to new fork with good block
BOOST_CHECK_EQUAL(db2.head_block_num(), 14u + j);
PUSH_BLOCK( db1, good_block );
BOOST_CHECK_EQUAL(db1.head_block_id().str(), db2.head_block_id().str());
}
}
//The two databases are on distinct forks now, but at the same height. Make a block on db2, make it invalid, then
//pass it to db1 and assert that db1 doesn't switch to the new fork.
signed_block good_block;
BOOST_CHECK_EQUAL(db1.head_block_num(), 13);
BOOST_CHECK_EQUAL(db2.head_block_num(), 13);
// generate more blocks to push the forked blocks out of fork_db
BOOST_TEST_MESSAGE( "Adding more blocks to db1, push the forked blocks out of fork_db" );
for( uint32_t i = 1; i <= 50; ++i )
{
auto b = db2.generate_block(db2.get_slot_time(1), db2.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
good_block = b;
b.transactions.emplace_back(signed_transaction());
b.transactions.back().operations.emplace_back(transfer_operation());
b.sign( init_account_priv_key );
BOOST_CHECK_EQUAL(b.block_num(), 14);
GRAPHENE_CHECK_THROW(PUSH_BLOCK( db1, b ), fc::exception);
db1.generate_block(db1.get_slot_time(1), db1.get_scheduled_witness(1), init_account_priv_key, database::skip_nothing);
}
BOOST_CHECK_EQUAL(db1.head_block_num(), 13);
BOOST_CHECK_EQUAL(db1.head_block_id().str(), db1_tip);
// assert that db1 switches to new fork with good block
BOOST_CHECK_EQUAL(db2.head_block_num(), 14);
PUSH_BLOCK( db1, good_block );
BOOST_CHECK_EQUAL(db1.head_block_id().str(), db2.head_block_id().str());
{
// PR #938 make sure db is in a good state https://github.com/bitshares/bitshares-core/pull/938
BOOST_TEST_MESSAGE( "Checking whether all blocks on disk are good" );
fc::optional<signed_block> previous_block = db1.fetch_block_by_number(1);
BOOST_CHECK ( previous_block.valid() );
uint32_t db1_blocks = db1.head_block_num();
for( uint32_t curr_block_num = 2; curr_block_num <= db1_blocks; ++curr_block_num )
{
fc::optional<signed_block> curr_block = db1.fetch_block_by_number( curr_block_num );
BOOST_CHECK( curr_block.valid() );
BOOST_CHECK_EQUAL( curr_block->previous.str(), previous_block->id().str() );
previous_block = curr_block;
}
}
} catch (fc::exception& e) {
edump((e.to_detail_string()));
throw;
@ -381,7 +441,7 @@ BOOST_AUTO_TEST_CASE( undo_pending )
fc::temp_directory data_dir( graphene::utilities::temp_directory_path() );
{
database db;
db.open(data_dir.path(), make_genesis);
db.open(data_dir.path(), make_genesis, "TEST");
auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) );
public_key_type init_account_pub_key = init_account_priv_key.get_public_key();
@ -446,8 +506,8 @@ BOOST_AUTO_TEST_CASE( switch_forks_undo_create )
dir2( graphene::utilities::temp_directory_path() );
database db1,
db2;
db1.open(dir1.path(), make_genesis);
db2.open(dir2.path(), make_genesis);
db1.open(dir1.path(), make_genesis, "TEST");
db2.open(dir2.path(), make_genesis, "TEST");
BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() );
auto init_account_priv_key = fc::ecc::private_key::regenerate(fc::sha256::hash(string("null_key")) );
@ -505,8 +565,8 @@ BOOST_AUTO_TEST_CASE( duplicate_transactions )
dir2( graphene::utilities::temp_directory_path() );
database db1,
db2;
db1.open(dir1.path(), make_genesis);
db2.open(dir2.path(), make_genesis);
db1.open(dir1.path(), make_genesis, "TEST");
db2.open(dir2.path(), make_genesis, "TEST");
BOOST_CHECK( db1.get_chain_id() == db2.get_chain_id() );
auto skip_sigs = database::skip_transaction_signatures | database::skip_authority_check;
@ -555,7 +615,7 @@ BOOST_AUTO_TEST_CASE( tapos )
try {
fc::temp_directory dir1( graphene::utilities::temp_directory_path() );
database db1;
db1.open(dir1.path(), make_genesis);
db1.open(dir1.path(), make_genesis, "TEST");
const account_object& init1 = *db1.get_index_type<account_index>().indices().get<by_name>().find("init1");
@ -1106,7 +1166,7 @@ BOOST_FIXTURE_TEST_CASE( transaction_invalidated_in_cache, database_fixture )
fc::temp_directory data_dir2( graphene::utilities::temp_directory_path() );
database db2;
db2.open(data_dir2.path(), make_genesis);
db2.open(data_dir2.path(), make_genesis, "TEST");
BOOST_CHECK( db.get_chain_id() == db2.get_chain_id() );
while( db2.head_block_num() < db.head_block_num() )
@ -1269,7 +1329,7 @@ BOOST_AUTO_TEST_CASE( genesis_reserve_ids )
genesis_state.initial_assets.push_back( usd );
return genesis_state;
} );
}, "TEST" );
const auto& acct_idx = db.get_index_type<account_index>().indices().get<by_name>();
auto acct_itr = acct_idx.find("init0");
@ -1288,18 +1348,50 @@ BOOST_AUTO_TEST_CASE( genesis_reserve_ids )
}
}
BOOST_FIXTURE_TEST_CASE( miss_some_blocks, database_fixture )
{ try {
std::vector<witness_id_type> witnesses = witness_schedule_id_type()(db).current_shuffled_witnesses;
BOOST_CHECK_EQUAL( 10u, witnesses.size() );
// database_fixture constructor calls generate_block once, signed by witnesses[0]
generate_block(); // witnesses[1]
generate_block(); // witnesses[2]
for( const auto& id : witnesses )
BOOST_CHECK_EQUAL( 0, id(db).total_missed );
// generate_blocks generates another block *now* (witnesses[3])
// and one at now+10 blocks (witnesses[12%10])
generate_blocks( db.head_block_time() + db.get_global_properties().parameters.block_interval * 10, true );
// i.e. 8 blocks in between are missed, by witnesses[4..11%10]
for( uint32_t i = 0; i < witnesses.size(); i++ )
BOOST_CHECK_EQUAL( (i+7) % 10 < 2 ? 0 : 1, witnesses[i](db).total_missed );
} FC_LOG_AND_RETHROW() }
BOOST_FIXTURE_TEST_CASE( miss_many_blocks, database_fixture )
{
try
{
auto get_misses = []( database& db ) {
std::map< witness_id_type, uint32_t > misses;
for( const auto& witness_id : witness_schedule_id_type()(db).current_shuffled_witnesses )
misses[witness_id] = witness_id(db).total_missed;
return misses;
};
generate_block();
generate_block();
generate_block();
auto missed_before = get_misses( db );
// miss 10 maintenance intervals
generate_blocks( db.get_dynamic_global_properties().next_maintenance_time + db.get_global_properties().parameters.maintenance_interval * 10, true );
generate_block();
generate_block();
generate_block();
auto missed_after = get_misses( db );
BOOST_CHECK_EQUAL( missed_before.size(), missed_after.size() );
for( const auto& miss : missed_before )
{
const auto& after = missed_after.find( miss.first );
BOOST_REQUIRE( after != missed_after.end() );
BOOST_CHECK_EQUAL( miss.second, after->second );
}
}
catch (fc::exception& e)
{

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
* Copyright (c) 2017 Cryptonomex, Inc., and contributors.
*
* The MIT License
*
@ -34,6 +34,8 @@
using namespace graphene::chain;
BOOST_FIXTURE_TEST_SUITE( database_tests, database_fixture )
BOOST_AUTO_TEST_CASE( undo_test )
{
try {
@ -59,3 +61,53 @@ BOOST_AUTO_TEST_CASE( undo_test )
throw;
}
}
BOOST_AUTO_TEST_CASE( merge_test )
{
try {
database db;
auto ses = db._undo_db.start_undo_session();
const auto& bal_obj1 = db.create<account_balance_object>( [&]( account_balance_object& obj ){
obj.balance = 42;
});
ses.merge();
auto balance = db.get_balance( account_id_type(), asset_id_type() );
BOOST_CHECK_EQUAL( 42, balance.amount.value );
} catch ( const fc::exception& e )
{
edump( (e.to_detail_string()) );
throw;
}
}
BOOST_AUTO_TEST_CASE( flat_index_test )
{
ACTORS((sam));
const auto& bitusd = create_bitasset("USDBIT", sam.id);
update_feed_producers(bitusd, {sam.id});
price_feed current_feed;
current_feed.settlement_price = bitusd.amount(100) / asset(100);
publish_feed(bitusd, sam, current_feed);
FC_ASSERT( bitusd.bitasset_data_id->instance == 0 );
FC_ASSERT( !(*bitusd.bitasset_data_id)(db).current_feed.settlement_price.is_null() );
try {
auto ses = db._undo_db.start_undo_session();
const auto& obj1 = db.create<asset_bitasset_data_object>( [&]( asset_bitasset_data_object& obj ){
obj.settlement_fund = 17;
});
FC_ASSERT( obj1.settlement_fund == 17 );
throw std::string("Expected");
// With flat_index, obj1 will not really be removed from the index
} catch ( const std::string& e )
{ // ignore
}
// force maintenance
const auto& dynamic_global_props = db.get<dynamic_global_property_object>(dynamic_global_property_id_type());
generate_blocks(dynamic_global_props.next_maintenance_time, true);
FC_ASSERT( !(*bitusd.bitasset_data_id)(db).current_feed.settlement_price.is_null() );
}
BOOST_AUTO_TEST_SUITE_END()

View file

@ -0,0 +1,594 @@
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
* Copyright (c) 2019 PBSA, and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <boost/test/unit_test.hpp>
#include <graphene/app/database_api.hpp>
#include <graphene/app/api.hpp>
#include <graphene/chain/account_object.hpp>
#include "../common/database_fixture.hpp"
#include <fc/smart_ref_impl.hpp>
#include <fc/crypto/digest.hpp>
using namespace graphene::app;
using namespace graphene::chain;
using namespace graphene::chain::test;
BOOST_FIXTURE_TEST_SUITE(account_history_tests, database_fixture)
BOOST_AUTO_TEST_CASE(get_account_history) {
try {
graphene::app::history_api hist_api(app);
// account_id_type() does 3 ops
create_bitasset("USD", account_id_type());
auto dan_acc = create_account("dan");
auto bob_acc = create_account("bob");
generate_block();
fc::usleep(fc::milliseconds(2000));
int asset_create_op_id = operation::tag<asset_create_operation>::value;
int account_create_op_id = operation::tag<account_create_operation>::value;
// account_id_type() did 3 ops, including id 0
vector<operation_history_object> histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 100, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u);
BOOST_CHECK_EQUAL(histories[2].op.which(), asset_create_op_id);
// only one account_create op has an id larger than 1
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 100, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK(histories[0].id.instance() != 0);
BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id);
// A limit of 2 returns 2 results
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 2, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK(histories[1].id.instance() != 0);
BOOST_CHECK_EQUAL(histories[1].op.which(), account_create_op_id);
// bob has 1 op
histories = hist_api.get_account_history(bob_acc.get_id(), operation_history_id_type(), 100, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id);
} FC_LOG_AND_RETHROW()
}
BOOST_AUTO_TEST_CASE(zero_id_object) {
try {
graphene::app::history_api hist_api(app);
// no history at all in the chain
vector<operation_history_object> histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(0), 4, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
create_bitasset("USD", account_id_type()); // create op 0
generate_block();
fc::usleep(fc::milliseconds(2000));
// what if the account only has one history entry and it is 0?
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u);
} FC_LOG_AND_RETHROW()
}
BOOST_AUTO_TEST_CASE(get_account_history_additional) {
try {
graphene::app::history_api hist_api(app);
// A = account_id_type() with records { 5, 3, 1, 0 }, and
// B = dan with records { 6, 4, 2, 1 }
// account_id_type() and dan share operation id 1 (account create); the shared op could also be id 0
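// shorthand below: f(account, stop, limit, start) denotes get_account_history with the same argument order; results come back newest first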
// no history at all in the chain
vector<operation_history_object> histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(0), 4, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
create_bitasset("USD", account_id_type()); // create op 0
generate_block();
// what if the account only has one history entry and it is 0?
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u);
const account_object& dan = create_account("dan"); // create op 1
create_bitasset("CNY", dan.id); // create op 2
create_bitasset("BTC", account_id_type()); // create op 3
create_bitasset("XMR", dan.id); // create op 4
create_bitasset("EUR", account_id_type()); // create op 5
create_bitasset("OIL", dan.id); // create op 6
generate_block();
// f(A, 0, 4, 9) = { 5, 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u);
// f(A, 0, 4, 6) = { 5, 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(6));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u);
// f(A, 0, 4, 5) = { 5, 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(5));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u);
// f(A, 0, 4, 4) = { 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(4));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u);
// f(A, 0, 4, 3) = { 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(3));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u);
// f(A, 0, 4, 2) = { 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u);
// f(A, 0, 4, 1) = { 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u);
// f(A, 0, 4, 0) = { 5, 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 4, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u);
// f(A, 1, 5, 9) = { 5, 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
// f(A, 1, 5, 6) = { 5, 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(6));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
// f(A, 1, 5, 5) = { 5, 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(5));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
// f(A, 1, 5, 4) = { 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(4));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
// f(A, 1, 5, 3) = { 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(3));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
// f(A, 1, 5, 2) = { }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(A, 1, 5, 1) = { }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(A, 1, 5, 0) = { 5, 3 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 5, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
// f(A, 0, 3, 9) = { 5, 3, 1 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(A, 0, 3, 6) = { 5, 3, 1 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(6));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(A, 0, 3, 5) = { 5, 3, 1 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(5));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(A, 0, 3, 4) = { 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(4));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u);
// f(A, 0, 3, 3) = { 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(3));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 0u);
// f(A, 0, 3, 2) = { 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u);
// f(A, 0, 3, 1) = { 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u);
// f(A, 0, 3, 0) = { 5, 3, 1 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(), 3, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(B, 0, 4, 9) = { 6, 4, 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u);
// f(B, 0, 4, 6) = { 6, 4, 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(6));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u);
// f(B, 0, 4, 5) = { 4, 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(5));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(B, 0, 4, 4) = { 4, 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(4));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
// f(B, 0, 4, 3) = { 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(3));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
// f(B, 0, 4, 2) = { 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 1u);
// f(B, 0, 4, 1) = { 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 1u);
// f(B, 0, 4, 0) = { 6, 4, 2, 1 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(), 4, operation_history_id_type());
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u);
// f(B, 2, 4, 9) = { 6, 4 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
// f(B, 2, 4, 6) = { 6, 4 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(6));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
// f(B, 2, 4, 5) = { 4 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(5));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
// f(B, 2, 4, 4) = { 4 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(4));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
// f(B, 2, 4, 3) = { }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(3));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(B, 2, 4, 2) = { }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(B, 2, 4, 1) = { }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(B, 2, 4, 0) = { 6, 4 }
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(2), 4, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
// a limit of 0 returns no results
histories = hist_api.get_account_history(dan.get_id(), operation_history_id_type(0), 0, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(3), 0, operation_history_id_type(9));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// create a new account C = alice { 7 }
auto alice = create_account("alice");
generate_block();
// f(C, 0, 4, 10) = { 7 }
histories = hist_api.get_account_history(alice.get_id(), operation_history_id_type(0), 4, operation_history_id_type(10));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u);
// f(C, 8, 4, 10) = { }
histories = hist_api.get_account_history(alice.get_id(), operation_history_id_type(8), 4, operation_history_id_type(10));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// f(A, 0, 10, 0) = { 7, 5, 3, 1, 0 }
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 5u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 7u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 5u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[4].id.instance(), 0u);
} FC_LOG_AND_RETHROW()
}
BOOST_AUTO_TEST_CASE(track_account) {
try {
graphene::app::history_api hist_api(app);
// account_id_type() is not tracked
// account_id_type() creates alice (not a tracked account)
const account_object& alice = create_account("alice");
auto alice_id = alice.id;
//account_id_type() creates some ops
create_bitasset("CNY", account_id_type());
create_bitasset("USD", account_id_type());
// account_id_type() creates dan (a tracked account)
const account_object& dan = create_account("dan");
auto dan_id = dan.id;
// dan makes 1 op
create_bitasset("EUR", dan_id);
generate_block( ~database::skip_fork_db );
// anything against account_id_type() should be {}
vector<operation_history_object> histories =
hist_api.get_account_history(account_id_type(), operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(1), 1, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// anything against alice should be {}
histories = hist_api.get_account_history(alice_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(alice_id, operation_history_id_type(1), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(alice_id, operation_history_id_type(1), 1, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 0u);
// dan should have history
histories = hist_api.get_account_history(dan_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 3u);
// create more ops, starting with an untracked account
create_bitasset( "BTC", account_id_type() );
create_bitasset( "GBP", dan_id );
generate_block( ~database::skip_fork_db );
histories = hist_api.get_account_history(dan_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u);
db.pop_block();
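// pop_block() rewinds the chain by one block, undoing the BTC/GBP creations
// and the history entries they produced.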
// Try again; replaying the same operations should yield the same object IDs
create_bitasset( "BTC", account_id_type() );
create_bitasset( "GBP", dan_id );
generate_block();
histories = hist_api.get_account_history(dan_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 3u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 6u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 3u);
} catch (fc::exception &e) {
edump((e.to_detail_string()));
throw;
}
}
BOOST_AUTO_TEST_CASE(track_account2) {
try {
graphene::app::history_api hist_api(app);
// account_id_type() is tracked
// account_id_type() creates alice (a tracked account)
const account_object& alice = create_account("alice");
auto alice_id = alice.id;
// account_id_type() creates some ops
create_bitasset("CNY", account_id_type());
create_bitasset("USD", account_id_type());
// alice makes 1 op
create_bitasset("EUR", alice_id);
// account_id_type() creates dan (an untracked account)
const account_object& dan = create_account("dan");
auto dan_id = dan.id;
generate_block();
// account_id_type() should have 4 ops {4, 2, 1, 0}
vector<operation_history_object> histories = hist_api.get_account_history(account_id_type(), operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 4u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 4u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 2u);
BOOST_CHECK_EQUAL(histories[2].id.instance(), 1u);
BOOST_CHECK_EQUAL(histories[3].id.instance(), 0u);
// alice should have 2 ops {3, 0}
histories = hist_api.get_account_history(alice_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
BOOST_CHECK_EQUAL(histories[1].id.instance(), 0u);
// alice's first op should be {0}
histories = hist_api.get_account_history(alice_id, operation_history_id_type(0), 1, operation_history_id_type(1));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u);
// alice's second op should be {3}
histories = hist_api.get_account_history(alice_id, operation_history_id_type(1), 1, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 3u);
// anything against dan should be {}
histories = hist_api.get_account_history(dan_id, operation_history_id_type(0), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(dan_id, operation_history_id_type(1), 10, operation_history_id_type(0));
BOOST_CHECK_EQUAL(histories.size(), 0u);
histories = hist_api.get_account_history(dan_id, operation_history_id_type(1), 1, operation_history_id_type(2));
BOOST_CHECK_EQUAL(histories.size(), 0u);
} catch (fc::exception &e) {
edump((e.to_detail_string()));
throw;
}
}
BOOST_AUTO_TEST_CASE(get_account_history_operations) {
try {
graphene::app::history_api hist_api(app);
// account_id_type() performs 3 ops
create_bitasset("CNY", account_id_type());
create_account("sam");
create_account("alice");
generate_block();
fc::usleep(fc::milliseconds(2000));
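// (the pause above presumably gives history indexing time to catch up before querying)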
int asset_create_op_id = operation::tag<asset_create_operation>::value;
int account_create_op_id = operation::tag<account_create_operation>::value;
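// operation::tag<T>::value is the index of T in the operation static_variant,
// which is what op.which() reports on the history entries checked below.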
// account_id_type() did 1 asset_create op
vector<operation_history_object> histories = hist_api.get_account_history_operations(account_id_type(), asset_create_op_id, operation_history_id_type(), operation_history_id_type(), 100);
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].id.instance(), 0u);
BOOST_CHECK_EQUAL(histories[0].op.which(), asset_create_op_id);
// account_id_type() did 2 account_create ops
histories = hist_api.get_account_history_operations(account_id_type(), account_create_op_id, operation_history_id_type(), operation_history_id_type(), 100);
BOOST_CHECK_EQUAL(histories.size(), 2u);
BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id);
// No asset_create op with an id greater than 1
histories = hist_api.get_account_history_operations(account_id_type(), asset_create_op_id, operation_history_id_type(), operation_history_id_type(1), 100);
BOOST_CHECK_EQUAL(histories.size(), 0u);
// Limit 1 returns 1 result
histories = hist_api.get_account_history_operations(account_id_type(), account_create_op_id, operation_history_id_type(),operation_history_id_type(), 1);
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id);
// alice has 1 op
histories = hist_api.get_account_history_operations(get_account("alice").id, account_create_op_id, operation_history_id_type(),operation_history_id_type(), 100);
BOOST_CHECK_EQUAL(histories.size(), 1u);
BOOST_CHECK_EQUAL(histories[0].op.which(), account_create_op_id);
} catch (fc::exception &e) {
edump((e.to_detail_string()));
throw;
}
}
BOOST_AUTO_TEST_SUITE_END()

View file

@@ -1111,7 +1111,7 @@ BOOST_AUTO_TEST_CASE( balance_object_test )
auto _sign = [&]( signed_transaction& tx, const private_key_type& key )
{ tx.sign( key, db.get_chain_id() ); };
db.open(td.path(), [this]{return genesis_state;});
db.open(td.path(), [this]{return genesis_state;}, "TEST");
const balance_object& balance = balance_id_type()(db);
BOOST_CHECK_EQUAL(balance.balance.amount.value, 1);
BOOST_CHECK_EQUAL(balance_id_type(1)(db).balance.amount.value, 1);