2017-05-25 09:13:59 +00:00
|
|
|
/*
|
|
|
|
|
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
|
|
|
|
|
*
|
|
|
|
|
* The MIT License
|
|
|
|
|
*
|
|
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
|
|
|
* in the Software without restriction, including without limitation the rights
|
|
|
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
|
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
|
|
|
* furnished to do so, subject to the following conditions:
|
|
|
|
|
*
|
|
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
|
* all copies or substantial portions of the Software.
|
|
|
|
|
*
|
|
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
|
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
|
|
|
* THE SOFTWARE.
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <graphene/chain/database.hpp>
|
|
|
|
|
|
2018-07-26 16:53:55 +00:00
|
|
|
#include <graphene/chain/chain_property_object.hpp>
|
|
|
|
|
#include <graphene/chain/witness_schedule_object.hpp>
|
|
|
|
|
#include <graphene/chain/special_authority_object.hpp>
|
2017-05-25 09:13:59 +00:00
|
|
|
#include <graphene/chain/operation_history_object.hpp>
|
2021-01-28 19:27:54 +00:00
|
|
|
#include <graphene/chain/nft_object.hpp>
|
2019-04-14 04:38:56 +00:00
|
|
|
|
|
|
|
|
#include <graphene/protocol/fee_schedule.hpp>
|
2017-05-25 09:13:59 +00:00
|
|
|
|
|
|
|
|
#include <fc/io/fstream.hpp>
|
|
|
|
|
|
|
|
|
|
#include <fstream>
|
|
|
|
|
#include <functional>
|
|
|
|
|
#include <iostream>
|
|
|
|
|
|
|
|
|
|
namespace graphene { namespace chain {
|
|
|
|
|
|
Safety Check: Part 2 -- Implement and Integrate Checks
Implement a safety check mechanism on object_database, based on the
safety_check_policy abstract interface. Create two standard
implementations of the safety_check_policy interface, one
(null_safety_check) which allows all modifications unconditionally, and
the other (database_lock_safety_check) which allows modifications only
when unlocked.
Integrate these safety checks into chain::database and plugins, so that
the consensus databases are locked at all times except during core
consensus code pathways. Also ensures that databases are re-locked when
calling code outside of consensus pathways from consensus pathways.
To make this work, it was necessary to move two objects from the
consensus object spaces to a new API object space. The
operation_history_object and account_transaction_history_object were
moved to the API object space, as they are not actually used by
consensus and are maintained by a plugin (which can no longer modify the
consensus object spaces, due to the safety checks).
Finally, add a mechanism to application and chain::database, which
allows the chain to start in "unit testing mode" and allows unchecked
actions upon the database within delimited scopes. This was necessary
because many tests edit the database directly to set up the environment
for their respective tests. This mode is activated by database_fixture so
tests can utilize it conveniently, but it is architecturally difficult to
enable this mode in production, i.e. from a plugin.
2022-03-12 20:04:08 +00:00
|
|
|
database::database(bool allow_testing_edits) :
   _random_number_generator(fc::ripemd160().data()),
   _allow_safety_check_bypass(allow_testing_edits)
{
   // Bypassing the safety checks is intended only for unit tests, which
   // edit consensus state directly to set up their environments. Shout
   // loudly if anyone turns it on.
   if (allow_testing_edits)
   {
      elog("UNIT TESTING MODE ENABLED -- NOT FOR PRODUCTION USE");
   }

   initialize_indexes();
   initialize_evaluators();
}
|
|
|
|
|
|
|
|
|
|
database::~database()
{
   // Destructors must never let an exception escape (that would call
   // std::terminate). clear_pending() touches the undo database and can
   // throw, so log-and-swallow here, matching the pattern used by
   // auto_undo_enabler::~auto_undo_enabler in this file.
   try {
      clear_pending();
   } FC_CAPTURE_AND_LOG(("database destructor clear_pending failed"))
}
|
|
|
|
|
|
2019-09-03 05:07:46 +00:00
|
|
|
// Right now, we leave undo_db enabled when replaying when the bookie plugin is
|
|
|
|
|
// enabled. It depends on new/changed/removed object notifications, and those are
|
|
|
|
|
// only fired when the undo_db is enabled.
|
|
|
|
|
// So we use this helper object to disable undo_db only if it is not forbidden
|
|
|
|
|
// with _slow_replays flag.
|
|
|
|
|
class auto_undo_enabler
|
|
|
|
|
{
|
|
|
|
|
const bool _slow_replays;
|
|
|
|
|
undo_database& _undo_db;
|
|
|
|
|
bool _disabled;
|
|
|
|
|
public:
|
|
|
|
|
auto_undo_enabler(bool slow_replays, undo_database& undo_db) :
|
|
|
|
|
_slow_replays(slow_replays),
|
|
|
|
|
_undo_db(undo_db),
|
|
|
|
|
_disabled(false)
|
|
|
|
|
{
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
~auto_undo_enabler()
|
|
|
|
|
{
|
|
|
|
|
try{
|
|
|
|
|
enable();
|
|
|
|
|
} FC_CAPTURE_AND_LOG(("undo_db enabling crash"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void enable()
|
|
|
|
|
{
|
|
|
|
|
if(!_disabled)
|
|
|
|
|
return;
|
|
|
|
|
_undo_db.enable();
|
|
|
|
|
_disabled = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void disable()
|
|
|
|
|
{
|
|
|
|
|
if(_disabled)
|
|
|
|
|
return;
|
|
|
|
|
if(_slow_replays)
|
|
|
|
|
return;
|
|
|
|
|
_undo_db.disable();
|
|
|
|
|
_disabled = true;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2017-07-12 20:03:57 +00:00
|
|
|
/// Rebuild chain state by replaying all blocks stored in the block
/// database that are ahead of the current head block. Blocks well below
/// the tip are applied without undo/fork tracking for speed; the last
/// ~50 blocks are pushed through the normal (undo-enabled) path.
void database::reindex( fc::path data_dir )
{ try {
   auto last_block = _block_id_to_block.last();
   if( !last_block ) {
      // Block database is empty -- nothing to replay.
      elog( "!no last block" );
      edump((last_block));
      return;
   }
   // Already caught up (or ahead): nothing to do.
   if( last_block->block_num() <= head_block_num()) return;

   ilog( "reindexing blockchain" );
   auto start = fc::time_point::now();
   const auto last_block_num = last_block->block_num();
   // Blocks before undo_point are replayed without undo history; the
   // final 50 blocks go through push_block so fork/undo state is valid
   // at the tip when replay finishes.
   uint32_t undo_point = last_block_num < 50 ? 0 : last_block_num - 50;

   ilog( "Replaying blocks, starting at ${next}...", ("next",head_block_num() + 1) );
   // Guard re-enables the undo db on every exit path (incl. exceptions).
   auto_undo_enabler undo(_slow_replays, _undo_db);
   if( head_block_num() >= undo_point )
   {
      // Starting within the undo window: seed the fork database with the
      // current head so push_block below can link against it.
      if( head_block_num() > 0 )
         _fork_db.start_block( *fetch_block_by_number( head_block_num() ) );
   }
   else
   {
      // Fast path: no undo tracking while far below the tip (no-op when
      // _slow_replays forbids it -- see auto_undo_enabler).
      undo.disable();
   }
   for( uint32_t i = head_block_num() + 1; i <= last_block_num; ++i )
   {
      // Periodically persist object state so a crash mid-replay does not
      // lose all progress.
      if( i % 1000000 == 0 )
      {
         ilog( "Writing database to disk at block ${i}", ("i",i) );
         flush();
         ilog( "Done" );
      }
      fc::optional< signed_block > block = _block_id_to_block.fetch_by_number(i);
      if( !block.valid() )
      {
         // Gap in the block database: drop everything after the gap so the
         // file ends at a contiguous prefix, then stop replaying.
         wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) );
         uint32_t dropped_count = 0;
         while( true )
         {
            fc::optional< block_id_type > last_id = _block_id_to_block.last_id();
            // this can trigger if we attempt to e.g. read a file that has block #2 but no block #1
            if( !last_id.valid() )
               break;
            // we've caught up to the gap
            if( block_header::num_from_id( *last_id ) <= i )
               break;
            _block_id_to_block.remove( *last_id );
            dropped_count++;
         }
         wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) );
         break;
      }
      if( i < undo_point && !_slow_replays)
      {
         // Below the undo window: apply directly, skipping expensive
         // validation that already happened when the block was first
         // accepted.
         apply_block(*block, skip_witness_signature |
                             skip_transaction_signatures |
                             skip_transaction_dupe_check |
                             skip_tapos_check |
                             skip_witness_schedule_check |
                             skip_authority_check);
      }
      else
      {
         // Within the undo window (or slow replay): restore undo tracking
         // and use the full push_block path so fork db state stays valid.
         undo.enable();
         push_block(*block, skip_witness_signature |
                            skip_transaction_signatures |
                            skip_transaction_dupe_check |
                            skip_tapos_check |
                            skip_witness_schedule_check |
                            skip_authority_check);
      }
   }
   undo.enable();
   auto end = fc::time_point::now();
   ilog( "Done reindexing, elapsed time: ${t} sec", ("t",double((end-start).count())/1000000.0 ) );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
|
|
|
|
|
|
|
|
|
|
/// Erase the object database (chain state) under data_dir, optionally
/// deleting the stored block files as well. Closes the database first if
/// it is open (without rewinding, since state is being discarded anyway).
void database::wipe(const fc::path& data_dir, bool include_blocks)
{
   // BUG FIX: the format string previously contained no ${include_blocks}
   // placeholder, so the substitution argument was silently dropped from
   // the log line.
   ilog("Wiping database, include_blocks=${include_blocks}", ("include_blocks", include_blocks));
   if (_opened) {
      // false: don't try to rewind to the last irreversible block -- all
      // state is about to be deleted.
      close(false);
   }
   object_database::wipe(data_dir);
   if( include_blocks )
      fc::remove_all( data_dir / "database" );
}
|
|
|
|
|
|
|
|
|
|
void database::open(
|
|
|
|
|
const fc::path& data_dir,
|
2017-07-12 20:03:57 +00:00
|
|
|
std::function<genesis_state_type()> genesis_loader,
|
|
|
|
|
const std::string& db_version)
|
2017-05-25 09:13:59 +00:00
|
|
|
{
|
|
|
|
|
try
|
|
|
|
|
{
|
2017-07-12 20:03:57 +00:00
|
|
|
bool wipe_object_db = false;
|
|
|
|
|
if( !fc::exists( data_dir / "db_version" ) )
|
|
|
|
|
wipe_object_db = true;
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
std::string version_string;
|
|
|
|
|
fc::read_file_contents( data_dir / "db_version", version_string );
|
|
|
|
|
wipe_object_db = ( version_string != db_version );
|
|
|
|
|
}
|
|
|
|
|
if( wipe_object_db ) {
|
|
|
|
|
ilog("Wiping object_database due to missing or wrong version");
|
|
|
|
|
object_database::wipe( data_dir );
|
|
|
|
|
std::ofstream version_file( (data_dir / "db_version").generic_string().c_str(),
|
|
|
|
|
std::ios::out | std::ios::binary | std::ios::trunc );
|
|
|
|
|
version_file.write( db_version.c_str(), db_version.size() );
|
|
|
|
|
version_file.close();
|
|
|
|
|
}
|
|
|
|
|
|
2017-05-25 09:13:59 +00:00
|
|
|
object_database::open(data_dir);
|
|
|
|
|
|
|
|
|
|
_block_id_to_block.open(data_dir / "database" / "block_num_to_block");
|
|
|
|
|
|
|
|
|
|
if( !find(global_property_id_type()) )
|
|
|
|
|
init_genesis(genesis_loader());
|
2018-07-03 22:49:58 +00:00
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
_p_core_asset_obj = &get( asset_id_type() );
|
2018-07-05 17:42:16 +00:00
|
|
|
_p_core_dynamic_data_obj = &get( asset_dynamic_data_id_type() );
|
2018-07-03 22:49:58 +00:00
|
|
|
_p_global_prop_obj = &get( global_property_id_type() );
|
|
|
|
|
_p_chain_property_obj = &get( chain_property_id_type() );
|
|
|
|
|
_p_dyn_global_prop_obj = &get( dynamic_global_property_id_type() );
|
2018-07-05 17:05:23 +00:00
|
|
|
_p_witness_schedule_obj = &get( witness_schedule_id_type() );
|
2018-07-03 22:49:58 +00:00
|
|
|
}
|
2017-05-25 09:13:59 +00:00
|
|
|
|
2017-07-12 20:03:57 +00:00
|
|
|
fc::optional<block_id_type> last_block = _block_id_to_block.last_id();
|
2017-05-25 09:13:59 +00:00
|
|
|
if( last_block.valid() )
|
|
|
|
|
{
|
2017-07-12 20:03:57 +00:00
|
|
|
FC_ASSERT( *last_block >= head_block_id(),
|
|
|
|
|
"last block ID does not match current chain state",
|
|
|
|
|
("last_block->id", last_block)("head_block_id",head_block_num()) );
|
2021-12-01 21:57:46 +00:00
|
|
|
|
|
|
|
|
_block_id_to_block.set_replay_mode(true);
|
|
|
|
|
|
2017-07-12 20:03:57 +00:00
|
|
|
reindex( data_dir );
|
2021-12-01 21:57:46 +00:00
|
|
|
|
|
|
|
|
_block_id_to_block.set_replay_mode(false);
|
2017-05-25 09:13:59 +00:00
|
|
|
}
|
2018-02-25 04:14:01 +00:00
|
|
|
_opened = true;
|
2017-05-25 09:13:59 +00:00
|
|
|
}
|
|
|
|
|
FC_CAPTURE_LOG_AND_RETHROW( (data_dir) )
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Shut the database down. When rewind is true, pops blocks back to the
/// last irreversible block (as far as undo history allows) so that the
/// flushed state is re-derivable from the block files on next open.
void database::close(bool rewind)
{
   if (!_opened)
      return;

   // TODO: Save pending tx's on close()
   clear_pending();

   // pop all of the blocks that we can given our undo history, this should
   // throw when there is no more undo history to pop
   if( rewind )
   {
      try
      {
         uint32_t cutoff = get_dynamic_global_properties().last_irreversible_block_num;

         while( head_block_num() > cutoff )
         {
            block_id_type popped_block_id = head_block_id();
            pop_block();
            _fork_db.remove(popped_block_id); // doesn't throw on missing
         }
      }
      catch ( const fc::exception& e )
      {
         // Running out of undo history throws; that simply means we cannot
         // rewind any further, so log and continue shutting down.
         wlog( "Database close unexpected exception: ${e}", ("e", e) );
      }
   }

   // Since pop_block() will move tx's in the popped blocks into pending,
   // we have to clear_pending() after we're done popping to get a clean
   // DB state (issue #336).
   clear_pending();

   // Persist object state, then release everything.
   object_database::flush();
   object_database::close();

   if( _block_id_to_block.is_open() )
      _block_id_to_block.close();

   _fork_db.reset();

   _opened = false;
}
|
|
|
|
|
|
2019-07-30 15:43:31 +00:00
|
|
|
/// Force replays to keep the undo database enabled throughout (needed by
/// plugins, e.g. bookie, that rely on object change notifications which
/// only fire while undo tracking is on). See auto_undo_enabler above.
void database::force_slow_replays()
{
   ilog("enabling slow replays");
   _slow_replays = true;
}
|
2017-12-06 07:40:52 +00:00
|
|
|
|
|
|
|
|
/// Scan active lottery assets and end any whose end_date has passed.
void database::check_ending_lotteries()
{
   try {
      const auto& lotteries_idx = get_index_type<asset_index>().indices().get<active_lotteries>();
      // NOTE: checking_asset is deliberately a by-value copy of the indexed
      // object; end_lottery() mutates database state, which could otherwise
      // touch the object being referenced.
      // NOTE(review): end_lottery() presumably modifies objects in this same
      // index (deactivating the lottery), which may invalidate the index
      // iterators mid-loop -- verify, or collect ids first and act after.
      for( auto checking_asset: lotteries_idx )
      {
         FC_ASSERT( checking_asset.is_lottery() );
         FC_ASSERT( checking_asset.lottery_options->is_active );
         FC_ASSERT( checking_asset.lottery_options->end_date != time_point_sec() );
         // Not yet expired; keep scanning.
         if( checking_asset.lottery_options->end_date > head_block_time() ) continue;
         checking_asset.end_lottery(*this);
      }
   // NOTE(review): the blanket catch also swallows FC_ASSERT failures above,
   // silently abandoning the remainder of the scan -- confirm this
   // best-effort behavior is intended.
   } catch( ... ) {}
}
|
|
|
|
|
|
2021-01-28 19:27:54 +00:00
|
|
|
/// Scan active NFT lotteries and end any that sold out (when configured
/// to end on sellout) or whose end_date has passed.
void database::check_ending_nft_lotteries()
{
   try {
      const auto &nft_lotteries_idx = get_index_type<nft_metadata_index>().indices().get<active_nft_lotteries>();
      // NOTE: checking_token is a by-value copy; end_lottery() mutates
      // database state. NOTE(review): as with check_ending_lotteries above,
      // mutating while iterating this index risks iterator invalidation --
      // verify.
      for (auto checking_token : nft_lotteries_idx)
      {
         FC_ASSERT(checking_token.is_lottery());
         const auto &lottery_options = checking_token.lottery_data->lottery_options;
         FC_ASSERT(lottery_options.is_active);
         // Check the current supply of lottery tokens
         auto current_supply = checking_token.get_token_current_supply(*this);
         // End when sold out (if so configured) or when a set end_date has
         // been reached.
         if ((lottery_options.ending_on_soldout && (current_supply == checking_token.max_supply)) ||
             (lottery_options.end_date != time_point_sec() && (lottery_options.end_date <= head_block_time())))
            checking_token.end_lottery(*this);
      }
   // NOTE(review): blanket catch swallows all errors, including the
   // FC_ASSERT failures above -- confirm best-effort semantics are intended.
   } catch( ... ) {}
}
|
|
|
|
|
|
2017-12-06 07:40:52 +00:00
|
|
|
/// End the given lottery asset if it has sold out (current supply equals
/// max supply) and is configured to end on sellout. Any precondition
/// failure is swallowed, making this a best-effort check.
void database::check_lottery_end_by_participants( asset_id_type asset_id )
{
   try {
      // By-value copy of the asset object; end_lottery() mutates db state.
      asset_object asset_to_check = asset_id( *this );
      auto asset_dyn_props = asset_to_check.dynamic_data( *this );
      // All preconditions are expressed as asserts; a failure throws and is
      // caught below, i.e. "not ready to end" is not an error.
      FC_ASSERT( asset_dyn_props.current_supply == asset_to_check.options.max_supply );
      FC_ASSERT( asset_to_check.is_lottery() );
      FC_ASSERT( asset_to_check.lottery_options->ending_on_soldout );
      asset_to_check.end_lottery( *this );
   } catch( ... ) {}
}
|
|
|
|
|
|
2017-05-25 09:13:59 +00:00
|
|
|
} }
|