peerplays_migrated/libraries/chain/db_management.cpp

/*
* Copyright (c) 2015, Cryptonomex, Inc.
* All rights reserved.
*
* This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and
* the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification,
* are permitted until September 8, 2015, provided that the following conditions are met:
*
* 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <graphene/chain/database.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>

#include <fc/io/fstream.hpp>

#include <fstream>
#include <functional>
#include <iostream>

namespace graphene { namespace chain {

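// Register all object indexes and operation evaluators so a freshly
// constructed database is ready to be opened.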
database::database()
{
   initialize_indexes();
   initialize_evaluators();
}

database::~database()
{
   clear_pending();
}
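
/**
 * Rebuild chain state by replaying every block stored under @p data_dir.
 * The object database is wiped and re-opened from @p initial_allocation,
 * undo history is disabled for the duration of the replay, and most
 * per-block validation (signatures, TaPoS, duplicate checks) is skipped.
 * If a gap is found in the block files, replay stops and the blocks after
 * the gap are dropped.
 */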
void database::reindex(fc::path data_dir, const genesis_state_type& initial_allocation)
{ try {
   ilog( "reindexing blockchain" );
   wipe(data_dir, false);
   open(data_dir, [&initial_allocation]{return initial_allocation;});

   auto start = fc::time_point::now();
   auto last_block = _block_id_to_block.last();
   if( !last_block ) {
      elog( "!no last block" );
      edump((last_block));
      return;
   }

   const auto last_block_num = last_block->block_num();

   ilog( "Replaying blocks..." );
   _undo_db.disable();
   for( uint32_t i = 1; i <= last_block_num; ++i )
   {
      if( i % 2000 == 0 )
         std::cerr << " " << double(i*100)/last_block_num << "% " << i << " of " << last_block_num << " \n";
      fc::optional< signed_block > block = _block_id_to_block.fetch_by_number(i);
      if( !block.valid() )
      {
         wlog( "Reindexing terminated due to gap: Block ${i} does not exist!", ("i", i) );
         uint32_t dropped_count = 0;
         while( true )
         {
            fc::optional< block_id_type > last_id = _block_id_to_block.last_id();
            // this can trigger if we attempt to e.g. read a file that has block #2 but no block #1
            if( !last_id.valid() )
               break;
            // we've caught up to the gap
            if( block_header::num_from_id( *last_id ) <= i )
               break;
            _block_id_to_block.remove( *last_id );
            dropped_count++;
         }
         wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) );
         break;
      }
      apply_block( *block, skip_witness_signature |
                           skip_transaction_signatures |
                           skip_transaction_dupe_check |
                           skip_tapos_check |
                           skip_witness_schedule_check |
                           skip_authority_check );
   }
   _undo_db.enable();
   auto end = fc::time_point::now();
   ilog( "Done reindexing, elapsed time: ${t} sec", ("t", double((end-start).count())/1000000.0) );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
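
/**
 * Close and delete the object database under @p data_dir; when
 * @p include_blocks is true the block database (the "database" subdirectory)
 * is removed as well.
 */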
void database::wipe(const fc::path& data_dir, bool include_blocks)
{
   ilog( "Wiping database, include_blocks=${include_blocks}", ("include_blocks", include_blocks) );
   close();
   object_database::wipe(data_dir);
   if( include_blocks )
      fc::remove_all( data_dir / "database" );
}
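
/**
 * Open the object database and block storage under @p data_dir.  An existing
 * object database written with an older db_version is wiped, after which a
 * reindex is required; genesis state is initialized from @p genesis_loader
 * when no global properties exist yet.
 *
 * A minimal call-site sketch (the surrounding variables are assumed and not
 * part of this file):
 *
 *   database db;
 *   genesis_state_type genesis = ...;   // e.g. loaded from a genesis.json
 *   db.open( data_dir, [&genesis]{ return genesis; } );
 */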
void database::open(
   const fc::path& data_dir,
   std::function<genesis_state_type()> genesis_loader )
{
   try
   {
      auto is_new = [&]() -> bool
      {
         // directory doesn't exist
         if( !fc::exists( data_dir ) )
            return true;
         // if directory exists but is empty, return true; else false.
         return ( fc::directory_iterator( data_dir ) == fc::directory_iterator() );
      };

      auto is_outdated = [&]() -> bool
      {
         if( !fc::exists( data_dir / "db_version" ) )
            return true;
         std::string version_str;
         fc::read_file_contents( data_dir / "db_version", version_str );
         return (version_str != GRAPHENE_CURRENT_DB_VERSION);
      };

      if( (!is_new()) && is_outdated() )
      {
         ilog( "Old database version detected, reindex is required" );
         wipe( data_dir, false );
         fc::remove_all( data_dir / "db_version" );
      }

      object_database::open(data_dir);

      _block_id_to_block.open(data_dir / "database" / "block_num_to_block");

      if( !find(global_property_id_type()) )
         init_genesis(genesis_loader());

      fc::optional<signed_block> last_block = _block_id_to_block.last();
      if( last_block.valid() )
      {
         _fork_db.start_block( *last_block );
         idump((last_block->id())(last_block->block_num()));
         if( last_block->id() != head_block_id() )
         {
            FC_ASSERT( head_block_num() == 0, "last block ID does not match current chain state" );
         }
      }

      // doing this down here helps ensure that DB will be wiped
      // if any of the above steps were interrupted on a previous run
      if( !fc::exists( data_dir / "db_version" ) )
      {
         std::ofstream db_version(
            (data_dir / "db_version").generic_string().c_str(),
            std::ios::out | std::ios::binary | std::ios::trunc );
         std::string version_string = GRAPHENE_CURRENT_DB_VERSION;
         db_version.write( version_string.c_str(), version_string.size() );
         db_version.close();
      }
      //idump((head_block_id())(head_block_num()));
   }
   FC_CAPTURE_LOG_AND_RETHROW( (data_dir) )
}
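
/**
 * Shut the database down.  When @p rewind is true, pop as many blocks as the
 * undo history allows and remove them from the fork database and block
 * storage; the pending transaction queue is cleared and the underlying
 * object database is flushed and closed in any case.
 */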
void database::close(bool rewind)
{
   // TODO: Save pending tx's on close()
   clear_pending();

   // Pop all of the blocks that we can given our undo history; this will
   // throw when there is no more undo history to pop.
   if( rewind )
   {
      try
      {
         while( true )
         {
            // elog("pop");
            block_id_type popped_block_id = head_block_id();
            pop_block();
            _fork_db.remove(popped_block_id); // doesn't throw on missing
            try
            {
               _block_id_to_block.remove(popped_block_id);
            }
            catch (const fc::key_not_found_exception&)
            {
            }
         }
      }
      catch (...)
      {
      }
   }

   // Since pop_block() will move tx's in the popped blocks into pending,
   // we have to clear_pending() after we're done popping to get a clean
   // DB state (issue #336).
   clear_pending();

   object_database::flush();
   object_database::close();

   if( _block_id_to_block.is_open() )
      _block_id_to_block.close();

   _fork_db.reset();
}
} }