remove dependency on level db, bugs left to fix

Daniel Larimer 2015-06-16 15:56:13 -04:00
parent e8bbdc08f6
commit ca89d5057c
21 changed files with 274 additions and 1018 deletions

.gitmodules

@@ -5,7 +5,3 @@
path = libraries/fc
url = https://github.com/cryptonomex/fc.git
ignore = dirty
[submodule "libraries/leveldb"]
path = libraries/leveldb
url = https://github.com/bitcoin/leveldb.git
ignore = dirty

CMakeLists.txt

@@ -65,21 +65,6 @@ IF(NOT "${Boost_VERSION}" MATCHES "1.53(.*)")
SET(Boost_LIBRARIES ${BOOST_LIBRARIES_TEMP} ${Boost_LIBRARIES})
ENDIF()
set( LEVEL_DB_DIR "${CMAKE_SOURCE_DIR}/libraries/leveldb" )
file( GLOB LEVEL_DB_SOURCES "${LEVEL_DB_DIR}/db/*.cc"
"${LEVEL_DB_DIR}/helpers/memenv/memenv.cc"
"${LEVEL_DB_DIR}/table/*.cc"
"${LEVEL_DB_DIR}/util/*.cc" )
foreach( filename ${LEVEL_DB_SOURCES} )
if( ${filename} MATCHES ".*_test.cc" OR ${filename} MATCHES ".*_bench.cc" OR ${filename} MATCHES ".*_main.cc" )
list( REMOVE_ITEM LEVEL_DB_SOURCES ${filename} )
endif()
endforeach()
set(LEVELDB_BUILD_DEFINES)
set(LEVELDB_BUILD_LIBRARIES)
set(LEVELDB_BUILD_PRIVATE_INCLUDES "${LEVEL_DB_DIR}")
if( WIN32 )
message( STATUS "Configuring Graphene on WIN32")
set( DB_VERSION 60 )
@@ -112,13 +97,7 @@ if( WIN32 )
SET(TCL_LIBS "${TCL_LIBS}${TCL_LIB_PATH}/${TCL_LIB_NAME}g${TCL_LIB_EXT}")
SET(TCL_LIBRARY ${TCL_LIBS})
SET(LEVELDB_PORT_FILE "${LEVEL_DB_DIR}/port/port_win.cc" )
list(APPEND LEVELDB_BUILD_DEFINES OS_WINDOWS LEVELDB_PLATFORM_WINDOWS )
list(APPEND LEVELDB_BUILD_LIBRARIES shlwapi.lib)
list(INSERT LEVELDB_BUILD_PRIVATE_INCLUDES 0 "${CMAKE_SOURCE_DIR}/libraries/leveldb-msvc/include")
else( WIN32 ) # Apple AND Linux
SET(LEVELDB_PORT_FILE "${LEVEL_DB_DIR}/port/port_posix.cc" )
list(APPEND LEVELDB_BUILD_DEFINES LEVELDB_PLATFORM_POSIX LEVELDB_ATOMIC_PRESENT)
if( APPLE )
list(APPEND LEVELDB_BUILD_DEFINES OS_MACOSX)
@@ -169,13 +148,6 @@ else( WIN32 ) # Apple AND Linux
endif( WIN32 )
list(APPEND LEVEL_DB_SOURCES "${LEVELDB_PORT_FILE}")
add_library( leveldb ${LEVEL_DB_SOURCES} )
target_link_libraries( leveldb ${LEVELDB_BUILD_LIBRARIES} )
target_include_directories( leveldb PRIVATE ${LEVELDB_BUILD_PRIVATE_INCLUDES}
PUBLIC "${LEVEL_DB_DIR}/include" )
set_target_properties(leveldb PROPERTIES COMPILE_DEFINITIONS "${LEVELDB_BUILD_DEFINES}")
find_package( BerkeleyDB )
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Graphene for code coverage analysis")

libraries/app/application.cpp

@@ -20,6 +20,7 @@
#include <graphene/app/application.hpp>
#include <boost/program_options.hpp>
#include <fc/io/json.hpp>
namespace graphene { namespace app {
namespace bpo = boost::program_options;

libraries/chain/CMakeLists.txt

@@ -34,6 +34,7 @@ add_library( graphene_chain
transaction_evaluation_state.cpp
fork_database.cpp
block_database.cpp
db_balance.cpp
db_block.cpp
@@ -49,7 +50,7 @@ add_library( graphene_chain
${HEADERS}
)
target_link_libraries( graphene_chain fc graphene_db leveldb )
target_link_libraries( graphene_chain fc graphene_db )
target_include_directories( graphene_chain
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )

libraries/chain/block_database.cpp

@@ -0,0 +1,159 @@
#include <graphene/chain/block_database.hpp>
namespace graphene { namespace chain {
struct index_entry
{
uint64_t block_pos = 0;
uint32_t block_size = 0;
block_id_type block_id;
};
void block_database::open( const fc::path& dbdir )
{
_block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary );
_blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary );
}
bool block_database::is_open()const
{
return _blocks.is_open();
}
void block_database::close()
{
_blocks.close();
_block_num_to_pos.close();
}
void block_database::flush()
{
_blocks.flush();
_block_num_to_pos.flush();
}
void block_database::store( const block_id_type& id, const signed_block& b )
{
auto num = block_header::num_from_id(id);
_block_num_to_pos.seekp( sizeof( index_entry ) * num );
index_entry e;
_blocks.seekp( 0, _blocks.end );
auto vec = fc::raw::pack( b );
e.block_pos = _blocks.tellp();
e.block_size = vec.size();
e.block_id = id;
_blocks.write( vec.data(), vec.size() );
_block_num_to_pos.write( (char*)&e, sizeof(e) );
}
void block_database::remove( const block_id_type& id )
{
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
FC_ASSERT( _block_num_to_pos.tellg() > index_pos );
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
FC_ASSERT( e.block_id == id );
e.block_size = 0;
_block_num_to_pos.seekp( sizeof(e)*block_header::num_from_id(id) );
_block_num_to_pos.write( (char*)&e, sizeof(e) );
}
bool block_database::contains( const block_id_type& id )const
{
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
FC_ASSERT( _block_num_to_pos.tellg() > index_pos );
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
return e.block_id == id;
}
block_id_type block_database::fetch_block_id( uint32_t block_num )const
{
index_entry e;
auto index_pos = sizeof(e)*block_num;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
FC_ASSERT( _block_num_to_pos.tellg() > index_pos );
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
return e.block_id;
}
optional<signed_block> block_database::fetch_optional( const block_id_type& id )const
{
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
FC_ASSERT( _block_num_to_pos.tellg() > index_pos );
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
if( e.block_id != id ) return optional<signed_block>();
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
return fc::raw::unpack<signed_block>(data);
}
optional<signed_block> block_database::fetch_by_number( uint32_t block_num )const
{
index_entry e;
auto index_pos = sizeof(e)*block_num;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
FC_ASSERT( _block_num_to_pos.tellg() > index_pos );
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
return fc::raw::unpack<signed_block>(data);
}
optional<signed_block> block_database::last()const
{
index_entry e;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
if( _block_num_to_pos.tellg() < sizeof(index_entry) )
return optional<signed_block>();
_block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
if( e.block_size == 0 )
return optional<signed_block>();
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
return fc::raw::unpack<signed_block>(data);
}
} }
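
The new block_database trades LevelDB for two flat files: "blocks", an append-only log of fc::raw-packed signed_block records, and "index", a fixed-stride array of index_entry records in which entry N sits at byte offset N * sizeof(index_entry). Two of the bugs the commit message defers appear to be visible here: open() passes std::fstream::binary alone, which drops the default in|out open mode (binary by itself is not a valid filebuf mode), and contains() throws via FC_ASSERT instead of returning false when the requested number lies past the end of the index. A minimal standalone sketch of the same fixed-stride lookup, using illustrative names (index_entry_t, read_entry) rather than the committed API:

#include <cstdint>
#include <fstream>

struct index_entry_t                    // mirrors index_entry above, minus block_id
{
    uint64_t block_pos  = 0;            // byte offset of the packed block in "blocks"
    uint32_t block_size = 0;            // packed size; 0 marks a removed block
};

// Seek to the fixed-stride slot for block_num and read its entry, the same
// random-access pattern fetch_by_number() uses. Returns false if the index
// file is too short to contain that slot.
bool read_entry( std::fstream& index_file, uint32_t block_num, index_entry_t& e )
{
    const std::streamoff slot = std::streamoff( sizeof(e) ) * block_num;
    index_file.seekg( 0, std::ios::end );
    const std::streamoff file_size = index_file.tellg();
    if( file_size < slot + std::streamoff( sizeof(e) ) )
        return false;                   // index too short: block unknown
    index_file.seekg( slot );
    index_file.read( reinterpret_cast<char*>( &e ), sizeof(e) );
    return index_file.good();
}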

libraries/chain/db_block.cpp

@@ -30,7 +30,7 @@ namespace graphene { namespace chain {
bool database::is_known_block( const block_id_type& id )const
{
return _fork_db.is_known_block(id) || _block_id_to_block.find(id).valid();
return _fork_db.is_known_block(id) || _block_id_to_block.contains(id);
}
/**
* Only return true *if* the transaction has not expired or been invalidated. If this
@@ -45,10 +45,7 @@ bool database::is_known_transaction( const transaction_id_type& id )const
block_id_type database::get_block_id_for_num( uint32_t block_num )const
{ try {
block_id_type lb; lb._hash[0] = htonl(block_num);
auto itr = _block_id_to_block.lower_bound( lb );
FC_ASSERT( itr.valid() && itr.key()._hash[0] == lb._hash[0] );
return itr.key();
return _block_id_to_block.fetch_block_id( block_num );
} FC_CAPTURE_AND_RETHROW( (block_num) ) }
optional<signed_block> database::fetch_block_by_id( const block_id_type& id )const
@@ -65,12 +62,7 @@ optional<signed_block> database::fetch_block_by_number( uint32_t num )const
if( results.size() == 1 )
return results[0]->data;
else
{
block_id_type lb; lb._hash[0] = htonl(num);
auto itr = _block_id_to_block.lower_bound( lb );
if( itr.valid() && itr.key()._hash[0] == lb._hash[0] )
return itr.value();
}
return _block_id_to_block.fetch_by_number(num);
return optional<signed_block>();
}
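
Both replaced lookups relied on the same invariant: a block id embeds its block number in its first four bytes in big-endian order, which is why the deleted code could seed a lower_bound scan with htonl(block_num). With a flat index addressed directly by block number, the scan disappears. The invariant itself, sketched with standard types (block_header::num_from_id is the real accessor; this is an assumed equivalent):

#include <cstdint>

// The first 32-bit word of a block id holds the block number in big-endian
// byte order, so ids sort by height and the number is recoverable from the
// id alone. Byte swap shown explicitly (equivalent to ntohl on a
// little-endian host):
uint32_t num_from_first_word( uint32_t w )
{
    return ( w >> 24 ) | ( ( w >> 8 ) & 0x0000ff00u )
         | ( ( w << 8 ) & 0x00ff0000u ) | ( w << 24 );
}
// Round trip: storing htonl(42) in _hash[0] makes num_from_first_word return 42.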

libraries/chain/db_management.cpp

@@ -46,9 +46,9 @@ void database::open( const fc::path& data_dir, const genesis_allocation& initial
_pending_block.previous = head_block_id();
_pending_block.timestamp = head_block_time();
auto last_block_itr = _block_id_to_block.last();
if( last_block_itr.valid() )
_fork_db.start_block( last_block_itr.value() );
auto last_block = _block_id_to_block.last();
if( last_block.valid() )
_fork_db.start_block( *last_block );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
@@ -58,19 +58,22 @@ void database::reindex(fc::path data_dir, const genesis_allocation& initial_allo
open(data_dir, initial_allocation);
auto start = fc::time_point::now();
auto itr = _block_id_to_block.begin();
auto last_block = _block_id_to_block.last();
if( !last_block ) return;
const auto last_block_num = last_block->block_num();
// TODO: disable undo tracking during reindex; this currently causes crashes in the benchmark test
//_undo_db.disable();
while( itr.valid() )
for( uint32_t i = 1; i <= last_block_num; ++i )
{
apply_block( itr.value(), skip_delegate_signature |
apply_block( *_block_id_to_block.fetch_by_number(i), skip_delegate_signature |
skip_transaction_signatures |
skip_undo_block |
skip_undo_transaction |
skip_transaction_dupe_check |
skip_tapos_check |
skip_authority_check );
++itr;
}
//_undo_db.enable();
auto end = fc::time_point::now();

libraries/chain/db_update.cpp

@@ -187,9 +187,11 @@ void database::clear_expired_orders()
max_settlement_volume = mia_object.amount(mia.max_force_settlement_volume(mia_object.dynamic_data(*this).current_supply));
if( mia.force_settled_volume >= max_settlement_volume.amount )
{
/*
ilog("Skipping force settlement in ${asset}; settled ${settled_volume} / ${max_volume}",
("asset", mia_object.symbol)("settlement_price_null",mia.current_feed.settlement_price.is_null())
("settled_volume", mia.force_settled_volume)("max_volume", max_settlement_volume));
*/
if( next_asset() )
continue;
break;

libraries/chain/include/graphene/chain/block_database.hpp

@@ -0,0 +1,26 @@
#pragma once
#include <fstream>
#include <graphene/chain/block.hpp>
namespace graphene { namespace chain {
class block_database
{
public:
void open( const fc::path& dbdir );
bool is_open()const;
void flush();
void close();
void store( const block_id_type& id, const signed_block& b );
void remove( const block_id_type& id );
bool contains( const block_id_type& id )const;
block_id_type fetch_block_id( uint32_t block_num )const;
optional<signed_block> fetch_optional( const block_id_type& id )const;
optional<signed_block> fetch_by_number( uint32_t block_num )const;
optional<signed_block> last()const;
private:
mutable std::fstream _blocks;
mutable std::fstream _block_num_to_pos;
};
} }
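
A hedged usage sketch of the new interface (the on-disk directory name and the logging are assumptions, not taken from this commit):

#include <graphene/chain/block_database.hpp>
#include <fc/log/logger.hpp>

// Illustrative driver; "blocks" as the directory name is an assumption.
void block_db_example( const fc::path& data_dir, const graphene::chain::signed_block& b )
{
    graphene::chain::block_database bdb;
    bdb.open( data_dir / "blocks" );

    bdb.store( b.id(), b );             // append the block, overwrite its index slot
    ilog( "stored: ${ok}", ("ok", bdb.contains( b.id() )) );

    auto head = bdb.last();             // newest entry whose block_size != 0
    if( head.valid() )
        ilog( "head block is ${n}", ("n", head->block_num()) );

    bdb.flush();                        // flush both underlying fstreams
    bdb.close();
}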

libraries/chain/include/graphene/chain/database.hpp

@@ -23,11 +23,10 @@
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/fork_database.hpp>
#include <graphene/chain/block_database.hpp>
#include <graphene/db/object_database.hpp>
#include <graphene/db/object.hpp>
#include <graphene/db/level_map.hpp>
#include <graphene/db/level_pod_map.hpp>
#include <graphene/db/simple_index.hpp>
#include <fc/signals.hpp>
@@ -376,7 +375,7 @@ namespace graphene { namespace chain {
* until the fork is resolved. This should make maintaining
* the fork tree relatively simple.
*/
graphene::db::level_map<block_id_type, signed_block> _block_id_to_block;
block_database _block_id_to_block;
/**
* Contains the set of ops that are in the process of being applied from

libraries/db/CMakeLists.txt

@@ -1,4 +1,4 @@
file(GLOB HEADERS "include/graphene/db/*.hpp")
add_library( graphene_db undo_database.cpp index.cpp object_database.cpp upgrade_leveldb.cpp ${HEADERS} )
target_link_libraries( graphene_db fc leveldb )
add_library( graphene_db undo_database.cpp index.cpp object_database.cpp ${HEADERS} )
target_link_libraries( graphene_db fc )
target_include_directories( graphene_db PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )

libraries/db/include/graphene/db/index.hpp

@@ -17,10 +17,13 @@
*/
#pragma once
#include <graphene/db/object.hpp>
#include <graphene/db/level_map.hpp>
#include <fc/interprocess/file_mapping.hpp>
#include <fc/io/raw.hpp>
#include <fstream>
namespace graphene { namespace db {
class object_database;
using fc::path;
/**
* @class index_observer
@@ -80,9 +83,12 @@ namespace graphene { namespace db {
virtual const object& create( const std::function<void(object&)>& constructor ) = 0;
/**
* Opens the index loading objects from a level_db database
* Opens the index loading objects from a file
*/
virtual void open( const shared_ptr<graphene::db::level_map<object_id_type, vector<char> >>& db ){}
virtual void open( const fc::path& db ) = 0;
virtual void save( const fc::path& db ) = 0;
/** @return the object with id or nullptr if not found */
virtual const object* find( object_id_type id )const = 0;
@@ -170,22 +176,40 @@ namespace graphene { namespace db {
virtual void use_next_id()override { ++_next_id.number; }
virtual void set_next_id( object_id_type id )override { _next_id = id; }
virtual void open( const path& db )override
{
if( !fc::exists( db ) ) return;
fc::file_mapping fm( db.generic_string().c_str(), fc::read_only );
fc::mapped_region mr( fm, fc::read_only, 0, fc::file_size(db) );
fc::datastream<const char*> ds( (const char*)mr.get_address(), mr.get_size() );
fc::raw::unpack(ds, _next_id);
try {
vector<char> tmp;
while( true )
{
fc::raw::unpack( ds, tmp );
load( tmp );
}
} catch ( const fc::exception& ){}
}
virtual void save( const path& db ) override
{
std::ofstream out( db.generic_string(), std::ofstream::binary );
out.write( (char*)&_next_id, sizeof(_next_id) );
this->inspect_all_objects( [&]( const object& o ) {
auto vec = fc::raw::pack( static_cast<const object_type&>(o) );
auto packed_vec = fc::raw::pack( vec );
out.write( packed_vec.data(), packed_vec.size() );
});
}
virtual const object& load( const std::vector<char>& data )override
{
return DerivedIndex::insert( fc::raw::unpack<object_type>( data ) );
}
virtual void open( const shared_ptr<graphene::db::level_map<object_id_type, vector<char> >>& db )override
{
auto first = object_id_type( DerivedIndex::object_type::space_id, DerivedIndex::object_type::type_id, 0 );
auto last = object_id_type( DerivedIndex::object_type::space_id, DerivedIndex::object_type::type_id+1, 0 );
auto itr = db->lower_bound( first );
while( itr.valid() && itr.key() < last )
{
load( itr.value() );
++itr;
}
}
virtual const object& create(const std::function<void(object&)>& constructor )override
{
const auto& result = DerivedIndex::create( constructor );
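
save() defines a simple container format: the raw bytes of _next_id followed by one fc::raw-packed vector<char> per object, and open() unpacks records in a loop, treating the eventual fc::exception as end-of-file. (save() writes _next_id's raw bytes while open() fc::raw-unpacks it; the two agree, on little-endian hosts at least, because object_id_type packs as its raw 64-bit payload.) Packing a vector<char> prepends a varint length, so the file is a stream of length-prefixed blobs. A standalone sketch of that framing with standard types only, assuming the varint encoding matches fc::unsigned_int:

#include <cstdint>
#include <fstream>
#include <vector>

// Write one record as <varint length><bytes>, mirroring fc::raw::pack(vector<char>).
void write_record( std::ofstream& out, const std::vector<char>& rec )
{
    uint64_t n = rec.size();
    do {
        uint8_t byte = n & 0x7f;
        n >>= 7;
        if( n ) byte |= 0x80;           // high bit: more length bytes follow
        out.put( char( byte ) );
    } while( n );
    out.write( rec.data(), rec.size() );
}

// Read one record; returns false on a clean end-of-file between records.
bool read_record( std::ifstream& in, std::vector<char>& rec )
{
    uint64_t n = 0;
    int shift = 0, c;
    while( (c = in.get()) != EOF )
    {
        n |= uint64_t( c & 0x7f ) << shift;
        shift += 7;
        if( !(c & 0x80) )
        {
            rec.resize( n );
            in.read( rec.data(), n );
            return in.good();
        }
    }
    return false;
}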

libraries/db/include/graphene/db/level_map.hpp

@@ -1,462 +0,0 @@
/*
* Copyright (c) 2015, Cryptonomex, Inc.
* All rights reserved.
*
* This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and
* the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification,
* are permitted until September 8, 2015, provided that the following conditions are met:
*
* 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <leveldb/cache.h>
#include <leveldb/comparator.h>
#include <leveldb/db.h>
#include <leveldb/write_batch.h>
#include <graphene/db/exception.hpp>
#include <graphene/db/upgrade_leveldb.hpp>
#include <fc/filesystem.hpp>
#include <fc/io/json.hpp>
#include <fc/io/raw.hpp>
#include <fc/reflect/reflect.hpp>
#include <fstream>
namespace graphene { namespace db {
namespace ldb = leveldb;
/**
* @brief implements a high-level API on top of Level DB that stores items using fc::raw / reflection
*/
template<typename Key, typename Value>
class level_map
{
public:
void open( const fc::path& dir, bool create = true, size_t cache_size = 0 )
{ try {
FC_ASSERT( !is_open(), "Database is already open!" );
ldb::Options opts;
opts.comparator = &_comparer;
opts.create_if_missing = create;
opts.max_open_files = 64;
opts.compression = leveldb::kNoCompression;
if( cache_size > 0 )
{
opts.write_buffer_size = cache_size / 4; // up to two write buffers may be held in memory simultaneously
_cache.reset( leveldb::NewLRUCache( cache_size / 2 ) );
opts.block_cache = _cache.get();
}
if( ldb::kMajorVersion > 1 || ( leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16 ) )
{
// LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
// on corruption in later versions.
opts.paranoid_checks = true;
}
_read_options.verify_checksums = true;
_iter_options.verify_checksums = true;
_iter_options.fill_cache = false;
_sync_options.sync = true;
// Given path must exist to succeed toNativeAnsiPath
fc::create_directories( dir );
std::string ldbPath = dir.to_native_ansi_path();
ldb::DB* ndb = nullptr;
const auto ntrxstat = ldb::DB::Open( opts, ldbPath.c_str(), &ndb );
if( !ntrxstat.ok() )
{
elog( "Failure opening database: ${db}\nStatus: ${msg}", ("db",dir)("msg",ntrxstat.ToString()) );
FC_THROW_EXCEPTION( level_map_open_failure, "Failure opening database: ${db}\nStatus: ${msg}",
("db",dir)("msg",ntrxstat.ToString()) );
}
_db.reset( ndb );
try_upgrade_db( dir, ndb, fc::get_typename<Value>::name(), sizeof( Value ) );
} FC_CAPTURE_AND_RETHROW( (dir)(create)(cache_size) ) }
bool is_open()const
{
return !!_db;
}
void close()
{
_db.reset();
_cache.reset();
}
fc::optional<Value> fetch_optional( const Key& k )const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
auto itr = find( k );
if( itr.valid() ) return itr.value();
return fc::optional<Value>();
} FC_RETHROW_EXCEPTIONS( warn, "" ) }
Value fetch( const Key& k )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::vector<char> kslice = fc::raw::pack( k );
ldb::Slice ks( kslice.data(), kslice.size() );
std::string value;
auto status = _db->Get( _read_options, ks, &value );
if( status.IsNotFound() )
{
FC_THROW_EXCEPTION( fc::key_not_found_exception, "unable to find key ${key}", ("key",k) );
}
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
fc::datastream<const char*> ds(value.c_str(), value.size());
Value tmp;
fc::raw::unpack(ds, tmp);
return tmp;
} FC_RETHROW_EXCEPTIONS( warn, "failure fetching key ${key}", ("key",k) ); }
class iterator
{
public:
iterator(){}
bool valid()const
{
return _it && _it->Valid();
}
Key key()const
{
Key tmp_key;
fc::datastream<const char*> ds2( _it->key().data(), _it->key().size() );
fc::raw::unpack( ds2, tmp_key );
return tmp_key;
}
Value value()const
{
Value tmp_val;
fc::datastream<const char*> ds( _it->value().data(), _it->value().size() );
fc::raw::unpack( ds, tmp_val );
return tmp_val;
}
iterator& operator++() { _it->Next(); return *this; }
iterator& operator--() { _it->Prev(); return *this; }
protected:
friend class level_map;
iterator( ldb::Iterator* it )
:_it(it){}
std::shared_ptr<ldb::Iterator> _it;
};
iterator begin() const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->SeekToFirst();
if( itr._it->status().IsNotFound() )
{
FC_THROW_EXCEPTION( fc::key_not_found_exception, "" );
}
if( !itr._it->status().ok() )
{
FC_THROW_EXCEPTION( level_map_failure, "database error: ${msg}", ("msg", itr._it->status().ToString() ) );
}
if( itr.valid() )
{
return itr;
}
return iterator();
} FC_RETHROW_EXCEPTIONS( warn, "error seeking to first" ) }
iterator find( const Key& key )const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice key_slice;
/** avoid dynamic memory allocation at this step if possible, most
* keys should be relatively small in size and not require dynamic
* memory allocation to serialize the key.
*/
fc::array<char,256+sizeof(Key)> stack_buffer;
size_t pack_size = fc::raw::pack_size(key);
if( pack_size <= stack_buffer.size() )
{
fc::datastream<char*> ds( stack_buffer.data, stack_buffer.size() );
fc::raw::pack( ds ,key );
key_slice = ldb::Slice( stack_buffer.data, pack_size );
}
else
{
auto kslice = fc::raw::pack( key );
key_slice = ldb::Slice( kslice.data(), kslice.size() );
}
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->Seek( key_slice );
if( itr.valid() && itr.key() == key )
{
return itr;
}
return iterator();
} FC_RETHROW_EXCEPTIONS( warn, "error finding ${key}", ("key",key) ) }
iterator lower_bound( const Key& key )const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::vector<char> kslice = fc::raw::pack( key );
ldb::Slice key_slice( kslice.data(), kslice.size() );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->Seek( key_slice );
return itr;
} FC_RETHROW_EXCEPTIONS( warn, "error finding ${key}", ("key",key) ) }
iterator last( )const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->SeekToLast();
return itr;
} FC_RETHROW_EXCEPTIONS( warn, "error finding last" ) }
bool last( Key& k )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::unique_ptr<ldb::Iterator> it( _db->NewIterator( _iter_options ) );
FC_ASSERT( it != nullptr );
it->SeekToLast();
if( !it->Valid() )
{
return false;
}
fc::datastream<const char*> ds2( it->key().data(), it->key().size() );
fc::raw::unpack( ds2, k );
return true;
} FC_RETHROW_EXCEPTIONS( warn, "error reading last item from database" ); }
bool last( Key& k, Value& v )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::unique_ptr<ldb::Iterator> it( _db->NewIterator( _iter_options ) );
FC_ASSERT( it != nullptr );
it->SeekToLast();
if( !it->Valid() )
{
return false;
}
fc::datastream<const char*> ds( it->value().data(), it->value().size() );
fc::raw::unpack( ds, v );
fc::datastream<const char*> ds2( it->key().data(), it->key().size() );
fc::raw::unpack( ds2, k );
return true;
} FC_RETHROW_EXCEPTIONS( warn, "error reading last item from database" ); }
/** this class allows batched, atomic database writes.
* usage:
* {
* write_batch batch = _db.create_batch();
* batch.store(key1, value1);
* batch.store(key2, value2);
* }
* when the batch goes out of scope, the operations are committed to the database
*/
class write_batch
{
private:
leveldb::WriteBatch _batch;
level_map* _map = nullptr;
leveldb::WriteOptions _write_options;
friend class level_map;
write_batch( level_map* map, bool sync = false ) : _map(map)
{
_write_options.sync = sync;
}
public:
~write_batch()
{
try
{
commit();
}
catch (const fc::canceled_exception&)
{
throw;
}
catch (const fc::exception&)
{
// we're in a destructor, nothing we can do...
}
}
void commit()
{
try
{
FC_ASSERT(_map->is_open(), "Database is not open!");
ldb::Status status = _map->_db->Write( _write_options, &_batch );
if (!status.ok())
FC_THROW_EXCEPTION(level_map_failure, "database error while applying batch: ${msg}", ("msg", status.ToString()));
_batch.Clear();
}
FC_RETHROW_EXCEPTIONS(warn, "error applying batch");
}
void abort()
{
_batch.Clear();
}
void store( const Key& k, const Value& v )
{
std::vector<char> kslice = fc::raw::pack(k);
ldb::Slice ks(kslice.data(), kslice.size());
auto vec = fc::raw::pack(v);
ldb::Slice vs(vec.data(), vec.size());
_batch.Put(ks, vs);
}
void remove( const Key& k )
{
std::vector<char> kslice = fc::raw::pack(k);
ldb::Slice ks(kslice.data(), kslice.size());
_batch.Delete(ks);
}
};
write_batch create_batch( bool sync = false )
{
FC_ASSERT( is_open(), "Database is not open!" );
return write_batch( this, sync );
}
void store(const Key& k, const Value& v, bool sync = false)
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::vector<char> kslice = fc::raw::pack( k );
ldb::Slice ks( kslice.data(), kslice.size() );
auto vec = fc::raw::pack(v);
ldb::Slice vs( vec.data(), vec.size() );
auto status = _db->Put( sync ? _sync_options : _write_options, ks, vs );
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
} FC_RETHROW_EXCEPTIONS( warn, "error storing ${key} = ${value}", ("key",k)("value",v) ); }
void remove( const Key& k, bool sync = false )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::vector<char> kslice = fc::raw::pack( k );
ldb::Slice ks( kslice.data(), kslice.size() );
auto status = _db->Delete( sync ? _sync_options : _write_options, ks );
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
} FC_RETHROW_EXCEPTIONS( warn, "error removing ${key}", ("key",k) ); }
void export_to_json( const fc::path& path )const
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
FC_ASSERT( !fc::exists( path ) );
std::ofstream fs( path.string() );
fs.write( "[\n", 2 );
auto iter = begin();
while( iter.valid() )
{
auto str = fc::json::to_pretty_string( std::make_pair( iter.key(), iter.value() ) );
if( (++iter).valid() ) str += ",";
str += "\n";
fs.write( str.c_str(), str.size() );
}
fs.write( "]", 1 );
} FC_CAPTURE_AND_RETHROW( (path) ) }
// note: this loops through all the items in the database, so it's not exactly fast. it's intended for debugging, nothing else.
size_t size() const
{
FC_ASSERT( is_open(), "Database is not open!" );
iterator it = begin();
size_t count = 0;
while (it.valid())
{
++count;
++it;
}
return count;
}
private:
class key_compare : public leveldb::Comparator
{
public:
int Compare( const leveldb::Slice& a, const leveldb::Slice& b )const
{
Key ak,bk;
fc::datastream<const char*> dsa( a.data(), a.size() );
fc::raw::unpack( dsa, ak );
fc::datastream<const char*> dsb( b.data(), b.size() );
fc::raw::unpack( dsb, bk );
if( ak < bk ) return -1;
if( ak == bk ) return 0;
return 1;
}
const char* Name()const { return "key_compare"; }
void FindShortestSeparator( std::string*, const leveldb::Slice& )const{}
void FindShortSuccessor( std::string* )const{};
};
std::unique_ptr<leveldb::DB> _db;
std::unique_ptr<leveldb::Cache> _cache;
key_compare _comparer;
ldb::ReadOptions _read_options;
ldb::ReadOptions _iter_options;
ldb::WriteOptions _write_options;
ldb::WriteOptions _sync_options;
};
} } // graphene::db

libraries/db/include/graphene/db/level_pod_map.hpp

@@ -1,309 +0,0 @@
/*
* Copyright (c) 2015, Cryptonomex, Inc.
* All rights reserved.
*
* This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and
* the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification,
* are permitted until September 8, 2015, provided that the following conditions are met:
*
* 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <leveldb/cache.h>
#include <leveldb/comparator.h>
#include <leveldb/db.h>
#include <graphene/db/exception.hpp>
#include <graphene/db/upgrade_leveldb.hpp>
#include <fc/filesystem.hpp>
#include <fc/io/raw.hpp>
#include <fc/reflect/reflect.hpp>
namespace graphene { namespace db {
namespace ldb = leveldb;
/**
* @brief implements a high-level API on top of Level DB that stores items using fc::raw / reflection
* @note Key must be a POD type
*/
template<typename Key, typename Value>
class level_pod_map
{
public:
void open( const fc::path& dir, bool create = true, size_t cache_size = 0 )
{ try {
FC_ASSERT( !is_open(), "Database is already open!" );
ldb::Options opts;
opts.comparator = &_comparer;
opts.create_if_missing = create;
opts.max_open_files = 64;
opts.compression = leveldb::kNoCompression;
if( cache_size > 0 )
{
opts.write_buffer_size = cache_size / 4; // up to two write buffers may be held in memory simultaneously
_cache.reset( leveldb::NewLRUCache( cache_size / 2 ) );
opts.block_cache = _cache.get();
}
if( ldb::kMajorVersion > 1 || ( leveldb::kMajorVersion == 1 && leveldb::kMinorVersion >= 16 ) )
{
// LevelDB versions before 1.16 consider short writes to be corruption. Only trigger error
// on corruption in later versions.
opts.paranoid_checks = true;
}
_read_options.verify_checksums = true;
_iter_options.verify_checksums = true;
_iter_options.fill_cache = false;
_sync_options.sync = true;
// Given path must exist to succeed toNativeAnsiPath
fc::create_directories( dir );
std::string ldbPath = dir.to_native_ansi_path();
ldb::DB* ndb = nullptr;
const auto ntrxstat = ldb::DB::Open( opts, ldbPath.c_str(), &ndb );
if( !ntrxstat.ok() )
{
elog( "Failure opening database: ${db}\nStatus: ${msg}", ("db",dir)("msg",ntrxstat.ToString()) );
FC_THROW_EXCEPTION( level_pod_map_open_failure, "Failure opening database: ${db}\nStatus: ${msg}",
("db",dir)("msg",ntrxstat.ToString()) );
}
_db.reset( ndb );
try_upgrade_db( dir, ndb, fc::get_typename<Value>::name(), sizeof( Value ) );
} FC_CAPTURE_AND_RETHROW( (dir)(create)(cache_size) ) }
bool is_open()const
{
return !!_db;
}
void close()
{
_db.reset();
_cache.reset();
}
fc::optional<Value> fetch_optional( const Key& k )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
auto itr = find( k );
if( itr.valid() ) return itr.value();
return fc::optional<Value>();
} FC_RETHROW_EXCEPTIONS( warn, "" ) }
Value fetch( const Key& key )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice key_slice( (char*)&key, sizeof(key) );
std::string value;
auto status = _db->Get( _read_options, key_slice, &value );
if( status.IsNotFound() )
{
FC_THROW_EXCEPTION( fc::key_not_found_exception, "unable to find key ${key}", ("key",key) );
}
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_pod_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
fc::datastream<const char*> datastream(value.c_str(), value.size());
Value tmp;
fc::raw::unpack(datastream, tmp);
return tmp;
} FC_RETHROW_EXCEPTIONS( warn, "error fetching key ${key}", ("key",key) ); }
class iterator
{
public:
iterator(){}
bool valid()const
{
return _it && _it->Valid();
}
Key key()const
{
FC_ASSERT( sizeof(Key) == _it->key().size() );
return *((Key*)_it->key().data());
}
Value value()const
{
Value tmp_val;
fc::datastream<const char*> ds( _it->value().data(), _it->value().size() );
fc::raw::unpack( ds, tmp_val );
return tmp_val;
}
iterator& operator++() { _it->Next(); return *this; }
iterator& operator--() { _it->Prev(); return *this; }
protected:
friend class level_pod_map;
iterator( ldb::Iterator* it )
:_it(it){}
std::shared_ptr<ldb::Iterator> _it;
};
iterator begin()
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->SeekToFirst();
if( itr._it->status().IsNotFound() )
{
FC_THROW_EXCEPTION( fc::key_not_found_exception, "" );
}
if( !itr._it->status().ok() )
{
FC_THROW_EXCEPTION( level_pod_map_failure, "database error: ${msg}", ("msg", itr._it->status().ToString() ) );
}
if( itr.valid() )
{
return itr;
}
return iterator();
} FC_RETHROW_EXCEPTIONS( warn, "error seeking to first" ) }
iterator find( const Key& key )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice key_slice( (char*)&key, sizeof(key) );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->Seek( key_slice );
if( itr.valid() && itr.key() == key )
{
return itr;
}
return iterator();
} FC_RETHROW_EXCEPTIONS( warn, "error finding ${key}", ("key",key) ) }
iterator lower_bound( const Key& key )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice key_slice( (char*)&key, sizeof(key) );
iterator itr( _db->NewIterator( _iter_options ) );
itr._it->Seek( key_slice );
if( itr.valid() )
{
return itr;
}
return iterator();
} FC_RETHROW_EXCEPTIONS( warn, "error finding ${key}", ("key",key) ) }
bool last( Key& k )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::unique_ptr<ldb::Iterator> it( _db->NewIterator( _iter_options ) );
FC_ASSERT( it != nullptr );
it->SeekToLast();
if( !it->Valid() )
{
return false;
}
FC_ASSERT( sizeof( Key) == it->key().size() );
k = *((Key*)it->key().data());
return true;
} FC_RETHROW_EXCEPTIONS( warn, "error reading last item from database" ); }
bool last( Key& k, Value& v )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
std::unique_ptr<ldb::Iterator> it( _db->NewIterator( _iter_options ) );
FC_ASSERT( it != nullptr );
it->SeekToLast();
if( !it->Valid() )
{
return false;
}
fc::datastream<const char*> ds( it->value().data(), it->value().size() );
fc::raw::unpack( ds, v );
FC_ASSERT( sizeof( Key) == it->key().size() );
k = *((Key*)it->key().data());
return true;
} FC_RETHROW_EXCEPTIONS( warn, "error reading last item from database" ); }
void store( const Key& k, const Value& v, bool sync = false )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice ks( (char*)&k, sizeof(k) );
auto vec = fc::raw::pack(v);
ldb::Slice vs( vec.data(), vec.size() );
auto status = _db->Put( sync ? _sync_options : _write_options, ks, vs );
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_pod_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
} FC_RETHROW_EXCEPTIONS( warn, "error storing ${key} = ${value}", ("key",k)("value",v) ); }
void remove( const Key& k, bool sync = false )
{ try {
FC_ASSERT( is_open(), "Database is not open!" );
ldb::Slice ks( (char*)&k, sizeof(k) );
auto status = _db->Delete( sync ? _sync_options : _write_options, ks );
if( status.IsNotFound() )
{
FC_THROW_EXCEPTION( fc::key_not_found_exception, "unable to find key ${key}", ("key",k) );
}
if( !status.ok() )
{
FC_THROW_EXCEPTION( level_pod_map_failure, "database error: ${msg}", ("msg", status.ToString() ) );
}
} FC_RETHROW_EXCEPTIONS( warn, "error removing ${key}", ("key",k) ); }
private:
class key_compare : public leveldb::Comparator
{
public:
int Compare( const leveldb::Slice& a, const leveldb::Slice& b )const
{
FC_ASSERT( (a.size() == sizeof(Key)) && (b.size() == sizeof( Key )) );
Key* ak = (Key*)a.data();
Key* bk = (Key*)b.data();
if( *ak < *bk ) return -1;
if( *ak == *bk ) return 0;
return 1;
}
const char* Name()const { return "key_compare"; }
void FindShortestSeparator( std::string*, const leveldb::Slice& )const{}
void FindShortSuccessor( std::string* )const{};
};
std::unique_ptr<leveldb::DB> _db;
std::unique_ptr<leveldb::Cache> _cache;
key_compare _comparer;
ldb::ReadOptions _read_options;
ldb::ReadOptions _iter_options;
ldb::WriteOptions _write_options;
ldb::WriteOptions _sync_options;
};
} } // graphene::db

libraries/db/include/graphene/db/object_database.hpp

@@ -20,9 +20,6 @@
#include <graphene/db/index.hpp>
#include <graphene/db/undo_database.hpp>
#include <graphene/db/level_map.hpp>
#include <graphene/db/level_pod_map.hpp>
#include <fc/log/logger.hpp>
#include <map>
@@ -150,7 +147,6 @@ namespace graphene { namespace db {
fc::path _data_dir;
vector< vector< unique_ptr<index> > > _index;
shared_ptr<db::level_map<object_id_type, vector<char> >> _object_id_to_object;
};
} } // graphene::db

libraries/db/object_database.cpp

@@ -28,19 +28,12 @@ object_database::object_database()
{
_index.resize(255);
_undo_db.enable();
_object_id_to_object = std::make_shared<db::level_map<object_id_type,vector<char>>>();
}
object_database::~object_database(){}
void object_database::close()
{
if( _object_id_to_object->is_open() )
{
flush();
_object_id_to_object->close();
}
}
const object* object_database::find_object( object_id_type id )const
@@ -72,20 +65,14 @@ index& object_database::get_mutable_index(uint8_t space_id, uint8_t type_id)
void object_database::flush()
{
if( !_object_id_to_object->is_open() )
return;
vector<object_id_type> next_ids;
for( auto& space : _index )
for( const unique_ptr<index>& type_index : space )
if( type_index )
{
type_index->inspect_all_objects([&] (const object& obj) {
_object_id_to_object->store(obj.id, obj.pack());
});
next_ids.push_back( type_index->get_next_id() );
}
_object_id_to_object->store( object_id_type(), fc::raw::pack(next_ids) );
ilog("Save object_database in ${d}", ("d", _data_dir));
for( uint32_t space = 0; space < _index.size(); ++space )
{
const auto types = _index[space].size();
for( uint32_t type = 0; type < types; ++type )
if( _index[space][type] )
_index[space][type]->save( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) );
}
}
void object_database::wipe(const fc::path& data_dir)
@@ -99,35 +86,12 @@ void object_database::wipe(const fc::path& data_dir)
void object_database::open( const fc::path& data_dir )
{ try {
ilog("Open object_database in ${d}", ("d", data_dir));
_object_id_to_object->open( data_dir / "object_database" / "objects" );
for( auto& space : _index )
{
for( auto& type_index : space )
{
if( type_index )
{
type_index->open( _object_id_to_object );
}
}
}
try {
auto next_ids = fc::raw::unpack<vector<object_id_type>>( _object_id_to_object->fetch( object_id_type() ) );
wdump((next_ids));
for( auto id : next_ids )
{
try {
get_mutable_index( id ).set_next_id( id );
} FC_CAPTURE_AND_RETHROW( (id) );
}
}
catch ( const fc::exception& e )
{
// dlog( "unable to fetch next ids, must be new object_database\n ${e}", ("e",e.to_detail_string()) );
}
_data_dir = data_dir;
for( uint32_t space = 0; space < _index.size(); ++space )
for( uint32_t type = 0; type < _index[space].size(); ++type )
if( _index[space][type] )
_index[space][type]->open( _data_dir / "object_database" / fc::to_string(space)/fc::to_string(type) );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
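
With the LevelDB object store gone, each index persists itself to one flat file named by its space and type ids, and flush() rewrites those files in full: every object is re-packed on every flush, a cost the incremental key/value store did not have. (Note that flush() does not create the directory tree first; that may be among the remaining loose ends.) The path scheme, as a hypothetical helper with illustrative example ids:

#include <fc/filesystem.hpp>
#include <fc/string.hpp>
#include <cstdint>

// Hypothetical helper reproducing the path scheme flush()/open() use above:
// one flat file per index, named by the object's space and type ids.
fc::path index_file( const fc::path& data_dir, uint32_t space_id, uint32_t type_id )
{
    return data_dir / "object_database"
                    / fc::to_string( space_id )    // e.g. "1"
                    / fc::to_string( type_id );    // e.g. "2"
}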

libraries/db/upgrade_leveldb.cpp

@@ -1,114 +0,0 @@
/*
* Copyright (c) 2015, Cryptonomex, Inc.
* All rights reserved.
*
* This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and
* the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification,
* are permitted until September 8, 2015, provided that the following conditions are met:
*
* 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <graphene/db/exception.hpp>
#include <graphene/db/upgrade_leveldb.hpp>
#include <fc/log/logger.hpp>
#include <boost/filesystem.hpp>
#include <fstream>
#include <boost/regex.hpp>
#include <boost/filesystem/fstream.hpp>
namespace graphene { namespace db {
upgrade_db_mapper& upgrade_db_mapper::instance()
{
static upgrade_db_mapper mapper;
return mapper;
}
int32_t upgrade_db_mapper::add_type( const std::string& type_name, const upgrade_db_function& function)
{
_upgrade_db_function_registry[type_name] = function;
return 0;
}
// this code has no graphene dependencies, and it
// could be moved to fc, if fc ever adds a leveldb dependency
void try_upgrade_db( const fc::path& dir, leveldb::DB* dbase, const char* record_type, size_t record_type_size )
{
size_t old_record_type_size = 0;
std::string old_record_type;
fc::path record_type_filename = dir / "RECORD_TYPE";
//if no RECORD_TYPE file exists
if ( !boost::filesystem::exists( record_type_filename ) )
{
//must be original type for the database
old_record_type = record_type;
int last_char = old_record_type.length() - 1;
//strip version number from current_record_name and append 0 to set old_record_type (e.g. mytype0)
while (last_char >= 0 && isdigit(old_record_type[last_char]))
{
--last_char;
}
//upgradeable record types should always end with version number
if( 'v' != old_record_type[last_char] )
{
//ilog("Database ${db} is not upgradeable",("db",dir.to_native_ansi_path()));
return;
}
++last_char;
old_record_type[last_char] = '0';
old_record_type.resize(last_char+1);
}
else //read record type from file
{
boost::filesystem::ifstream is(record_type_filename);
char buffer[120];
is.getline(buffer,120);
old_record_type = buffer;
is >> old_record_type_size;
}
if (old_record_type != record_type)
{
//check if upgrade function in registry
auto upgrade_function_itr = upgrade_db_mapper::instance()._upgrade_db_function_registry.find( old_record_type );
if (upgrade_function_itr != upgrade_db_mapper::instance()._upgrade_db_function_registry.end())
{
ilog("Upgrading database ${db} from ${old} to ${new}",("db",dir.preferred_string())
("old",old_record_type)
("new",record_type));
//update database's RECORD_TYPE to new record type name
boost::filesystem::ofstream os(record_type_filename);
os << record_type << std::endl;
os << record_type_size;
//upgrade the database using upgrade function
upgrade_function_itr->second(dbase);
}
else
{
elog("In ${db}, record types ${old} and ${new} do not match, but no upgrade function found!",
("db",dir.preferred_string())("old",old_record_type)("new",record_type));
}
}
else if (old_record_type_size == 0) //if record type file never created, create it now
{
boost::filesystem::ofstream os(record_type_filename);
os << record_type << std::endl;
os << record_type_size;
}
else if (old_record_type_size != record_type_size)
{
elog("In ${db}, record type matches ${new}, but record sizes do not match!",
("db",dir.preferred_string())("new",record_type));
}
}
} } // namespace graphene::db

libraries/leveldb
@@ -1 +0,0 @@
Subproject commit 7d41e6f89ff04ce9e6a742932924796f69c6e23d

libraries/net/CMakeLists.txt

@@ -10,7 +10,7 @@ set(SOURCES node.cpp
add_library( graphene_net ${SOURCES} ${HEADERS} )
target_link_libraries( graphene_net
PUBLIC fc graphene_db leveldb )
PUBLIC fc graphene_db )
target_include_directories( graphene_net
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include"
PRIVATE "${CMAKE_SOURCE_DIR}/libraries/chain/include"

libraries/net/peer_database.cpp

@@ -28,7 +28,7 @@
#include <fc/io/json.hpp>
#include <graphene/net/peer_database.hpp>
#include <graphene/db/level_pod_map.hpp>
//#include <graphene/db/level_pod_map.hpp>
@@ -76,8 +76,8 @@ namespace graphene { namespace net {
>
> potential_peer_set;
//private:
typedef graphene::db::level_pod_map<uint32_t, potential_peer_record> potential_peer_leveldb;
potential_peer_leveldb _leveldb;
//typedef graphene::db::level_pod_map<uint32_t, potential_peer_record> potential_peer_leveldb;
//potential_peer_leveldb _leveldb;
potential_peer_set _potential_peer_set;
@@ -109,6 +109,7 @@ namespace graphene { namespace net {
void peer_database_impl::open(const fc::path& databaseFilename)
{
/*
try
{
_leveldb.open(databaseFilename);
@@ -135,16 +136,18 @@ namespace graphene { namespace net {
iter = _potential_peer_set.erase(iter);
}
}
*/
}
void peer_database_impl::close()
{
_leveldb.close();
//_leveldb.close();
_potential_peer_set.clear();
}
void peer_database_impl::clear()
{
/*
auto iter = _leveldb.begin();
while (iter.valid())
{
@@ -159,6 +162,7 @@ namespace graphene { namespace net {
// shouldn't happen, and if it does there's not much we can do
}
}
*/
_potential_peer_set.clear();
}
@@ -167,7 +171,7 @@ namespace graphene { namespace net {
auto iter = _potential_peer_set.get<endpoint_index>().find(endpointToErase);
if (iter != _potential_peer_set.get<endpoint_index>().end())
{
_leveldb.remove(iter->database_key);
//_leveldb.remove(iter->database_key);
_potential_peer_set.get<endpoint_index>().erase(iter);
}
}
@@ -178,16 +182,16 @@ namespace graphene { namespace net {
if (iter != _potential_peer_set.get<endpoint_index>().end())
{
_potential_peer_set.get<endpoint_index>().modify(iter, [&updatedRecord](potential_peer_database_entry& entry) { entry.peer_record = updatedRecord; });
_leveldb.store(iter->database_key, updatedRecord);
//_leveldb.store(iter->database_key, updatedRecord);
}
else
{
uint32_t last_database_key = 0;
_leveldb.last(last_database_key);
//_leveldb.last(last_database_key);
uint32_t new_database_key = last_database_key + 1;
potential_peer_database_entry new_database_entry(new_database_key, updatedRecord);
_potential_peer_set.get<endpoint_index>().insert(new_database_entry);
_leveldb.store(new_database_key, updatedRecord);
//_leveldb.store(new_database_key, updatedRecord);
}
}
@@ -314,12 +318,14 @@ namespace graphene { namespace net {
std::vector<potential_peer_record> peer_database::get_all()const
{
std::vector<potential_peer_record> results;
/*
auto itr = my->_leveldb.begin();
while( itr.valid() )
{
results.push_back( itr.value() );
++itr;
}
*/
return results;
}
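
Peer persistence is switched off here rather than ported: records now live only in the in-memory multi-index, open(), clear(), and the store/remove calls are stubbed out, and get_all() returns an empty vector. A sketch of the flat-file replacement the rest of the commit points toward (the function name, file handling, and framing are assumptions, not part of this commit):

#include <fc/filesystem.hpp>
#include <fc/io/raw.hpp>
#include <fstream>
#include <vector>

// Hypothetical stand-in for the commented-out leveldb persistence: store each
// peer record as a length-prefixed fc::raw blob, the same framing the new
// index::save uses. Loading would be the mirror-image unpack loop.
template<typename PeerRecord>
void save_peers_sketch( const fc::path& file, const std::vector<PeerRecord>& peers )
{
    std::ofstream out( file.generic_string(), std::ofstream::binary );
    for( const auto& rec : peers )
    {
        std::vector<char> blob   = fc::raw::pack( rec );   // packed record
        std::vector<char> framed = fc::raw::pack( blob );  // varint length + bytes
        out.write( framed.data(), framed.size() );
    }
}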

View file

@@ -20,6 +20,7 @@
#include <graphene/app/application.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/key_object.hpp>
#include <fc/io/json.hpp>
using namespace graphene::db;