/*
 * Copyright (c) 2015 Cryptonomex, Inc., and contributors.
 *
 * The MIT License
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <graphene/db/object_database.hpp>
#include <graphene/db/undo_database.hpp>
#include <fc/reflect/variant.hpp>

namespace graphene { namespace db {
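
// undo_database keeps a stack of undo_state records describing how to revert
// recent changes to the owning object_database: the previous values of
// modified objects (old_values), the ids of newly created objects (new_ids),
// removed objects (removed), and the original next-id counters of the
// affected indexes (old_index_next_ids).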

void undo_database::enable() { _disabled = false; }
void undo_database::disable() { _disabled = true; }
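
// Opens a new undo session: the oldest recorded states are trimmed until the
// stack fits within max_size(), a fresh undo_state is pushed, and the active
// session count is incremented.  With force_enable = true, recording is
// switched on even if the database is currently disabled; the disable_on_exit
// flag passed to the returned session suggests the session restores the
// disabled state when it ends (the session type itself is declared in
// undo_database.hpp).
//
// Illustrative call pattern (a sketch only, not taken from this file):
//
//    auto session = undo_db.start_undo_session();
//    // ... mutate objects; the changes reach us via on_create/on_modify/on_remove
//    undo_db.commit();   // keep the changes, or undo_db.undo() to roll them back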
undo_database::session undo_database::start_undo_session( bool force_enable )
{
   if( _disabled && !force_enable ) return session(*this);
   bool disable_on_exit = _disabled && force_enable;
   if( force_enable )
      _disabled = false;

   while( size() > max_size() )
      _stack.pop_front();

   _stack.emplace_back();
   ++_active_sessions;
   return session(*this, disable_on_exit );
}
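
// Hook invoked when an object has been created.  Remembers the id so undo()
// can remove the object again, and records the first id handed out by the
// object's index in this state so the index's next-id counter can be rewound.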
void undo_database::on_create( const object& obj )
{
   if( _disabled ) return;

   if( _stack.empty() )
      _stack.emplace_back();
   auto& state = _stack.back();
   auto index_id = object_id_type( obj.id.space(), obj.id.type(), 0 );
   auto itr = state.old_index_next_ids.find( index_id );
   if( itr == state.old_index_next_ids.end() )
      state.old_index_next_ids[index_id] = obj.id;
   state.new_ids.insert(obj.id);
}
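
// Hook invoked when an object is modified.  Records a clone of the object as
// it currently stands, the first time it is touched within this state, so
// undo() can restore that snapshot.  Objects created within the same state
// are skipped, since undo() will simply remove them.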
void undo_database::on_modify( const object& obj )
{
   if( _disabled ) return;

   if( _stack.empty() )
      _stack.emplace_back();
   auto& state = _stack.back();
   if( state.new_ids.find(obj.id) != state.new_ids.end() )
      return;
   auto itr = state.old_values.find(obj.id);
   if( itr != state.old_values.end() ) return;
   state.old_values[obj.id] = obj.clone();
}
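
// Hook invoked when an object is removed.  An object created in this state is
// simply forgotten; for an object whose old value was already saved, that
// saved value is moved into 'removed'; otherwise a clone of the object is
// stored so undo() can re-insert it.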
void undo_database::on_remove( const object& obj )
{
   if( _disabled ) return;

   if( _stack.empty() )
      _stack.emplace_back();
   undo_state& state = _stack.back();
   if( state.new_ids.count(obj.id) )
   {
      state.new_ids.erase(obj.id);
      return;
   }
   if( state.old_values.count(obj.id) )
   {
      state.removed[obj.id] = std::move(state.old_values[obj.id]);
      state.old_values.erase(obj.id);
      return;
   }
   if( state.removed.count(obj.id) ) return;
   state.removed[obj.id] = obj.clone();
}
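
// Reverts everything recorded in the top undo_state: modified objects get
// their old values back, newly created objects are removed, index next-id
// counters are rewound, and removed objects are re-inserted.  Recording is
// disabled while the rollback is applied so the rollback itself is not
// recorded.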
void undo_database::undo()
{ try {
   FC_ASSERT( !_disabled );
   FC_ASSERT( _active_sessions > 0 );
   disable();

   auto& state = _stack.back();
   for( auto& item : state.old_values )
   {
      _db.modify( _db.get_object( item.second->id ), [&]( object& obj ){ obj.move_from( *item.second ); } );
   }

   for( auto ritr = state.new_ids.begin(); ritr != state.new_ids.end(); ++ritr )
   {
      _db.remove( _db.get_object(*ritr) );
   }

   for( auto& item : state.old_index_next_ids )
   {
      _db.get_mutable_index( item.first.space(), item.first.type() ).set_next_id( item.second );
   }

   for( auto& item : state.removed )
      _db.insert( std::move(*item.second) );

   _stack.pop_back();
   enable();
   --_active_sessions;
} FC_CAPTURE_AND_RETHROW() }
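
// Collapses the two topmost undo_states into one, so that a later undo()
// reverts the combined changes in a single step.  With only one active session
// and a single state on the stack there is nothing to merge into, so the state
// is simply dropped.  The composition rules are documented in the matrix below.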
void undo_database::merge()
{
   FC_ASSERT( _active_sessions > 0 );

   if( _active_sessions == 1 && _stack.size() == 1 )
   {
      _stack.pop_back();
      --_active_sessions;
      return;
   }
   FC_ASSERT( _stack.size() >= 2 );
   auto& state = _stack.back();
   auto& prev_state = _stack[_stack.size()-2];

   // An object's relationship to a state can be:
   // in new_ids            : new
   // in old_values (was=X) : upd(was=X)
   // in removed (was=X)    : del(was=X)
   // not in any of above   : nop
   //
   // When merging A=prev_state and B=state we have a 4x4 matrix of all possibilities:
   //
   //                   |--------------------- B ----------------------|
   //
   //                +------------+------------+------------+------------+
   //                | new        | upd(was=Y) | del(was=Y) | nop        |
   //   +------------+------------+------------+------------+------------+
   // / | new        | N/A        | new       A| nop       C| new       A|
   // | +------------+------------+------------+------------+------------+
   // | | upd(was=X) | N/A        | upd(was=X)A| del(was=X)C| upd(was=X)A|
   // A +------------+------------+------------+------------+------------+
   // | | del(was=X) | N/A        | N/A        | N/A        | del(was=X)A|
   // | +------------+------------+------------+------------+------------+
   // \ | nop        | new       B| upd(was=Y)B| del(was=Y)B| nop      AB|
   //   +------------+------------+------------+------------+------------+
   //
   // Each entry was composed by labelling what should occur in the given case.
   //
   // Type A means the composition of states contains the same entry as the first of the two merged states for that object.
   // Type B means the composition of states contains the same entry as the second of the two merged states for that object.
   // Type C means the composition of states contains an entry different from either of the merged states for that object.
   // Type N/A means the composition of states violates causal timing.
   // Type AB means both type A and type B simultaneously.
   //
   // The merge() operation is defined as modifying prev_state in-place to be the state object which represents the composition of
   // state A and B.
   //
   // Type A (and AB) can be implemented as a no-op; prev_state already contains the correct value for the merged state.
   // Type B (and AB) can be implemented by copying from state to prev_state.
   // Type C needs special case-by-case logic.
   // Type N/A can be ignored or assert(false) as it can only occur if prev_state and state have illegal values
   // (a serious logic error which should never happen).
   //

   // We can only be outside type A/AB (the nop path) if B is not nop, so it suffices to iterate through B's three containers.

   // *+upd
   for( auto& obj : state.old_values )
   {
      if( prev_state.new_ids.find(obj.second->id) != prev_state.new_ids.end() )
      {
         // new+upd -> new, type A
         continue;
      }
      if( prev_state.old_values.find(obj.second->id) != prev_state.old_values.end() )
      {
         // upd(was=X) + upd(was=Y) -> upd(was=X), type A
         continue;
      }
      // del+upd -> N/A
      assert( prev_state.removed.find(obj.second->id) == prev_state.removed.end() );
      // nop+upd(was=Y) -> upd(was=Y), type B
      prev_state.old_values[obj.second->id] = std::move(obj.second);
   }

   // *+new, but we assume the N/A cases don't happen, leaving type B nop+new -> new
   for( auto id : state.new_ids )
      prev_state.new_ids.insert(id);

   // old_index_next_ids can only be updated, iterate over *+upd cases
   for( auto& item : state.old_index_next_ids )
   {
      if( prev_state.old_index_next_ids.find( item.first ) == prev_state.old_index_next_ids.end() )
      {
         // nop+upd(was=Y) -> upd(was=Y), type B
         prev_state.old_index_next_ids[item.first] = item.second;
         continue;
      }
      else
      {
         // upd(was=X)+upd(was=Y) -> upd(was=X), type A
         // type A implementation is a no-op, as discussed above, so there is no code here
         continue;
      }
   }

   // *+del
   for( auto& obj : state.removed )
   {
      if( prev_state.new_ids.find(obj.second->id) != prev_state.new_ids.end() )
      {
         // new + del -> nop (type C)
         prev_state.new_ids.erase(obj.second->id);
         continue;
      }
      auto it = prev_state.old_values.find(obj.second->id);
      if( it != prev_state.old_values.end() )
      {
         // upd(was=X) + del(was=Y) -> del(was=X)
         prev_state.removed[obj.second->id] = std::move(it->second);
         prev_state.old_values.erase(obj.second->id);
         continue;
      }
      // del + del -> N/A
      assert( prev_state.removed.find( obj.second->id ) == prev_state.removed.end() );
      // nop + del(was=Y) -> del(was=Y)
      prev_state.removed[obj.second->id] = std::move(obj.second);
   }

   _stack.pop_back();
   --_active_sessions;
}
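
// Closes one session without reverting its changes: the active session count
// is decremented while the recorded state stays on the stack, so the changes
// can still be reverted later via pop_commit().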
void undo_database::commit()
{
   FC_ASSERT( _active_sessions > 0 );
   --_active_sessions;
}
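
// Reverts the most recently committed undo_state.  Only valid when no session
// is active; performs the same restore steps as undo(), guarded so that
// recording is re-enabled even if the rollback throws.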
void undo_database::pop_commit()
{
   FC_ASSERT( _active_sessions == 0 );
   FC_ASSERT( !_stack.empty() );

   disable();
   try {
      auto& state = _stack.back();

      for( auto& item : state.old_values )
      {
         _db.modify( _db.get_object( item.second->id ), [&]( object& obj ){ obj.move_from( *item.second ); } );
      }

      for( auto ritr = state.new_ids.begin(); ritr != state.new_ids.end(); ++ritr )
      {
         _db.remove( _db.get_object(*ritr) );
      }

      for( auto& item : state.old_index_next_ids )
      {
         _db.get_mutable_index( item.first.space(), item.first.type() ).set_next_id( item.second );
      }

      for( auto& item : state.removed )
         _db.insert( std::move(*item.second) );

      _stack.pop_back();
   }
   catch ( const fc::exception& e )
   {
      elog( "error popping commit ${e}", ("e", e.to_detail_string() ) );
      enable();
      throw;
   }
   enable();
}
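
// Returns the most recent undo_state on the stack; fails if the stack is empty.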
const undo_state& undo_database::head()const
{
   FC_ASSERT( !_stack.empty() );
   return _stack.back();
}

} } // graphene::db