// db_update.cpp
/*
|
|
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
|
|
*
|
|
* The MIT License
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
* in the Software without restriction, including without limitation the rights
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
* furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included in
|
|
* all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
* THE SOFTWARE.
|
|
*/
|
|
|
|
#include <graphene/chain/database.hpp>
|
|
#include <graphene/chain/db_with.hpp>
|
|
|
|
#include <graphene/chain/asset_object.hpp>
|
|
#include <graphene/chain/betting_market_object.hpp>
|
|
#include <graphene/chain/game_object.hpp>
|
|
#include <graphene/chain/global_property_object.hpp>
|
|
#include <graphene/chain/hardfork.hpp>
|
|
#include <graphene/chain/market_object.hpp>
|
|
#include <graphene/chain/offer_object.hpp>
|
|
#include <graphene/chain/proposal_object.hpp>
|
|
#include <graphene/chain/son_proposal_object.hpp>
|
|
#include <graphene/chain/tournament_object.hpp>
|
|
#include <graphene/chain/transaction_object.hpp>
|
|
#include <graphene/chain/withdraw_permission_object.hpp>
|
|
#include <graphene/chain/witness_object.hpp>
|
|
|
|
#include <graphene/chain/protocol/fee_schedule.hpp>
|
|
|
|
#include <fc/uint128.hpp>
|
|
|
|
namespace graphene { namespace chain {
|
|
|
|
void database::update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks )
{
   // Refresh the dynamic global properties after applying block `b`: chain
   // randomness, missed-block accounting, head-block bookkeeping, slot
   // counters, and the undo/fork history window sizes.
   const dynamic_global_property_object& _dgp = get_dynamic_global_properties();

   // dynamic global properties updating
   modify( _dgp, [&b,this,missed_blocks]( dynamic_global_property_object& dgp ){
      // fold the block's revealed secret into the accumulated chain randomness
      secret_hash_type::encoder enc;
      fc::raw::pack( enc, dgp.random );
      fc::raw::pack( enc, b.previous_secret );
      dgp.random = enc.result();

      // reseed the deterministic RNG consumed by get_random_bits()
      _random_number_generator = fc::hash_ctr_rng<secret_hash_type, 20>(dgp.random.data());

      const uint32_t block_num = b.block_num();
      if( BOOST_UNLIKELY( block_num == 1 ) )
         dgp.recently_missed_count = 0;   // first block after genesis: nothing missed yet
      else if( _checkpoints.size() && _checkpoints.rbegin()->first >= block_num )
         dgp.recently_missed_count = 0;   // still inside checkpointed history
      else if( missed_blocks )
         dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks;
      else if( dgp.recently_missed_count > GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT )
         dgp.recently_missed_count -= GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT;
      else if( dgp.recently_missed_count > 0 )
         dgp.recently_missed_count--;

      dgp.head_block_number = block_num;
      dgp.head_block_id = b.id();
      dgp.time = b.timestamp;
      dgp.current_witness = b.witness;
      // shift in a 1 for the produced block, then a 0 for each missed slot
      dgp.recent_slots_filled = (
           (dgp.recent_slots_filled << 1)
           + 1) << missed_blocks;
      dgp.current_aslot += missed_blocks+1;
   });

   if( !(get_node_properties().skip_flags & skip_undo_history_check) )
   {
      GRAPHENE_ASSERT( _dgp.head_block_number - _dgp.last_irreversible_block_num < GRAPHENE_MAX_UNDO_HISTORY, undo_database_exception,
                 "The database does not have enough undo history to support a blockchain with so many missed blocks. "
                 "Please add a checkpoint if you would like to continue applying blocks beyond this point.",
                 ("last_irreversible_block_num",_dgp.last_irreversible_block_num)("head", _dgp.head_block_number)
                 ("recently_missed",_dgp.recently_missed_count)("max_undo",GRAPHENE_MAX_UNDO_HISTORY) );
   }

   // keep only enough history to be able to rewind to the last irreversible block
   _undo_db.set_max_size( _dgp.head_block_number - _dgp.last_irreversible_block_num + 1 );
   _fork_db.set_max_size( _dgp.head_block_number - _dgp.last_irreversible_block_num + 1 );
}
void database::update_signing_witness(const witness_object& signing_witness, const signed_block& new_block)
|
|
{
|
|
const global_property_object& gpo = get_global_properties();
|
|
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
|
|
uint64_t new_block_aslot = dpo.current_aslot + get_slot_at_time( new_block.timestamp );
|
|
|
|
share_type witness_pay = std::min( gpo.parameters.witness_pay_per_block, dpo.witness_budget );
|
|
|
|
modify( dpo, [&]( dynamic_global_property_object& _dpo )
|
|
{
|
|
_dpo.witness_budget -= witness_pay;
|
|
} );
|
|
|
|
deposit_witness_pay( signing_witness, witness_pay );
|
|
|
|
modify( signing_witness, [&]( witness_object& _wit )
|
|
{
|
|
_wit.last_aslot = new_block_aslot;
|
|
_wit.last_confirmed_block_num = new_block.block_num();
|
|
_wit.previous_secret = new_block.previous_secret;
|
|
_wit.next_secret_hash = new_block.next_secret_hash;
|
|
} );
|
|
}
|
|
|
|
void database::update_last_irreversible_block()
{
   // A block becomes irreversible once the GRAPHENE_IRREVERSIBLE_THRESHOLD
   // fraction of active witnesses has confirmed it (per their
   // last_confirmed_block_num). Advance last_irreversible_block_num accordingly.
   const global_property_object& gpo = get_global_properties();
   const dynamic_global_property_object& dpo = get_dynamic_global_properties();

   // TODO for better performance, move this to db_maint, because only need to do it once per maintenance interval
   vector< const witness_object* > wit_objs;
   wit_objs.reserve( gpo.active_witnesses.size() );
   for( const witness_id_type& wid : gpo.active_witnesses )
      wit_objs.push_back( &(wid(*this)) );

   static_assert( GRAPHENE_IRREVERSIBLE_THRESHOLD > 0, "irreversible threshold must be nonzero" );

   // Examples of confirmed-block-number distributions and the resulting pick:
   // 1 1 1 2 2 2 2 2 2 2 -> 2     .7*10 = 7
   // 1 1 1 1 1 1 1 2 2 2 -> 1
   // 3 3 3 3 3 3 3 3 3 3 -> 3

   // index of the element such that the threshold fraction of witnesses has
   // confirmed at least that block number
   size_t offset = ((GRAPHENE_100_PERCENT - GRAPHENE_IRREVERSIBLE_THRESHOLD) * wit_objs.size() / GRAPHENE_100_PERCENT);

   // partial sort: only the element at `offset` needs to be in its final position
   std::nth_element( wit_objs.begin(), wit_objs.begin() + offset, wit_objs.end(),
      []( const witness_object* a, const witness_object* b )
      {
         return a->last_confirmed_block_num < b->last_confirmed_block_num;
      } );

   uint32_t new_last_irreversible_block_num = wit_objs[offset]->last_confirmed_block_num;

   // irreversibility only ever moves forward
   if( new_last_irreversible_block_num > dpo.last_irreversible_block_num )
   {
      modify( dpo, [&]( dynamic_global_property_object& _dpo )
      {
         _dpo.last_irreversible_block_num = new_last_irreversible_block_num;
      } );
   }
}
void database::clear_expired_transactions()
|
|
{ try {
|
|
//Look for expired transactions in the deduplication list, and remove them.
|
|
//Transactions must have expired by at least two forking windows in order to be removed.
|
|
auto& transaction_idx = static_cast<transaction_index&>(get_mutable_index(implementation_ids, impl_transaction_object_type));
|
|
const auto& dedupe_index = transaction_idx.indices().get<by_expiration>();
|
|
while( (!dedupe_index.empty()) && (head_block_time() > dedupe_index.begin()->trx.expiration) )
|
|
transaction_idx.remove(*dedupe_index.begin());
|
|
} FC_CAPTURE_AND_RETHROW() }
|
|
|
|
void database::place_delayed_bets()
{ try {
   // If any bets have been placed during live betting where bets are delayed for a few seconds, see if there are
   // any bets whose delays have expired.

   // Delayed bets are sorted to the beginning of the order book, so if there are any bets that need placing,
   // they're right at the front of the book
   const auto& bet_odds_idx = get_index_type<bet_object_index>().indices().get<by_odds>();
   auto iter = bet_odds_idx.begin();

   // we use an awkward looping mechanism here because there's a case where we are processing the
   // last delayed bet before the "real" order book starts and `iter` was pointing at the first
   // real order.  The place_bet() call can cause the that real order to be deleted, so we need
   // to decide whether this is the last delayed bet before `place_bet` is called.
   // `last` is true when `iter` does not point at an expired delayed bet.
   bool last = iter == bet_odds_idx.end() ||
               !iter->end_of_delay ||
               *iter->end_of_delay > head_block_time();
   while (!last)
   {
      const bet_object& bet_to_place = *iter;
      ++iter;

      // recompute before mutating the book (see the note above about deletion)
      last = iter == bet_odds_idx.end() ||
             !iter->end_of_delay ||
             *iter->end_of_delay > head_block_time();

      // it's possible that the betting market was active when the bet was placed,
      // but has been frozen before the delay expired.  If that's the case here,
      // don't try to match the bet.
      // Since this check happens every block, this could impact performance if a
      // market with many delayed bets is frozen for a long time.
      // Our current understanding is that the witnesses will typically cancel all unmatched
      // bets on frozen markets to avoid this.
      const betting_market_object& betting_market = bet_to_place.betting_market_id(*this);
      if (betting_market.get_status() == betting_market_status::unresolved)
      {
         modify(bet_to_place, [](bet_object& bet_obj) {
            // clear the end_of_delay, which will re-sort the bet into its place in the book
            bet_obj.end_of_delay.reset();
         });

         place_bet(bet_to_place);
      }
   }
} FC_CAPTURE_AND_RETHROW() }
void database::clear_expired_proposals()
{
   // Walk proposals in expiration order; execute any that are fully authorized
   // at expiry, and delete the rest (along with any SON bookkeeping tied to them).
   const auto& proposal_expiration_index = get_index_type<proposal_index>().indices().get<by_expiration>();
   while( !proposal_expiration_index.empty() && proposal_expiration_index.begin()->expiration_time <= head_block_time() )
   {
      const proposal_object& proposal = *proposal_expiration_index.begin();
      processed_transaction result;
      try {
         if( proposal.is_authorized_to_execute(*this) )
         {
            result = push_proposal(proposal);
            //TODO: Do something with result so plugins can process it.
            // NOTE(review): the `continue` skips the explicit removal below,
            // which implies push_proposal() erases the proposal itself — confirm.
            continue;
         }
      } catch( const fc::exception& e ) {
         elog("Failed to apply proposed transaction on its expiration. Deleting it.\n${proposal}\n${error}",
              ("proposal", proposal)("error", e.to_detail_string()));
      }
      // not executable (or execution failed): drop its SON tracker, then the proposal
      remove_son_proposal(proposal);
      remove(proposal);
   }
}
/**
 *  let HB = the highest bid for the collateral (aka who will pay the most DEBT for the least collateral)
 *  let SP = current median feed's Settlement Price
 *  let LC = the least collateralized call order's swan price (debt/collateral)
 *
 *  If there is no valid price feed or no bids then there is no black swan.
 *
 *  A black swan occurs if MAX(HB,SP) <= LC
 *
 *  @param mia               the market-issued asset to check
 *  @param enable_black_swan if false, detecting a black swan asserts instead of settling
 *  @param bitasset_ptr      optional pre-fetched bitasset data (avoids a lookup)
 *  @return true if the asset is (or already was) globally settled
 */
bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan,
                                    const asset_bitasset_data_object* bitasset_ptr )
{
   if( !mia.is_market_issued() ) return false;

   const asset_bitasset_data_object& bitasset = ( bitasset_ptr ? *bitasset_ptr : mia.bitasset_data(*this) );
   if( bitasset.has_settlement() ) return true; // already force settled
   auto settle_price = bitasset.current_feed.settlement_price;
   if( settle_price.is_null() ) return false; // no feed

   const call_order_object* call_ptr = nullptr; // place holder for the call order with least collateral ratio

   asset_id_type debt_asset_id = mia.id;
   auto call_min = price::min( bitasset.options.short_backing_asset, debt_asset_id );

   auto maint_time = get_dynamic_global_properties().next_maintenance_time;
   bool before_core_hardfork_1270 = ( maint_time <= HARDFORK_CORE_1270_TIME ); // call price caching issue

   if( before_core_hardfork_1270 ) // before core-1270 hard fork, check with call_price
   {
      const auto& call_price_index = get_index_type<call_order_index>().indices().get<by_price>();
      auto call_itr = call_price_index.lower_bound( call_min );
      if( call_itr == call_price_index.end() ) // no call order
         return false;
      call_ptr = &(*call_itr);
   }
   else // after core-1270 hard fork, check with collateralization
   {
      const auto& call_collateral_index = get_index_type<call_order_index>().indices().get<by_collateral>();
      auto call_itr = call_collateral_index.lower_bound( call_min );
      if( call_itr == call_collateral_index.end() ) // no call order
         return false;
      call_ptr = &(*call_itr);
   }
   if( call_ptr->debt_type() != debt_asset_id ) // no call order
      return false;

   price highest = settle_price;
   if( maint_time > HARDFORK_CORE_1270_TIME )
      // due to #338, we won't check for black swan on incoming limit order, so need to check with MSSP here
      highest = bitasset.current_feed.max_short_squeeze_price();
   else if( maint_time > HARDFORK_CORE_338_TIME )
      // due to #338, we won't check for black swan on incoming limit order, so need to check with MSSP here
      highest = bitasset.current_feed.max_short_squeeze_price_before_hf_1270();

   const limit_order_index& limit_index = get_index_type<limit_order_index>();
   const auto& limit_price_index = limit_index.indices().get<by_price>();

   // looking for limit orders selling the most USD for the least CORE
   auto highest_possible_bid = price::max( mia.id, bitasset.options.short_backing_asset );
   // stop when limit orders are selling too little USD for too much CORE
   auto lowest_possible_bid  = price::min( mia.id, bitasset.options.short_backing_asset );

   FC_ASSERT( highest_possible_bid.base.asset_id == lowest_possible_bid.base.asset_id );
   // NOTE limit_price_index is sorted from greatest to least
   auto limit_itr = limit_price_index.lower_bound( highest_possible_bid );
   auto limit_end = limit_price_index.upper_bound( lowest_possible_bid );

   if( limit_itr != limit_end ) {
      FC_ASSERT( highest.base.asset_id == limit_itr->sell_price.base.asset_id );
      highest = std::max( limit_itr->sell_price, highest );
   }

   auto least_collateral = call_ptr->collateralization();
   // black swan: even the best bid cannot cover the least collateralized position
   if( ~least_collateral >= highest  )
   {
      wdump( (*call_ptr) );
      elog( "Black Swan detected on asset ${symbol} (${id}) at block ${b}: \n"
            "   Least collateralized call: ${lc}  ${~lc}\n"
         //  "   Highest Bid:               ${hb}  ${~hb}\n"
            "   Settle Price:              ${~sp}  ${sp}\n"
            "   Max:                       ${~h}  ${h}\n",
           ("id",mia.id)("symbol",mia.symbol)("b",head_block_num())
           ("lc",least_collateral.to_real())("~lc",(~least_collateral).to_real())
         //  ("hb",limit_itr->sell_price.to_real())("~hb",(~limit_itr->sell_price).to_real())
           ("sp",settle_price.to_real())("~sp",(~settle_price).to_real())
           ("h",highest.to_real())("~h",(~highest).to_real()) );
      edump((enable_black_swan));
      FC_ASSERT( enable_black_swan, "Black swan was detected during a margin update which is not allowed to trigger a blackswan" );
      if( maint_time > HARDFORK_CORE_338_TIME && ~least_collateral <= settle_price )
         // global settle at feed price if possible
         globally_settle_asset(mia, settle_price );
      else
         globally_settle_asset(mia, ~least_collateral );
      return true;
   }
   return false;
}
void database::clear_expired_orders()
|
|
{ try {
|
|
//Cancel expired limit orders
|
|
auto head_time = head_block_time();
|
|
auto maint_time = get_dynamic_global_properties().next_maintenance_time;
|
|
bool before_core_hardfork_184 = ( maint_time <= HARDFORK_CORE_184_TIME ); // something-for-nothing
|
|
bool before_core_hardfork_342 = ( maint_time <= HARDFORK_CORE_342_TIME ); // better rounding
|
|
bool before_core_hardfork_606 = ( maint_time <= HARDFORK_CORE_606_TIME ); // feed always trigger call
|
|
auto& limit_index = get_index_type<limit_order_index>().indices().get<by_expiration>();
|
|
while( !limit_index.empty() && limit_index.begin()->expiration <= head_time )
|
|
{
|
|
const limit_order_object& order = *limit_index.begin();
|
|
auto base_asset = order.sell_price.base.asset_id;
|
|
auto quote_asset = order.sell_price.quote.asset_id;
|
|
cancel_limit_order( order );
|
|
if( before_core_hardfork_606 )
|
|
{
|
|
// check call orders
|
|
// Comments below are copied from limit_order_cancel_evaluator::do_apply(...)
|
|
// Possible optimization: order can be called by cancelling a limit order
|
|
// if the canceled order was at the top of the book.
|
|
// Do I need to check calls in both assets?
|
|
check_call_orders( base_asset( *this ) );
|
|
check_call_orders( quote_asset( *this ) );
|
|
}
|
|
}
|
|
|
|
//Process expired force settlement orders
|
|
auto& settlement_index = get_index_type<force_settlement_index>().indices().get<by_expiration>();
|
|
if( !settlement_index.empty() )
|
|
{
|
|
asset_id_type current_asset = settlement_index.begin()->settlement_asset_id();
|
|
asset max_settlement_volume;
|
|
price settlement_fill_price;
|
|
price settlement_price;
|
|
bool current_asset_finished = false;
|
|
bool extra_dump = false;
|
|
|
|
auto next_asset = [¤t_asset, ¤t_asset_finished, &settlement_index, &extra_dump] {
|
|
auto bound = settlement_index.upper_bound(current_asset);
|
|
if( bound == settlement_index.end() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returning false" );
|
|
}
|
|
return false;
|
|
}
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset returning true, bound is ${b}", ("b", *bound) );
|
|
}
|
|
current_asset = bound->settlement_asset_id();
|
|
current_asset_finished = false;
|
|
return true;
|
|
};
|
|
|
|
uint32_t count = 0;
|
|
|
|
// At each iteration, we either consume the current order and remove it, or we move to the next asset
|
|
for( auto itr = settlement_index.lower_bound(current_asset);
|
|
itr != settlement_index.end();
|
|
itr = settlement_index.lower_bound(current_asset) )
|
|
{
|
|
++count;
|
|
const force_settlement_object& order = *itr;
|
|
auto order_id = order.id;
|
|
current_asset = order.settlement_asset_id();
|
|
const asset_object& mia_object = get(current_asset);
|
|
const asset_bitasset_data_object& mia = mia_object.bitasset_data(*this);
|
|
|
|
extra_dump = ((count >= 1000) && (count <= 1020));
|
|
|
|
if( extra_dump )
|
|
{
|
|
wlog( "clear_expired_orders() dumping extra data for iteration ${c}", ("c", count) );
|
|
ilog( "head_block_num is ${hb} current_asset is ${a}", ("hb", head_block_num())("a", current_asset) );
|
|
}
|
|
|
|
if( mia.has_settlement() )
|
|
{
|
|
ilog( "Canceling a force settlement because of black swan" );
|
|
cancel_settle_order( order );
|
|
continue;
|
|
}
|
|
|
|
// Has this order not reached its settlement date?
|
|
if( order.settlement_date > head_time )
|
|
{
|
|
if( next_asset() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returned true when order.settlement_date > head_block_time()" );
|
|
}
|
|
continue;
|
|
}
|
|
break;
|
|
}
|
|
// Can we still settle in this asset?
|
|
if( mia.current_feed.settlement_price.is_null() )
|
|
{
|
|
ilog("Canceling a force settlement in ${asset} because settlement price is null",
|
|
("asset", mia_object.symbol));
|
|
cancel_settle_order(order);
|
|
continue;
|
|
}
|
|
if( max_settlement_volume.asset_id != current_asset )
|
|
max_settlement_volume = mia_object.amount(mia.max_force_settlement_volume(mia_object.dynamic_data(*this).current_supply));
|
|
// When current_asset_finished is true, this would be the 2nd time processing the same order.
|
|
// In this case, we move to the next asset.
|
|
if( mia.force_settled_volume >= max_settlement_volume.amount || current_asset_finished )
|
|
{
|
|
/*
|
|
ilog("Skipping force settlement in ${asset}; settled ${settled_volume} / ${max_volume}",
|
|
("asset", mia_object.symbol)("settlement_price_null",mia.current_feed.settlement_price.is_null())
|
|
("settled_volume", mia.force_settled_volume)("max_volume", max_settlement_volume));
|
|
*/
|
|
if( next_asset() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returned true when mia.force_settled_volume >= max_settlement_volume.amount" );
|
|
}
|
|
continue;
|
|
}
|
|
break;
|
|
}
|
|
|
|
if( settlement_fill_price.base.asset_id != current_asset )
|
|
settlement_fill_price = mia.current_feed.settlement_price
|
|
/ ratio_type( GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent,
|
|
GRAPHENE_100_PERCENT );
|
|
|
|
if( before_core_hardfork_342 )
|
|
{
|
|
auto& pays = order.balance;
|
|
auto receives = (order.balance * mia.current_feed.settlement_price);
|
|
receives.amount = ( fc::uint128_t(receives.amount.value) *
|
|
(GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent) /
|
|
GRAPHENE_100_PERCENT ).to_uint64();
|
|
assert(receives <= order.balance * mia.current_feed.settlement_price);
|
|
settlement_price = pays / receives;
|
|
}
|
|
else if( settlement_price.base.asset_id != current_asset ) // only calculate once per asset
|
|
settlement_price = settlement_fill_price;
|
|
|
|
auto& call_index = get_index_type<call_order_index>().indices().get<by_collateral>();
|
|
asset settled = mia_object.amount(mia.force_settled_volume);
|
|
// Match against the least collateralized short until the settlement is finished or we reach max settlements
|
|
while( settled < max_settlement_volume && find_object(order_id) )
|
|
{
|
|
auto itr = call_index.lower_bound(boost::make_tuple(price::min(mia_object.bitasset_data(*this).options.short_backing_asset,
|
|
mia_object.get_id())));
|
|
// There should always be a call order, since asset exists!
|
|
assert(itr != call_index.end() && itr->debt_type() == mia_object.get_id());
|
|
asset max_settlement = max_settlement_volume - settled;
|
|
|
|
if( order.balance.amount == 0 )
|
|
{
|
|
wlog( "0 settlement detected" );
|
|
cancel_settle_order( order );
|
|
break;
|
|
}
|
|
try {
|
|
asset new_settled = match(*itr, order, settlement_price, max_settlement, settlement_fill_price);
|
|
if( !before_core_hardfork_184 && new_settled.amount == 0 ) // unable to fill this settle order
|
|
{
|
|
if( find_object( order_id ) ) // the settle order hasn't been cancelled
|
|
current_asset_finished = true;
|
|
break;
|
|
}
|
|
settled += new_settled;
|
|
// before hard fork core-342, if new_settled > 0, we'll have:
|
|
// * call order is completely filled (thus itr will change in next loop), or
|
|
// * settle order is completely filled (thus find_object(order_id) will be false so will break out), or
|
|
// * reached max_settlement_volume limit (thus new_settled == max_settlement so will break out).
|
|
//
|
|
// after hard fork core-342, if new_settled > 0, we'll have:
|
|
// * call order is completely filled (thus itr will change in next loop), or
|
|
// * settle order is completely filled (thus find_object(order_id) will be false so will break out), or
|
|
// * reached max_settlement_volume limit, but it's possible that new_settled < max_settlement,
|
|
// in this case, new_settled will be zero in next iteration of the loop, so no need to check here.
|
|
}
|
|
catch ( const black_swan_exception& e ) {
|
|
wlog( "Cancelling a settle_order since it may trigger a black swan: ${o}, ${e}",
|
|
("o", order)("e", e.to_detail_string()) );
|
|
cancel_settle_order( order );
|
|
break;
|
|
}
|
|
}
|
|
if( mia.force_settled_volume != settled.amount )
|
|
{
|
|
modify(mia, [settled](asset_bitasset_data_object& b) {
|
|
b.force_settled_volume = settled.amount;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
} FC_CAPTURE_AND_RETHROW() }
|
|
|
|
void database::update_expired_feeds()
{
   // Recompute median feeds for bitassets whose feeds have expired, trigger
   // margin-call checks when the median moved, and propagate feed-derived
   // core exchange rates to the asset object when flagged.
   const auto head_time = head_block_time();
   const auto next_maint_time = get_dynamic_global_properties().next_maintenance_time;
   bool after_hardfork_615 = ( head_time >= HARDFORK_615_TIME );

   const auto& idx = get_index_type<asset_bitasset_data_index>().indices().get<by_feed_expiration>();
   auto itr = idx.begin();
   while( itr != idx.end() && itr->feed_is_expired( head_time ) )
   {
      const asset_bitasset_data_object& b = *itr;
      ++itr; // not always process begin() because old code skipped updating some assets before hf 615
      bool update_cer = false; // for better performance, to only update bitasset once, also check CER in this function
      const asset_object* asset_ptr = nullptr;
      // update feeds, check margin calls
      if( after_hardfork_615 || b.feed_is_expired_before_hardfork_615( head_time ) )
      {
         auto old_median_feed = b.current_feed;
         modify( b, [head_time,next_maint_time,&update_cer]( asset_bitasset_data_object& abdo )
         {
            abdo.update_median_feeds( head_time, next_maint_time );
            if( abdo.need_to_update_cer() )
            {
               update_cer = true;
               abdo.asset_cer_updated = false;
               abdo.feed_cer_updated = false;
            }
         });
         // only re-check margin calls if the median actually changed and is still valid
         if( !b.current_feed.settlement_price.is_null() && !( b.current_feed == old_median_feed ) ) // `==` check is safe here
         {
            asset_ptr = &b.asset_id( *this );
            check_call_orders( *asset_ptr, true, false, &b );
         }
      }
      // update CER
      if( update_cer )
      {
         if( !asset_ptr )
            asset_ptr = &b.asset_id( *this );
         if( asset_ptr->options.core_exchange_rate != b.current_feed.core_exchange_rate )
         {
            modify( *asset_ptr, [&b]( asset_object& ao )
            {
               ao.options.core_exchange_rate = b.current_feed.core_exchange_rate;
            });
         }
      }
   } // for each asset whose feed is expired

   // process assets affected by bitshares-core issue 453 before hard fork 615
   if( !after_hardfork_615 )
   {
      for( asset_id_type a : _issue_453_affected_assets )
      {
         check_call_orders( a(*this) );
      }
   }
}
void database::update_core_exchange_rates()
{
   // Propagate updated feed-derived core exchange rates to asset objects for
   // every bitasset flagged as needing a CER update, then clear the flags.
   const auto& idx = get_index_type<asset_bitasset_data_index>().indices().get<by_cer_update>();
   if( idx.begin() != idx.end() )
   {
      // flagged entries sort to the back of this index; clearing the flags in
      // modify() re-sorts the entry, so rbegin() always yields the next
      // flagged bitasset until none remain
      for( auto itr = idx.rbegin(); itr->need_to_update_cer(); itr = idx.rbegin() )
      {
         const asset_bitasset_data_object& b = *itr;
         const asset_object& a = b.asset_id( *this );
         if( a.options.core_exchange_rate != b.current_feed.core_exchange_rate )
         {
            modify( a, [&b]( asset_object& ao )
            {
               ao.options.core_exchange_rate = b.current_feed.core_exchange_rate;
            });
         }
         modify( b, []( asset_bitasset_data_object& abdo )
         {
            abdo.asset_cer_updated = false;
            abdo.feed_cer_updated = false;
         });
      }
   }
}
void database::update_maintenance_flag( bool new_maintenance_flag )
|
|
{
|
|
modify( get_dynamic_global_properties(), [&]( dynamic_global_property_object& dpo )
|
|
{
|
|
auto maintenance_flag = dynamic_global_property_object::maintenance_flag;
|
|
dpo.dynamic_flags =
|
|
(dpo.dynamic_flags & ~maintenance_flag)
|
|
| (new_maintenance_flag ? maintenance_flag : 0);
|
|
} );
|
|
return;
|
|
}
|
|
|
|
void database::update_withdraw_permissions()
|
|
{
|
|
auto& permit_index = get_index_type<withdraw_permission_index>().indices().get<by_expiration>();
|
|
while( !permit_index.empty() && permit_index.begin()->expiration <= head_block_time() )
|
|
remove(*permit_index.begin());
|
|
}
|
|
|
|
uint64_t database::get_random_bits( uint64_t bound )
{
   // Draw from the deterministic hash-counter RNG reseeded each block in
   // update_global_dynamic_data(). Presumably returns a value in [0, bound)
   // — confirm against fc::hash_ctr_rng's call operator.
   return _random_number_generator(bound);
}
// Placeholder: intended to process games that have finished; no logic yet.
void process_finished_games(database& db)
{
   //auto& games_index = db.get_index_type<game_index>().indices().get<by_id>();
}
// Placeholder: intended to process matches that have finished; no logic yet.
void process_finished_matches(database& db)
{
}
// Give every in-progress tournament a chance to start matches that are now ready.
void process_in_progress_tournaments(database& db)
{
   const auto& index_by_start = db.get_index_type<tournament_index>().indices().get<by_start_time>();
   auto iter = index_by_start.lower_bound(boost::make_tuple(tournament_state::in_progress));
   while (iter != index_by_start.end() &&
          iter->get_state() == tournament_state::in_progress)
   {
      // checking for new matches may modify (and re-sort) the tournament,
      // so save the successor before the call
      auto following = std::next(iter);
      iter->check_for_new_matches_to_start(db);
      iter = following;
   }
}
// Cancel tournaments still accepting registrations whose deadline has passed
// without gathering enough players.
void cancel_expired_tournaments(database& db)
{
   const auto& deadline_index = db.get_index_type<tournament_index>().indices().get<by_registration_deadline>();
   // the index sorts on (state, deadline), so expired accepting_registrations
   // tournaments — if any — sit at the very front
   for (;;)
   {
      if (deadline_index.empty())
         break;
      const tournament_object& expired_tournament = *deadline_index.begin();
      if (expired_tournament.get_state() != tournament_state::accepting_registrations ||
          expired_tournament.options.registration_deadline > db.head_block_time())
         break;
      fc_ilog(fc::logger::get("tournament"),
              "Canceling tournament ${id} because its deadline expired",
              ("id", expired_tournament.id));
      // canceling re-sorts the tournament out of the accepting_registrations range
      db.modify(expired_tournament, [&db](tournament_object& t) {
         t.on_registration_deadline_passed(db);
      });
   }
}
// Start any fully-subscribed tournaments whose scheduled start time has arrived.
void start_fully_registered_tournaments(database& db)
{
   const auto& index_by_start = db.get_index_type<tournament_index>().indices().get<by_start_time>();
   for (;;)
   {
      // the earliest tournament awaiting start sorts first within that state
      auto candidate = index_by_start.lower_bound(boost::make_tuple(tournament_state::awaiting_start));
      bool ready = candidate != index_by_start.end() &&
                   candidate->get_state() == tournament_state::awaiting_start &&
                   *candidate->start_time <= db.head_block_time();
      if (!ready)
         break;
      // starting the tournament moves it out of awaiting_start, so the next
      // lower_bound finds the next candidate
      db.modify(*candidate, [&db](tournament_object& t) {
         t.on_start_time_arrived(db);
      });
   }
}
// Placeholder: intended to kick off the next round of matches; no logic yet.
void initiate_next_round_of_matches(database& db)
{
}
// Trigger timeouts on games that have waited too long for commit or reveal moves.
void initiate_next_games(database& db)
{
   const auto& timeout_index = db.get_index_type<game_index>().indices().get<by_next_timeout>();
   for (;;)
   {
      // empty time_points sort to the front of this index; upper_bound over an
      // empty optional jumps to the first game with a timeout actually set
      auto game_iter = timeout_index.upper_bound(boost::make_tuple(optional<time_point_sec>()));
      bool timed_out = game_iter != timeout_index.end() &&
                       *game_iter->next_timeout <= db.head_block_time();
      if (!timed_out)
         break;
      db.modify(*game_iter, [&db](game_object& game) {
         game.on_timeout(db);
      });
   }
}
void database::update_tournaments()
{
   // Per-block tournament maintenance, executed in dependency order:
   // - Process games
   //   - Process matches
   //     - Process tournaments
   //   - Process matches
   // - Process games
   process_finished_games(*this);
   process_finished_matches(*this);
   cancel_expired_tournaments(*this);
   start_fully_registered_tournaments(*this);
   process_in_progress_tournaments(*this);
   initiate_next_round_of_matches(*this);
   initiate_next_games(*this);
}
// After grading, a betting market group waits out a window during which it may
// be flagged for re-grading; groups whose window has elapsed without a flag
// are automatically settled (paid) here.
void process_settled_betting_markets(database& db, fc::time_point_sec current_block_time)
{
   const auto& group_index = db.get_index_type<betting_market_group_object_index>().indices().get<by_settling_time>();

   // groups with no settling time sort first, followed by set times in
   // increasing order; skip straight to the first group with a time set
   auto group_iter = group_index.upper_bound(fc::optional<fc::time_point_sec>());
   while (group_iter != group_index.end() &&
          *group_iter->settling_time <= current_block_time)
   {
      // settling mutates/re-sorts the index entry, so save the successor first
      auto successor = std::next(group_iter);
      db.settle_betting_market_group(*group_iter);
      group_iter = successor;
   }
}
void database::update_betting_markets(fc::time_point_sec current_block_time)
{
   // Per-block betting maintenance: pay out betting market groups whose
   // post-grading window has elapsed, then clean up completed events.
   process_settled_betting_markets(*this, current_block_time);
   remove_completed_events();
}
void database::finalize_expired_offers(){
   // Finalize every offer whose expiration date has passed by applying a
   // finalize_offer_operation on the issuer's behalf. Authority checks are
   // skipped because the chain itself initiates these operations.
   try {
      detail::with_skip_flags( *this,
         get_node_properties().skip_flags | skip_authority_check, [&](){
         transaction_evaluation_state cancel_context(this);

         // Finalize expired offers (sorted by expiration date)
         auto& limit_index = get_index_type<offer_index>().indices().get<by_expiration_date>();
         auto itr = limit_index.begin();
         while( itr != limit_index.end() && itr->offer_expiration_date <= head_block_time() )
         {
            // applying the operation may remove the offer, so advance first
            const offer_object& offer = *itr;
            ++itr;

            // synthesize a zero-fee finalize operation on behalf of the issuer
            finalize_offer_operation finalize;
            finalize.fee_paying_account = offer.issuer;
            finalize.offer_id = offer.id;
            finalize.fee = asset( 0, asset_id_type() );
            finalize.result = offer.bidder ? result_type::Expired : result_type::ExpiredNoBid;

            cancel_context.skip_fee_schedule_check = true;
            apply_operation(cancel_context, finalize);
         }
      });
   } FC_CAPTURE_AND_RETHROW()}
void database::remove_son_proposal( const proposal_object& proposal )
|
|
{ try {
|
|
if( proposal.proposed_transaction.operations.size() == 1 &&
|
|
( proposal.proposed_transaction.operations.back().which() == operation::tag<son_deregister_operation>::value ||
|
|
proposal.proposed_transaction.operations.back().which() == operation::tag<son_report_down_operation>::value) )
|
|
{
|
|
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get<by_proposal>();
|
|
auto son_proposal_itr = son_proposal_idx.find( proposal.id );
|
|
if( son_proposal_itr == son_proposal_idx.end() ) {
|
|
return;
|
|
}
|
|
remove( *son_proposal_itr );
|
|
}
|
|
} FC_CAPTURE_AND_RETHROW( (proposal) ) }
|
|
|
|
void database::remove_inactive_son_down_proposals( const vector<son_id_type>& son_ids_to_remove )
|
|
{
|
|
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get< by_id >();
|
|
std::vector<proposal_id_type> proposals_to_remove;
|
|
|
|
for( auto& son_proposal : son_proposal_idx )
|
|
{
|
|
if(son_proposal.proposal_type == son_proposal_type::son_report_down_proposal)
|
|
{
|
|
auto it = std::find(son_ids_to_remove.begin(), son_ids_to_remove.end(), son_proposal.son_id);
|
|
if (it != son_ids_to_remove.end())
|
|
{
|
|
ilog( "Removing inactive proposal ${p} for son ${s}", ("p", son_proposal.proposal_id) ("s",son_proposal.son_id));
|
|
proposals_to_remove.push_back(son_proposal.proposal_id);
|
|
}
|
|
}
|
|
}
|
|
|
|
for( auto& proposal_id : proposals_to_remove )
|
|
{
|
|
const auto& proposal_obj = proposal_id(*this);
|
|
remove_son_proposal(proposal_obj);
|
|
remove(proposal_obj);
|
|
}
|
|
}
|
|
|
|
void database::remove_inactive_son_proposals( const vector<son_id_type>& son_ids_to_remove )
{
   // Entry point for cleaning up proposals tied to SONs leaving the active
   // set; currently only son_report_down proposals require removal.
   remove_inactive_son_down_proposals( son_ids_to_remove );
}
} }
|