a collection of flags (is_live, is_closed) to a single status field. The status changes in an event can trickle down to the market groups, and the status changes in market groups can bubble up to events.
682 lines
28 KiB
C++
682 lines
28 KiB
C++
/*
|
|
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
|
|
*
|
|
* The MIT License
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
* of this software and associated documentation files (the "Software"), to deal
|
|
* in the Software without restriction, including without limitation the rights
|
|
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
* copies of the Software, and to permit persons to whom the Software is
|
|
* furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included in
|
|
* all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
* THE SOFTWARE.
|
|
*/
|
|
|
|
#include <graphene/chain/database.hpp>
|
|
#include <graphene/chain/db_with.hpp>
|
|
|
|
#include <graphene/chain/asset_object.hpp>
|
|
#include <graphene/chain/global_property_object.hpp>
|
|
#include <graphene/chain/hardfork.hpp>
|
|
#include <graphene/chain/market_object.hpp>
|
|
#include <graphene/chain/proposal_object.hpp>
|
|
#include <graphene/chain/transaction_object.hpp>
|
|
#include <graphene/chain/withdraw_permission_object.hpp>
|
|
#include <graphene/chain/witness_object.hpp>
|
|
#include <graphene/chain/tournament_object.hpp>
|
|
#include <graphene/chain/game_object.hpp>
|
|
#include <graphene/chain/betting_market_object.hpp>
|
|
|
|
#include <graphene/chain/protocol/fee_schedule.hpp>
|
|
|
|
#include <fc/uint128.hpp>
|
|
|
|
namespace graphene { namespace chain {
|
|
|
|
void database::update_global_dynamic_data( const signed_block& b )
{
   // Refresh chain-wide dynamic state after block `b` is applied:
   //  - charge a "miss" to every witness whose slot was skipped,
   //  - fold the block's revealed secret into the chain's random seed and
   //    reseed the per-block RNG,
   //  - advance head-block bookkeeping (number, id, time, slot counters),
   //  - resize the undo/fork history windows.
   const dynamic_global_property_object& _dgp = dynamic_global_property_id_type(0)(*this);
   const global_property_object& gpo = get_global_properties();  // only referenced by the disabled schedule-algorithm check below

   // Slot offset of b.timestamp relative to the current head; a valid block
   // always lands in a later slot, so this is >= 1 (asserted below).
   uint32_t missed_blocks = get_slot_at_time( b.timestamp );

   //#define DIRTY_TRICK // problem with missed_blocks can occur when "maintenance_interval" set to few minutes
#ifdef DIRTY_TRICK
   if (missed_blocks != 0) {
#else
   assert( missed_blocks != 0 );
#endif
   // bad if-condition, this code needs to execute for both shuffled and rng algorithms
   // if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
   // {
   missed_blocks--;
   // Every slot between the previous head and this block was skipped; bump
   // total_missed for each witness that was scheduled in one of those slots.
   for( uint32_t i = 0; i < missed_blocks; ++i ) {
      const auto& witness_missed = get_scheduled_witness( i+1 )(*this);
      if( witness_missed.id != b.witness ) {
         /*
         const auto& witness_account = witness_missed.witness_account(*this);
         if( (fc::time_point::now() - b.timestamp) < fc::seconds(30) )
            wlog( "Witness ${name} missed block ${n} around ${t}", ("name",witness_account.name)("n",b.block_num())("t",b.timestamp) );
         */

         modify( witness_missed, [&]( witness_object& w ) {
            w.total_missed++;
         });
      }
   }
   // }
#ifdef DIRTY_TRICK
   }
#endif
   // dynamic global properties updating
   modify( _dgp, [&]( dynamic_global_property_object& dgp ){
      // Mix the producer's just-revealed previous_secret into the running
      // random value, then reseed the block-scoped RNG from the result.
      secret_hash_type::encoder enc;
      fc::raw::pack( enc, dgp.random );
      fc::raw::pack( enc, b.previous_secret );
      dgp.random = enc.result();

      _random_number_generator = fc::hash_ctr_rng<secret_hash_type, 20>(dgp.random.data());

      if( BOOST_UNLIKELY( b.block_num() == 1 ) )
         dgp.recently_missed_count = 0;
      else if( _checkpoints.size() && _checkpoints.rbegin()->first >= b.block_num() )
         // inside the checkpointed range the chain is trusted; don't penalize misses
         dgp.recently_missed_count = 0;
      else if( missed_blocks )
         dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks;
      else if( dgp.recently_missed_count > GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT )
         dgp.recently_missed_count -= GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT;
      else if( dgp.recently_missed_count > 0 )
         dgp.recently_missed_count--;

      dgp.head_block_number = b.block_num();
      dgp.head_block_id = b.id();
      dgp.time = b.timestamp;
      dgp.current_witness = b.witness;
      // Shift in a 1 for the produced block, then a 0 for each missed slot.
      dgp.recent_slots_filled = (
           (dgp.recent_slots_filled << 1)
           + 1) << missed_blocks;
      dgp.current_aslot += missed_blocks+1;
   });

   if( !(get_node_properties().skip_flags & skip_undo_history_check) )
   {
      GRAPHENE_ASSERT( _dgp.head_block_number - _dgp.last_irreversible_block_num < GRAPHENE_MAX_UNDO_HISTORY, undo_database_exception,
                       "The database does not have enough undo history to support a blockchain with so many missed blocks. "
                       "Please add a checkpoint if you would like to continue applying blocks beyond this point.",
                       ("last_irreversible_block_num",_dgp.last_irreversible_block_num)("head", _dgp.head_block_number)
                       ("recently_missed",_dgp.recently_missed_count)("max_undo",GRAPHENE_MAX_UNDO_HISTORY) );
   }

   // Keep just enough undo/fork history to roll back to the last irreversible block.
   _undo_db.set_max_size( _dgp.head_block_number - _dgp.last_irreversible_block_num + 1 );
   _fork_db.set_max_size( _dgp.head_block_number - _dgp.last_irreversible_block_num + 1 );
}
|
|
|
|
void database::update_signing_witness(const witness_object& signing_witness, const signed_block& new_block)
|
|
{
|
|
const global_property_object& gpo = get_global_properties();
|
|
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
|
|
uint64_t new_block_aslot = dpo.current_aslot + get_slot_at_time( new_block.timestamp );
|
|
|
|
share_type witness_pay = std::min( gpo.parameters.witness_pay_per_block, dpo.witness_budget );
|
|
|
|
modify( dpo, [&]( dynamic_global_property_object& _dpo )
|
|
{
|
|
_dpo.witness_budget -= witness_pay;
|
|
} );
|
|
|
|
deposit_witness_pay( signing_witness, witness_pay );
|
|
|
|
modify( signing_witness, [&]( witness_object& _wit )
|
|
{
|
|
_wit.last_aslot = new_block_aslot;
|
|
_wit.last_confirmed_block_num = new_block.block_num();
|
|
_wit.previous_secret = new_block.previous_secret;
|
|
_wit.next_secret_hash = new_block.next_secret_hash;
|
|
} );
|
|
}
|
|
|
|
void database::update_last_irreversible_block()
|
|
{
|
|
const global_property_object& gpo = get_global_properties();
|
|
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
|
|
|
|
vector< const witness_object* > wit_objs;
|
|
wit_objs.reserve( gpo.active_witnesses.size() );
|
|
for( const witness_id_type& wid : gpo.active_witnesses )
|
|
wit_objs.push_back( &(wid(*this)) );
|
|
|
|
static_assert( GRAPHENE_IRREVERSIBLE_THRESHOLD > 0, "irreversible threshold must be nonzero" );
|
|
|
|
// 1 1 1 2 2 2 2 2 2 2 -> 2 .7*10 = 7
|
|
// 1 1 1 1 1 1 1 2 2 2 -> 1
|
|
// 3 3 3 3 3 3 3 3 3 3 -> 3
|
|
|
|
size_t offset = ((GRAPHENE_100_PERCENT - GRAPHENE_IRREVERSIBLE_THRESHOLD) * wit_objs.size() / GRAPHENE_100_PERCENT);
|
|
|
|
std::nth_element( wit_objs.begin(), wit_objs.begin() + offset, wit_objs.end(),
|
|
[]( const witness_object* a, const witness_object* b )
|
|
{
|
|
return a->last_confirmed_block_num < b->last_confirmed_block_num;
|
|
} );
|
|
|
|
uint32_t new_last_irreversible_block_num = wit_objs[offset]->last_confirmed_block_num;
|
|
|
|
if( new_last_irreversible_block_num > dpo.last_irreversible_block_num )
|
|
{
|
|
modify( dpo, [&]( dynamic_global_property_object& _dpo )
|
|
{
|
|
_dpo.last_irreversible_block_num = new_last_irreversible_block_num;
|
|
} );
|
|
}
|
|
}
|
|
void database::clear_expired_transactions()
|
|
{ try {
|
|
//Look for expired transactions in the deduplication list, and remove them.
|
|
//Transactions must have expired by at least two forking windows in order to be removed.
|
|
auto& transaction_idx = static_cast<transaction_index&>(get_mutable_index(implementation_ids, impl_transaction_object_type));
|
|
const auto& dedupe_index = transaction_idx.indices().get<by_expiration>();
|
|
while( (!dedupe_index.empty()) && (head_block_time() > dedupe_index.begin()->trx.expiration) )
|
|
transaction_idx.remove(*dedupe_index.begin());
|
|
} FC_CAPTURE_AND_RETHROW() }
|
|
|
|
void database::place_delayed_bets()
{ try {
   // If any bets have been placed during live betting where bets are delayed for a few seconds, see if there are
   // any bets whose delays have expired.

   // Delayed bets are sorted to the beginning of the order book, so if there are any bets that need placing,
   // they're right at the front of the book
   const auto& bet_odds_idx = get_index_type<bet_object_index>().indices().get<by_odds>();
   auto iter = bet_odds_idx.begin();

   // we use an awkward looping mechanism here because there's a case where we are processing the
   // last delayed bet before the "real" order book starts and `iter` was pointing at the first
   // real order. The place_bet() call can cause the that real order to be deleted, so we need
   // to decide whether this is the last delayed bet before `place_bet` is called.
   //
   // `last` is true when `iter` is not a delayed bet that is due: either we
   // ran off the index, the bet has no delay set, or its delay hasn't expired.
   bool last = iter == bet_odds_idx.end() ||
      !iter->end_of_delay ||
      *iter->end_of_delay > head_block_time();
   while (!last)
   {
      const bet_object& bet_to_place = *iter;
      // advance (and re-evaluate `last`) BEFORE placing, so we never touch
      // `iter` after place_bet() may have deleted the object it points at
      ++iter;

      last = iter == bet_odds_idx.end() ||
         !iter->end_of_delay ||
         *iter->end_of_delay > head_block_time();

      // it's possible that the betting market was active when the bet was placed,
      // but has been frozen before the delay expired. If that's the case here,
      // don't try to match the bet.
      // Since this check happens every block, this could impact performance if a
      // market with many delayed bets is frozen for a long time.
      // Our current understanding is that the witnesses will typically cancel all unmatched
      // bets on frozen markets to avoid this.
      const betting_market_object& betting_market = bet_to_place.betting_market_id(*this);
      if (betting_market.get_status() == betting_market_status::unresolved)
      {
         modify(bet_to_place, [](bet_object& bet_obj) {
            // clear the end_of_delay, which will re-sort the bet into its place in the book
            bet_obj.end_of_delay.reset();
         });

         place_bet(bet_to_place);
      }
   }
} FC_CAPTURE_AND_RETHROW() }
|
|
|
|
void database::clear_expired_proposals()
|
|
{
|
|
const auto& proposal_expiration_index = get_index_type<proposal_index>().indices().get<by_expiration>();
|
|
while( !proposal_expiration_index.empty() && proposal_expiration_index.begin()->expiration_time <= head_block_time() )
|
|
{
|
|
const proposal_object& proposal = *proposal_expiration_index.begin();
|
|
processed_transaction result;
|
|
try {
|
|
if( proposal.is_authorized_to_execute(*this) )
|
|
{
|
|
result = push_proposal(proposal);
|
|
//TODO: Do something with result so plugins can process it.
|
|
continue;
|
|
}
|
|
} catch( const fc::exception& e ) {
|
|
elog("Failed to apply proposed transaction on its expiration. Deleting it.\n${proposal}\n${error}",
|
|
("proposal", proposal)("error", e.to_detail_string()));
|
|
}
|
|
remove(proposal);
|
|
}
|
|
}
|
|
|
|
/**
 *  let HB = the highest bid for the collateral  (aka who will pay the most DEBT for the least collateral)
 *  let SP = current median feed's Settlement Price
 *  let LC = the least collateralized call order's swan price (debt/collateral)
 *
 *  If there is no valid price feed or no bids then there is no black swan.
 *
 *  A black swan occurs if MAX(HB,SP) <= LC
 *
 *  @param mia the (possibly) market-issued asset to check
 *  @param enable_black_swan if false, detecting a swan raises FC_ASSERT
 *         instead of globally settling the asset
 *  @return true if the asset is (or just became) globally settled
 */
bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan )
{
   if( !mia.is_market_issued() ) return false;

   const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this);
   if( bitasset.has_settlement() ) return true; // already force settled
   auto settle_price = bitasset.current_feed.settlement_price;
   if( settle_price.is_null() ) return false; // no feed

   const call_order_index& call_index = get_index_type<call_order_index>();
   const auto& call_price_index = call_index.indices().get<by_price>();

   const limit_order_index& limit_index = get_index_type<limit_order_index>();
   const auto& limit_price_index = limit_index.indices().get<by_price>();

   // looking for limit orders selling the most USD for the least CORE
   auto highest_possible_bid = price::max( mia.id, bitasset.options.short_backing_asset );
   // stop when limit orders are selling too little USD for too much CORE
   auto lowest_possible_bid = price::min( mia.id, bitasset.options.short_backing_asset );

   assert( highest_possible_bid.base.asset_id == lowest_possible_bid.base.asset_id );
   // NOTE limit_price_index is sorted from greatest to least
   auto limit_itr = limit_price_index.lower_bound( highest_possible_bid );
   auto limit_end = limit_price_index.upper_bound( lowest_possible_bid );

   // Call orders for this asset, sorted so the least collateralized is first.
   auto call_min = price::min( bitasset.options.short_backing_asset, mia.id );
   auto call_max = price::max( bitasset.options.short_backing_asset, mia.id );
   auto call_itr = call_price_index.lower_bound( call_min );
   auto call_end = call_price_index.upper_bound( call_max );

   if( call_itr == call_end ) return false; // no call orders

   // MAX(HB, SP): best bid price if any bids exist, otherwise the feed price.
   price highest = settle_price;
   if( limit_itr != limit_end ) {
      assert( settle_price.base.asset_id == limit_itr->sell_price.base.asset_id );
      highest = std::max( limit_itr->sell_price, settle_price );
   }

   auto least_collateral = call_itr->collateralization();
   // ~least_collateral inverts the price so both sides of the comparison are
   // quoted in the same direction; swan condition is MAX(HB,SP) <= LC.
   if( ~least_collateral >= highest )
   {
      elog( "Black Swan detected: \n"
            "   Least collateralized call: ${lc}  ${~lc}\n"
          //  "   Highest Bid:               ${hb}  ${~hb}\n"
            "   Settle Price:              ${sp}  ${~sp}\n"
            "   Max:                       ${h}   ${~h}\n",
           ("lc",least_collateral.to_real())("~lc",(~least_collateral).to_real())
         //  ("hb",limit_itr->sell_price.to_real())("~hb",(~limit_itr->sell_price).to_real())
           ("sp",settle_price.to_real())("~sp",(~settle_price).to_real())
           ("h",highest.to_real())("~h",(~highest).to_real()) );
      FC_ASSERT( enable_black_swan, "Black swan was detected during a margin update which is not allowed to trigger a blackswan" );
      globally_settle_asset(mia, ~least_collateral );
      return true;
   }
   return false;
}
|
|
|
|
void database::clear_expired_orders()
|
|
{ try {
|
|
detail::with_skip_flags( *this,
|
|
get_node_properties().skip_flags | skip_authority_check, [&](){
|
|
transaction_evaluation_state cancel_context(this);
|
|
|
|
//Cancel expired limit orders
|
|
auto& limit_index = get_index_type<limit_order_index>().indices().get<by_expiration>();
|
|
while( !limit_index.empty() && limit_index.begin()->expiration <= head_block_time() )
|
|
{
|
|
limit_order_cancel_operation canceler;
|
|
const limit_order_object& order = *limit_index.begin();
|
|
canceler.fee_paying_account = order.seller;
|
|
canceler.order = order.id;
|
|
canceler.fee = current_fee_schedule().calculate_fee( canceler );
|
|
if( canceler.fee.amount > order.deferred_fee )
|
|
{
|
|
// Cap auto-cancel fees at deferred_fee; see #549
|
|
wlog( "At block ${b}, fee for clearing expired order ${oid} was capped at deferred_fee ${fee}", ("b", head_block_num())("oid", order.id)("fee", order.deferred_fee) );
|
|
canceler.fee = asset( order.deferred_fee, asset_id_type() );
|
|
}
|
|
// we know the fee for this op is set correctly since it is set by the chain.
|
|
// this allows us to avoid a hung chain:
|
|
// - if #549 case above triggers
|
|
// - if the fee is incorrect, which may happen due to #435 (although since cancel is a fixed-fee op, it shouldn't)
|
|
cancel_context.skip_fee_schedule_check = true;
|
|
apply_operation(cancel_context, canceler);
|
|
}
|
|
});
|
|
|
|
//Process expired force settlement orders
|
|
auto& settlement_index = get_index_type<force_settlement_index>().indices().get<by_expiration>();
|
|
if( !settlement_index.empty() )
|
|
{
|
|
asset_id_type current_asset = settlement_index.begin()->settlement_asset_id();
|
|
asset max_settlement_volume;
|
|
bool extra_dump = false;
|
|
|
|
auto next_asset = [¤t_asset, &settlement_index, &extra_dump] {
|
|
auto bound = settlement_index.upper_bound(current_asset);
|
|
if( bound == settlement_index.end() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returning false" );
|
|
}
|
|
return false;
|
|
}
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset returning true, bound is ${b}", ("b", *bound) );
|
|
}
|
|
current_asset = bound->settlement_asset_id();
|
|
return true;
|
|
};
|
|
|
|
uint32_t count = 0;
|
|
|
|
// At each iteration, we either consume the current order and remove it, or we move to the next asset
|
|
for( auto itr = settlement_index.lower_bound(current_asset);
|
|
itr != settlement_index.end();
|
|
itr = settlement_index.lower_bound(current_asset) )
|
|
{
|
|
++count;
|
|
const force_settlement_object& order = *itr;
|
|
auto order_id = order.id;
|
|
current_asset = order.settlement_asset_id();
|
|
const asset_object& mia_object = get(current_asset);
|
|
const asset_bitasset_data_object& mia = mia_object.bitasset_data(*this);
|
|
|
|
extra_dump = ((count >= 1000) && (count <= 1020));
|
|
|
|
if( extra_dump )
|
|
{
|
|
wlog( "clear_expired_orders() dumping extra data for iteration ${c}", ("c", count) );
|
|
ilog( "head_block_num is ${hb} current_asset is ${a}", ("hb", head_block_num())("a", current_asset) );
|
|
}
|
|
|
|
if( mia.has_settlement() )
|
|
{
|
|
ilog( "Canceling a force settlement because of black swan" );
|
|
cancel_order( order );
|
|
continue;
|
|
}
|
|
|
|
// Has this order not reached its settlement date?
|
|
if( order.settlement_date > head_block_time() )
|
|
{
|
|
if( next_asset() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returned true when order.settlement_date > head_block_time()" );
|
|
}
|
|
continue;
|
|
}
|
|
break;
|
|
}
|
|
// Can we still settle in this asset?
|
|
if( mia.current_feed.settlement_price.is_null() )
|
|
{
|
|
ilog("Canceling a force settlement in ${asset} because settlement price is null",
|
|
("asset", mia_object.symbol));
|
|
cancel_order(order);
|
|
continue;
|
|
}
|
|
if( max_settlement_volume.asset_id != current_asset )
|
|
max_settlement_volume = mia_object.amount(mia.max_force_settlement_volume(mia_object.dynamic_data(*this).current_supply));
|
|
if( mia.force_settled_volume >= max_settlement_volume.amount )
|
|
{
|
|
/*
|
|
ilog("Skipping force settlement in ${asset}; settled ${settled_volume} / ${max_volume}",
|
|
("asset", mia_object.symbol)("settlement_price_null",mia.current_feed.settlement_price.is_null())
|
|
("settled_volume", mia.force_settled_volume)("max_volume", max_settlement_volume));
|
|
*/
|
|
if( next_asset() )
|
|
{
|
|
if( extra_dump )
|
|
{
|
|
ilog( "next_asset() returned true when mia.force_settled_volume >= max_settlement_volume.amount" );
|
|
}
|
|
continue;
|
|
}
|
|
break;
|
|
}
|
|
|
|
auto& pays = order.balance;
|
|
auto receives = (order.balance * mia.current_feed.settlement_price);
|
|
receives.amount = (fc::uint128_t(receives.amount.value) *
|
|
(GRAPHENE_100_PERCENT - mia.options.force_settlement_offset_percent) / GRAPHENE_100_PERCENT).to_uint64();
|
|
assert(receives <= order.balance * mia.current_feed.settlement_price);
|
|
|
|
price settlement_price = pays / receives;
|
|
|
|
auto& call_index = get_index_type<call_order_index>().indices().get<by_collateral>();
|
|
asset settled = mia_object.amount(mia.force_settled_volume);
|
|
// Match against the least collateralized short until the settlement is finished or we reach max settlements
|
|
while( settled < max_settlement_volume && find_object(order_id) )
|
|
{
|
|
auto itr = call_index.lower_bound(boost::make_tuple(price::min(mia_object.bitasset_data(*this).options.short_backing_asset,
|
|
mia_object.get_id())));
|
|
// There should always be a call order, since asset exists!
|
|
assert(itr != call_index.end() && itr->debt_type() == mia_object.get_id());
|
|
asset max_settlement = max_settlement_volume - settled;
|
|
|
|
if( order.balance.amount == 0 )
|
|
{
|
|
wlog( "0 settlement detected" );
|
|
cancel_order( order );
|
|
break;
|
|
}
|
|
try {
|
|
settled += match(*itr, order, settlement_price, max_settlement);
|
|
}
|
|
catch ( const black_swan_exception& e ) {
|
|
wlog( "black swan detected: ${e}", ("e", e.to_detail_string() ) );
|
|
cancel_order( order );
|
|
break;
|
|
}
|
|
}
|
|
if( mia.force_settled_volume != settled.amount )
|
|
{
|
|
modify(mia, [settled](asset_bitasset_data_object& b) {
|
|
b.force_settled_volume = settled.amount;
|
|
});
|
|
}
|
|
}
|
|
}
|
|
} FC_CAPTURE_AND_RETHROW() }
|
|
|
|
void database::update_expired_feeds()
{
   // For every market-issued asset whose price feed has gone stale, re-derive
   // the median feed, re-check margin calls at the new price, and keep the
   // asset's core exchange rate in sync with the current feed.
   auto& asset_idx = get_index_type<asset_index>().indices().get<by_type>();
   auto itr = asset_idx.lower_bound( true /** market issued */ );
   while( itr != asset_idx.end() )
   {
      const asset_object& a = *itr;
      // advance before any modify() below can re-sort/invalidate our position
      ++itr;
      assert( a.is_market_issued() );

      const asset_bitasset_data_object& b = a.bitasset_data(*this);
      bool feed_is_expired;
      if( head_block_time() < HARDFORK_615_TIME )
         // pre-hardfork-615 expiry rule: kept so old blocks replay identically
         feed_is_expired = b.feed_is_expired_before_hardfork_615( head_block_time() );
      else
         feed_is_expired = b.feed_is_expired( head_block_time() );
      if( feed_is_expired )
      {
         modify(b, [this](asset_bitasset_data_object& a) {
            a.update_median_feeds(head_block_time());
         });
         // the median may have moved; margin calls must be re-evaluated
         check_call_orders(b.current_feed.settlement_price.base.asset_id(*this));
      }
      if( !b.current_feed.core_exchange_rate.is_null() &&
          a.options.core_exchange_rate != b.current_feed.core_exchange_rate )
         modify(a, [&b](asset_object& a) {
            a.options.core_exchange_rate = b.current_feed.core_exchange_rate;
         });
   }
}
|
|
|
|
void database::update_maintenance_flag( bool new_maintenance_flag )
|
|
{
|
|
modify( get_dynamic_global_properties(), [&]( dynamic_global_property_object& dpo )
|
|
{
|
|
auto maintenance_flag = dynamic_global_property_object::maintenance_flag;
|
|
dpo.dynamic_flags =
|
|
(dpo.dynamic_flags & ~maintenance_flag)
|
|
| (new_maintenance_flag ? maintenance_flag : 0);
|
|
} );
|
|
return;
|
|
}
|
|
|
|
void database::update_withdraw_permissions()
|
|
{
|
|
auto& permit_index = get_index_type<withdraw_permission_index>().indices().get<by_expiration>();
|
|
while( !permit_index.empty() && permit_index.begin()->expiration <= head_block_time() )
|
|
remove(*permit_index.begin());
|
|
}
|
|
|
|
uint64_t database::get_random_bits( uint64_t bound )
|
|
{
|
|
return _random_number_generator(bound);
|
|
}
|
|
|
|
void process_finished_games(database& db)
{
   // Intentionally a no-op for now: resolving finished games is not yet
   // implemented.  The hook is kept so update_tournaments() documents the
   // intended processing order.
   //auto& games_index = db.get_index_type<game_index>().indices().get<by_id>();
}
|
|
|
|
void process_finished_matches(database& db)
{
   // Intentionally a no-op for now: placeholder kept so update_tournaments()
   // documents the intended processing order.
}
|
|
|
|
void process_in_progress_tournaments(database& db)
{
   // Give every in-progress tournament a chance to kick off matches that are
   // now ready to start.
   const auto& state_idx = db.get_index_type<tournament_index>().indices().get<by_start_time>();
   auto tournament_iter = state_idx.lower_bound(boost::make_tuple(tournament_state::in_progress));
   while (tournament_iter != state_idx.end() &&
          tournament_iter->get_state() == tournament_state::in_progress)
   {
      // grab the successor first: starting matches may change the tournament's
      // state and therefore its position in this index
      auto successor = std::next(tournament_iter);
      tournament_iter->check_for_new_matches_to_start(db);
      tournament_iter = successor;
   }
}
|
|
|
|
void cancel_expired_tournaments(database& db)
{
   // Cancel tournaments still accepting registrations whose deadline passed
   // without filling up.  The by_registration_deadline index is ordered by
   // (state, deadline), so all candidates sit at the front.
   const auto& deadline_idx = db.get_index_type<tournament_index>().indices().get<by_registration_deadline>();
   for (;;)
   {
      if (deadline_idx.empty())
         break;
      const tournament_object& tournament = *deadline_idx.begin();
      if (tournament.get_state() != tournament_state::accepting_registrations ||
          tournament.options.registration_deadline > db.head_block_time())
         break;
      fc_ilog(fc::logger::get("tournament"),
              "Canceling tournament ${id} because its deadline expired",
              ("id", tournament.id));
      // the state transition re-sorts the object out of the front of the index
      db.modify(tournament, [&](tournament_object& t) {
         t.on_registration_deadline_passed(db);
      });
   }
}
|
|
|
|
void start_fully_registered_tournaments(database& db)
{
   // Start tournaments that have filled up and whose start time has arrived.
   const auto& start_time_idx = db.get_index_type<tournament_index>().indices().get<by_start_time>();
   for (;;)
   {
      // first tournament waiting to start; && short-circuits so start_time is
      // only dereferenced when the state check holds
      auto waiting_iter = start_time_idx.lower_bound(boost::make_tuple(tournament_state::awaiting_start));
      const bool ready = waiting_iter != start_time_idx.end() &&
                         waiting_iter->get_state() == tournament_state::awaiting_start &&
                         *waiting_iter->start_time <= db.head_block_time();
      if (!ready)
         break;
      db.modify(*waiting_iter, [&](tournament_object& t) {
         t.on_start_time_arrived(db);
      });
   }
}
|
|
|
|
void initiate_next_round_of_matches(database& db)
{
   // Intentionally a no-op for now: placeholder kept so update_tournaments()
   // documents the intended processing order.
}
|
|
|
|
void initiate_next_games(database& db)
{
   // Fire timeouts on games that have been waiting too long for a commit or
   // reveal move.  Games with no timeout scheduled carry an empty optional,
   // which sorts first in by_next_timeout; upper_bound skips past them to the
   // earliest scheduled timeout.
   const auto& timeout_idx = db.get_index_type<game_index>().indices().get<by_next_timeout>();
   for (;;)
   {
      auto game_iter = timeout_idx.upper_bound(boost::make_tuple(optional<time_point_sec>()));
      const bool due = game_iter != timeout_idx.end() &&
                       *game_iter->next_timeout <= db.head_block_time();
      if (!due)
         break;
      db.modify(*game_iter, [&](game_object& g) {
         g.on_timeout(db);
      });
   }
}
|
|
|
|
void database::update_tournaments()
|
|
{
|
|
// Process as follows:
|
|
// - Process games
|
|
// - Process matches
|
|
// - Process tournaments
|
|
// - Process matches
|
|
// - Process games
|
|
process_finished_games(*this);
|
|
process_finished_matches(*this);
|
|
cancel_expired_tournaments(*this);
|
|
start_fully_registered_tournaments(*this);
|
|
process_in_progress_tournaments(*this);
|
|
initiate_next_round_of_matches(*this);
|
|
initiate_next_games(*this);
|
|
}
|
|
|
|
void process_settled_betting_markets(database& db, fc::time_point_sec current_block_time)
{
   // After grading, a betting market group waits out a window in which it can
   // be flagged for re-grading; if the window elapses untouched, the group is
   // settled (paid out) automatically.  Groups with no settling time carry an
   // empty optional and sort first in by_settling_time; upper_bound positions
   // us at the first group with a time set.
   const auto& settling_idx = db.get_index_type<betting_market_group_object_index>().indices().get<by_settling_time>();
   auto group_iter = settling_idx.upper_bound(fc::optional<fc::time_point_sec>());
   while (group_iter != settling_idx.end() &&
          *group_iter->settling_time <= current_block_time)
   {
      // grab the successor first: settling mutates/re-sorts the group object
      auto successor = std::next(group_iter);
      db.settle_betting_market_group(*group_iter);
      group_iter = successor;
   }
}
|
|
|
|
void database::update_betting_markets(fc::time_point_sec current_block_time)
{
   // Per-block betting housekeeping: pay out market groups whose re-grading
   // window has elapsed as of `current_block_time`.
   process_settled_betting_markets(*this, current_block_time);
}
|
|
|
|
} }
|