Merge branch 'master' of github.com:cryptonomex/graphene

commit e2b5b24f5d
14 changed files with 295 additions and 57 deletions
@@ -103,6 +103,7 @@ namespace detail {
       public:
          fc::optional<fc::temp_file> _lock_file;
          bool _is_block_producer = false;
+         bool _force_validate = false;

          void reset_p2p_node(const fc::path& data_dir)
          { try {
@@ -312,6 +313,12 @@ namespace detail {
            _chain_db->open(_data_dir / "blockchain", initial_state);
         }

+        if( _options->count("force-validate") )
+        {
+           ilog( "All transaction signatures will be validated" );
+           _force_validate = true;
+        }
+
         graphene::time::now();

         if( _options->count("api-access") )
@@ -399,7 +406,7 @@ namespace detail {
         // you can help the network code out by throwing a block_older_than_undo_history exception.
         // when the net code sees that, it will stop trying to push blocks from that chain, but
         // leave that peer connected so that they can get sync blocks from us
-        bool result = _chain_db->push_block(blk_msg.block, _is_block_producer ? database::skip_nothing : database::skip_transaction_signatures);
+        bool result = _chain_db->push_block(blk_msg.block, (_is_block_producer | _force_validate) ? database::skip_nothing : database::skip_transaction_signatures);

         // the block was accepted, so we now know all of the transactions contained in the block
         if (!sync_mode)
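Note on the hunk above: `_is_block_producer | _force_validate` combines two bools with the bitwise OR rather than logical `||`. Because each operand is already 0 or 1 the result is identical here, though `||` would be the conventional spelling. A minimal, self-contained sketch with stand-in names (the skip_flags enum is hypothetical; the real flags live on graphene::chain::database):

   #include <cstdint>
   #include <iostream>

   enum skip_flags : uint32_t { skip_nothing = 0, skip_transaction_signatures = 1 << 0 };

   int main()
   {
      bool is_block_producer = false;
      bool force_validate    = true;
      // Bitwise '|' on bools behaves like '||' because each operand is already 0 or 1.
      skip_flags skip = (is_block_producer | force_validate) ? skip_nothing
                                                             : skip_transaction_signatures;
      std::cout << (skip == skip_nothing ? "validating all signatures\n"
                                         : "skipping signature checks\n");
   }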
@@ -826,6 +833,7 @@ void application::set_program_options(boost::program_options::options_description
          "invalid file is found, it will be replaced with an example Genesis State.")
         ("replay-blockchain", "Rebuild object graph by replaying all blocks")
         ("resync-blockchain", "Delete all blocks and re-sync with network from scratch")
+        ("force-validate", "Force validation of all transactions")
         ("genesis-timestamp", bpo::value<uint32_t>(), "Replace timestamp from genesis.json with current time plus this many seconds (experts only!)")
         ;
      command_line_options.add(_cli_options);
@@ -21,14 +21,64 @@
 #include <graphene/chain/market_evaluator.hpp>
 #include <graphene/chain/database.hpp>
 #include <graphene/chain/exceptions.hpp>
+#include <graphene/chain/hardfork.hpp>

 #include <functional>

 namespace graphene { namespace chain {

+/**
+ *  Valid symbols can contain [A-Z0-9], and '.'
+ *  They must start with [A, Z]
+ *  They must end with [A, Z]
+ *  They can contain a maximum of one '.'
+ */
+bool is_valid_symbol_old( const string& symbol )
+{
+    if( symbol.size() < GRAPHENE_MIN_ASSET_SYMBOL_LENGTH )
+        return false;
+
+    if( symbol.size() > GRAPHENE_MAX_ASSET_SYMBOL_LENGTH )
+        return false;
+
+    if( !isalpha( symbol.front() ) )
+        return false;
+
+    if( !isalpha( symbol.back() ) )
+        return false;
+
+    bool dot_already_present = false;
+    for( const auto c : symbol )
+    {
+        if( (isalpha( c ) || isdigit(c)) && isupper( c ) )
+            continue;
+
+        if( c == '.' )
+        {
+            if( dot_already_present )
+                return false;
+
+            dot_already_present = true;
+            continue;
+        }
+
+        return false;
+    }
+
+    return true;
+}
+
 void_result asset_create_evaluator::do_evaluate( const asset_create_operation& op )
 { try {

    database& d = db();

+#warning HARDFORK remove this check after HARDFORK_359_TIME and rename is_valid_symbol_old -> is_valid_symbol
+   if( d.head_block_time() <= HARDFORK_359_TIME )
+   {
+      FC_ASSERT( is_valid_symbol_old( op.symbol ) );
+   }
+
    const auto& chain_parameters = d.get_global_properties().parameters;
    FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
    FC_ASSERT( op.common_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
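Read together with the symbol tests further down, the old predicate has a subtle consequence: isupper() is false for every digit, so is_valid_symbol_old rejects symbols like "A0A" even though the doc comment says [A-Z0-9] are allowed. A self-contained sketch of the old rules (the length bounds 3 and 16 are assumptions inferred from the tests below, not the real GRAPHENE_MIN/MAX_ASSET_SYMBOL_LENGTH constants):

   #include <cassert>
   #include <cctype>
   #include <string>

   // Sketch of the old validation rules, with assumed length bounds of 3..16.
   static bool is_valid_symbol_old_sketch( const std::string& symbol )
   {
      if( symbol.size() < 3 || symbol.size() > 16 )
         return false;
      if( !isalpha( symbol.front() ) || !isalpha( symbol.back() ) )
         return false;
      bool dot_already_present = false;
      for( const char c : symbol )
      {
         if( (isalpha( c ) || isdigit( c )) && isupper( c ) )
            continue;   // digits never satisfy isupper(), so they fall through and fail
         if( c == '.' )
         {
            if( dot_already_present )
               return false;
            dot_already_present = true;
            continue;
         }
         return false;
      }
      return true;
   }

   int main()
   {
      assert(  is_valid_symbol_old_sketch( "AAA" ) );
      assert( !is_valid_symbol_old_sketch( "A0A" ) );  // rejected by the old rule
      assert(  is_valid_symbol_old_sketch( "A.A" ) );
      assert( !is_valid_symbol_old_sketch( "A..A" ) );
   }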
@@ -456,7 +506,96 @@ void_result asset_publish_feeds_evaluator::do_evaluate(const asset_publish_feed_operation& o)

    const asset_bitasset_data_object& bitasset = base.bitasset_data(d);
    FC_ASSERT( !bitasset.has_settlement(), "No further feeds may be published after a settlement event" );
-   FC_ASSERT(o.feed.settlement_price.quote.asset_id == bitasset.options.short_backing_asset);

+#warning Remove this check when starting a new network
+   if( d.head_block_time() <= HARDFORK_357_TIME )
+   {
+      FC_ASSERT(o.feed.settlement_price.quote.asset_id == bitasset.options.short_backing_asset);
+
+      bool is_nop = false;
+
+      try
+      {
+         // these two changes should go in price_feed::validate() when creating new network
+         if( !o.feed.core_exchange_rate.is_null() )
+         {
+            o.feed.core_exchange_rate.validate();
+         }
+         if( (!o.feed.settlement_price.is_null()) && (!o.feed.core_exchange_rate.is_null()) )
+         {
+            if( o.feed.settlement_price.base.asset_id == o.feed.core_exchange_rate.base.asset_id )
+            {
+               // uncrossed feed, this is the form we expect
+               FC_ASSERT( o.feed.settlement_price.base.asset_id == o.feed.core_exchange_rate.base.asset_id );
+               FC_ASSERT( o.feed.settlement_price.quote.asset_id == o.feed.core_exchange_rate.quote.asset_id );
+            }
+            else
+            {
+               // crossed feed, your feed script needs to be fixed
+               FC_ASSERT( o.feed.settlement_price.base.asset_id == o.feed.core_exchange_rate.quote.asset_id );
+               FC_ASSERT( o.feed.settlement_price.quote.asset_id == o.feed.core_exchange_rate.base.asset_id );
+               /*
+               wlog( "${aname} feed pub with crossed prices by ${name} in block ${n}",
+                     ("aname", base.symbol)
+                     ("n", d.head_block_num()+1)
+                     ("name", o.publisher(d).name)
+                   );
+               */
+            }
+         }
+
+         if( !o.feed.is_for( o.asset_id ) )
+         {
+            wlog( "${aname} feed pub with wrong asset by ${name} in block ${n}",
+                  ("aname", base.symbol)
+                  ("n", d.head_block_num()+1)
+                  ("name", o.publisher(d).name)
+                );
+            is_nop = true;
+         }
+      }
+      catch( const fc::exception& e )
+      {
+         wlog( "${aname} feed pub with invalid price feed by ${name} in block ${n}",
+               ("aname", base.symbol)
+               ("n", d.head_block_num()+1)
+               ("name", o.publisher(d).name)
+             );
+         wdump( (e) );
+      }
+
+#warning Remove this check when starting a new network
+      if( d.head_block_num() > 59300 )
+      {
+         FC_ASSERT(
+               (base.symbol != "SEK")
+            && (base.symbol != "SILVER")
+            && (base.symbol != "RUB")
+            && (base.symbol != "GBP")
+            );
+      }
+   }
+   else
+   {
+      //
+      // many of these checks should be moved to price_feed.validate()
+      // or the operation validator when new network is started
+      //
+      if( !o.feed.core_exchange_rate.is_null() )
+      {
+         o.feed.core_exchange_rate.validate();
+      }
+      if( (!o.feed.settlement_price.is_null()) && (!o.feed.core_exchange_rate.is_null()) )
+      {
+         FC_ASSERT( o.feed.settlement_price.base.asset_id == o.feed.core_exchange_rate.base.asset_id );
+         FC_ASSERT( o.feed.settlement_price.quote.asset_id == o.feed.core_exchange_rate.quote.asset_id );
+      }
+
+      FC_ASSERT( !o.feed.settlement_price.is_null() );
+      FC_ASSERT( !o.feed.core_exchange_rate.is_null() );
+      FC_ASSERT( o.feed.settlement_price.quote.asset_id == bitasset.options.short_backing_asset );
+      FC_ASSERT( o.feed.is_for( o.asset_id ) );
+   }
    //Verify that the publisher is authoritative to publish a feed
    if( (base.issuer == GRAPHENE_WITNESS_ACCOUNT) || (base.issuer == GRAPHENE_COMMITTEE_ACCOUNT) )
    {
@@ -472,6 +611,14 @@ void_result asset_publish_feeds_evaluator::do_evaluate(const asset_publish_feed_operation& o)

 void_result asset_publish_feeds_evaluator::do_apply(const asset_publish_feed_operation& o)
 { try {

+#warning Remove this check when preparing for new network release
+   if( !o.feed.is_for( o.asset_id ) )
+   {
+      wlog( "Ignoring bad feed" );
+      return void_result();
+   }
+
    database& d = db();

    const asset_object& base = o.asset_id(d);
@@ -377,7 +377,10 @@ signed_block database::_generate_block(
    pending_block.sign( block_signing_private_key );

    // TODO: Move this to _push_block() so session is restored.
-   FC_ASSERT( fc::raw::pack_size(pending_block) <= get_global_properties().parameters.maximum_block_size );
+   if( !(skip & skip_block_size_check) )
+   {
+      FC_ASSERT( fc::raw::pack_size(pending_block) <= get_global_properties().parameters.maximum_block_size );
+   }

    push_block( pending_block, skip );
@@ -640,7 +643,11 @@ const witness_object& database::validate_block_header( uint32_t skip, const signed_block& next_block )

    witness_id_type scheduled_witness = get_scheduled_witness( slot_num );

-#warning remove this hardfork check for next release
+#ifdef _MSC_VER
+# pragma message ("WARNING: remove this hardfork check for next release")
+#else
+# warning remove this hardfork check for next release
+#endif
    if( next_block.block_num() > 58500 ) {
       FC_ASSERT( next_block.witness == scheduled_witness, "Witness produced block at wrong time",
                  ("block witness",next_block.witness)("scheduled",scheduled_witness)("slot_num",slot_num) );
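Note: the `#ifdef _MSC_VER` wrapper exists because MSVC rejects `#warning` as an invalid preprocessor directive; it only understands `#pragma message`. The general shape of the idiom, reusable wherever a compile-time reminder is needed (caveat: `#pragma message` output is not a real warning, so it is not escalated by /WX):

   // Portable compile-time reminder; the text is illustrative.
   #ifdef _MSC_VER
   # pragma message ("WARNING: remove this hardfork check for next release")
   #else
   # warning remove this hardfork check for next release
   #endif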
@@ -47,7 +47,11 @@ vector<std::reference_wrapper<const typename Index::object_type>> database::sort_votable_objects(size_t count) const
 template<class... Types>
 void database::perform_account_maintenance(std::tuple<Types...> helpers)
 {
-#warning switch to this for next release: const auto& idx = get_index_type<account_index>().indices().get<by_name>();
+#ifdef _MSC_VER
+# pragma message ("WARNING: switch to this for next release: const auto& idx = get_index_type<account_index>().indices().get<by_name>();")
+#else
+# warning switch to this for next release: const auto& idx = get_index_type<account_index>().indices().get<by_name>();
+#endif
    const auto& idx = get_index_type<account_index>().indices();
    for( const account_object& a : idx )
       detail::for_each(helpers, a, detail::gen_seq<sizeof...(Types)>());
@@ -168,32 +168,35 @@ void database::open(
 FC_CAPTURE_LOG_AND_RETHROW( (data_dir) )
 }

-void database::close(uint32_t blocks_to_rewind)
+void database::close(bool rewind)
 {
    // TODO: Save pending tx's on close()
    clear_pending();

    // pop all of the blocks that we can given our undo history, this should
    // throw when there is no more undo history to pop
-   try
+   if( rewind )
    {
-      while( true )
+      try
       {
-         // elog("pop");
-         block_id_type popped_block_id = head_block_id();
-         pop_block();
-         _fork_db.remove(popped_block_id); // doesn't throw on missing
-         try
-         {
-            _block_id_to_block.remove(popped_block_id);
-         }
-         catch (const fc::key_not_found_exception&)
+         while( true )
          {
+            // elog("pop");
+            block_id_type popped_block_id = head_block_id();
+            pop_block();
+            _fork_db.remove(popped_block_id); // doesn't throw on missing
+            try
+            {
+               _block_id_to_block.remove(popped_block_id);
+            }
+            catch (const fc::key_not_found_exception&)
+            {
+            }
          }
       }
+      catch (...)
+      {
+      }
    }
-   catch (...)
-   {
-   }

    // Since pop_block() will move tx's in the popped blocks into pending,
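The new `close(bool rewind)` keeps the old default behavior (rewind, popping whatever the undo history allows) while letting callers skip the rewind entirely. A minimal stub of the contract (database_stub is hypothetical; the real class is graphene::chain::database):

   #include <iostream>

   struct database_stub
   {
      void close(bool rewind = true)
      {
         if (rewind)
            std::cout << "popping undoable blocks back into pending before shutdown\n";
         std::cout << "database closed\n";
      }
   };

   int main()
   {
      database_stub db;
      db.close();       // default: rewind, as before
      db.close(false);  // fast shutdown, keep head state as-is
   }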
@@ -100,7 +100,7 @@ namespace graphene { namespace chain {
        * Will close the database before wiping. Database will be closed when this function returns.
        */
       void wipe(const fc::path& data_dir, bool include_blocks);
-      void close(uint32_t blocks_to_rewind = 0);
+      void close(bool rewind = true);

       //////////////////// db_block.cpp ////////////////////
libraries/chain/include/graphene/chain/hardfork.hpp (new file, 21 lines)
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015, Cryptonomex, Inc.
+ * All rights reserved.
+ *
+ * This source code is provided for evaluation in private test networks only, until September 8, 2015. After this date, this license expires and
+ * the code may not be used, modified or distributed for any purpose. Redistribution and use in source and binary forms, with or without modification,
+ * are permitted until September 8, 2015, provided that the following conditions are met:
+ *
+ * 1. The code and/or derivative works are used only for private test networks consisting of no more than 10 P2P nodes.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#pragma once
+
+#define HARDFORK_357_TIME (fc::time_point_sec( 1444416300 ))
+#define HARDFORK_359_TIME (fc::time_point_sec( 1444416300 ))
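These constants gate consensus changes on the chain's head block time; 1444416300 is roughly 2015-10-09 18:45 UTC. A sketch of the gating pattern the evaluators above use (fc/time.hpp is assumed to provide fc::time_point_sec, as in the fc library):

   #include <fc/time.hpp>

   #define HARDFORK_357_TIME (fc::time_point_sec( 1444416300 ))

   // Pre-fork rules apply while the head block time is at or before the fork time.
   bool use_pre_fork_rules( fc::time_point_sec head_block_time )
   {
      return head_block_time <= HARDFORK_357_TIME;
   }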
@@ -200,6 +200,7 @@ namespace graphene { namespace chain {
       }

       void validate() const;
+      bool is_for( asset_id_type asset_id ) const;
    };

} }
@@ -138,6 +138,20 @@ namespace graphene { namespace chain {
      //FC_ASSERT( maintenance_collateral_ratio >= maximum_short_squeeze_ratio );
   } FC_CAPTURE_AND_RETHROW( (*this) ) }

+  bool price_feed::is_for( asset_id_type asset_id ) const
+  {
+     try
+     {
+        if( !settlement_price.is_null() )
+           return (settlement_price.base.asset_id == asset_id);
+        if( !core_exchange_rate.is_null() )
+           return (core_exchange_rate.base.asset_id == asset_id);
+        // (null, null) is valid for any feed
+        return true;
+     }
+     FC_CAPTURE_AND_RETHROW( (*this) )
+  }
+
   price price_feed::max_short_squeeze_price()const
   {
      boost::rational<uint64_t> sp( settlement_price.base.amount.value, settlement_price.quote.amount.value ); //debt.amount.value,collateral.amount.value);
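The semantics: a feed "is for" an asset when its non-null prices quote that asset as their base, and an all-null feed matches anything. A toy model of the logic (toy_price/toy_feed are simplifications; in particular the real price::is_null() is derived from the amounts, not stored):

   #include <cassert>
   #include <cstdint>

   struct toy_price
   {
      uint64_t base_asset_id  = 0;
      uint64_t quote_asset_id = 0;
      bool     null           = true;   // stands in for price::is_null()
   };

   struct toy_feed
   {
      toy_price settlement_price;
      toy_price core_exchange_rate;

      bool is_for(uint64_t asset_id) const
      {
         if (!settlement_price.null)
            return settlement_price.base_asset_id == asset_id;
         if (!core_exchange_rate.null)
            return core_exchange_rate.base_asset_id == asset_id;
         return true;  // (null, null) is valid for any feed
      }
   };

   int main()
   {
      toy_feed feed;
      feed.settlement_price = {7, 1, false};
      assert(  feed.is_for(7) );
      assert( !feed.is_for(8) );
   }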
@@ -25,7 +25,7 @@ bool is_valid_symbol( const string& symbol )
    bool dot_already_present = false;
    for( const auto c : symbol )
    {
-      if( (isalpha( c ) || isdigit(c)) && isupper( c ) )
+      if( (isalpha( c ) && isupper( c )) || isdigit(c) )
         continue;

      if( c == '.' )
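The regrouping matters: under the old predicate a digit had to satisfy isupper(), which is false for digits, so symbols like "A0A" were rejected; the new predicate accepts uppercase letters or digits. A standalone check of both forms:

   #include <cassert>
   #include <cctype>

   int main()
   {
      const unsigned char c = '0';
      assert( !((isalpha(c) || isdigit(c)) && isupper(c)) );  // old rule: digits never pass
      assert(  ((isalpha(c) && isupper(c)) || isdigit(c)) );  // new rule: digits pass
   }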
@@ -42,6 +42,7 @@ bool is_valid_symbol( const string& symbol )

    return true;
 }

 share_type asset_issue_operation::calculate_fee(const fee_parameters_type& k)const
 {
    return k.fee + calculate_data_fee( fc::raw::pack_size(memo), k.price_per_kbyte );
@@ -99,7 +99,7 @@ void_result vesting_balance_withdraw_evaluator::do_evaluate( const vesting_balance_withdraw_operation& op )
    const time_point_sec now = d.head_block_time();

    const vesting_balance_object& vbo = op.vesting_balance( d );
-   FC_ASSERT( op.owner == vbo.owner );
+   FC_ASSERT( op.owner == vbo.owner, "", ("op.owner", op.owner)("vbo.owner", vbo.owner) );
    FC_ASSERT( vbo.is_withdraw_allowed( now, op.amount ), "", ("now", now)("op", op)("vbo", vbo) );
    assert( op.amount <= vbo.balance ); // is_withdraw_allowed should fail before this check is reached
@@ -250,9 +250,12 @@ namespace graphene { namespace net { namespace detail {
      {
        item_id item;
        unsigned sequence_number;
+       fc::time_point timestamp; // the time we last heard about this item in an inventory message

        prioritized_item_id(const item_id& item, unsigned sequence_number) :
          item(item),
-         sequence_number(sequence_number)
+         sequence_number(sequence_number),
+         timestamp(fc::time_point::now())
        {}
        bool operator<(const prioritized_item_id& rhs) const
        {
@@ -473,6 +476,7 @@ namespace graphene { namespace net { namespace detail {
      // @}

      fc::future<void> _terminate_inactive_connections_loop_done;
+     uint8_t _recent_block_interval_in_seconds; // a cached copy of the block interval, to avoid a thread hop to the blockchain to get the current value

      std::string _user_agent_string;
      /** _node_public_key is a key automatically generated when the client is first run, stored in
@@ -793,6 +797,7 @@ namespace graphene { namespace net { namespace detail {
        _suspend_fetching_sync_blocks(false),
        _items_to_fetch_updated(false),
        _items_to_fetch_sequence_counter(0),
+       _recent_block_interval_in_seconds(GRAPHENE_MAX_BLOCK_INTERVAL),
        _user_agent_string(user_agent),
        _desired_number_of_connections(GRAPHENE_NET_DEFAULT_DESIRED_CONNECTIONS),
        _maximum_number_of_connections(GRAPHENE_NET_DEFAULT_MAX_CONNECTIONS),
@@ -1106,6 +1111,7 @@ namespace graphene { namespace net { namespace detail {
      dlog("beginning an iteration of fetch items (${count} items to fetch)",
           ("count", _items_to_fetch.size()));

+     fc::time_point oldest_timestamp_to_fetch = fc::time_point::now() - fc::seconds(_recent_block_interval_in_seconds * GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS);
      fc::time_point next_peer_unblocked_time = fc::time_point::maximum();

      // we need to construct a list of items to request from each peer first,
@@ -1134,34 +1140,45 @@ namespace graphene { namespace net { namespace detail {
      // now loop over all items we want to fetch
      for (auto item_iter = _items_to_fetch.begin(); item_iter != _items_to_fetch.end();)
      {
-       // and find a peer that has it, we'll use the one who has the least requests going to it to load balance
-       bool item_fetched = false;
-       for (auto peer_iter = items_by_peer.get<requested_item_count_index>().begin(); peer_iter != items_by_peer.get<requested_item_count_index>().end(); ++peer_iter)
-       {
-         const peer_connection_ptr& peer = peer_iter->peer;
-         // if they have the item and we haven't already decided to ask them for too many other items
-         if (peer_iter->item_ids.size() < GRAPHENE_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION &&
-             peer->inventory_peer_advertised_to_us.find(item_iter->item) != peer->inventory_peer_advertised_to_us.end())
-         {
-           if (item_iter->item.item_type == graphene::net::trx_message_type && peer->is_transaction_fetching_inhibited())
-             next_peer_unblocked_time = std::min(peer->transaction_fetching_inhibited_until, next_peer_unblocked_time);
-           else
-           {
-             //dlog("requesting item ${hash} from peer ${endpoint}",
-             //     ("hash", iter->item.item_hash)("endpoint", peer->get_remote_endpoint()));
-             item_id item_id_to_fetch = item_iter->item;
-             peer->items_requested_from_peer.insert(peer_connection::item_to_time_map_type::value_type(item_id_to_fetch, fc::time_point::now()));
-             item_iter = _items_to_fetch.erase(item_iter);
-             item_fetched = true;
-             items_by_peer.get<requested_item_count_index>().modify(peer_iter, [&item_id_to_fetch](peer_and_items_to_fetch& peer_and_items) {
-               peer_and_items.item_ids.push_back(item_id_to_fetch);
-             });
-             break;
-           }
-         }
-       }
-       if (!item_fetched)
-         ++item_iter;
+       if (item_iter->timestamp < oldest_timestamp_to_fetch)
+       {
+         // this item has probably already fallen out of our peers' caches, we'll just ignore it.
+         // this can happen during flooding, and the _items_to_fetch could otherwise get clogged
+         // with a bunch of items that we'll never be able to request from any peer
+         wlog("Unable to fetch item ${item} before its likely expiration time, removing it from our list of items to fetch", ("item", item_iter->item));
+         item_iter = _items_to_fetch.erase(item_iter);
+       }
+       else
+       {
+         // find a peer that has it, we'll use the one who has the least requests going to it to load balance
+         bool item_fetched = false;
+         for (auto peer_iter = items_by_peer.get<requested_item_count_index>().begin(); peer_iter != items_by_peer.get<requested_item_count_index>().end(); ++peer_iter)
+         {
+           const peer_connection_ptr& peer = peer_iter->peer;
+           // if they have the item and we haven't already decided to ask them for too many other items
+           if (peer_iter->item_ids.size() < GRAPHENE_NET_MAX_ITEMS_PER_PEER_DURING_NORMAL_OPERATION &&
+               peer->inventory_peer_advertised_to_us.find(item_iter->item) != peer->inventory_peer_advertised_to_us.end())
+           {
+             if (item_iter->item.item_type == graphene::net::trx_message_type && peer->is_transaction_fetching_inhibited())
+               next_peer_unblocked_time = std::min(peer->transaction_fetching_inhibited_until, next_peer_unblocked_time);
+             else
+             {
+               //dlog("requesting item ${hash} from peer ${endpoint}",
+               //     ("hash", iter->item.item_hash)("endpoint", peer->get_remote_endpoint()));
+               item_id item_id_to_fetch = item_iter->item;
+               peer->items_requested_from_peer.insert(peer_connection::item_to_time_map_type::value_type(item_id_to_fetch, fc::time_point::now()));
+               item_iter = _items_to_fetch.erase(item_iter);
+               item_fetched = true;
+               items_by_peer.get<requested_item_count_index>().modify(peer_iter, [&item_id_to_fetch](peer_and_items_to_fetch& peer_and_items) {
+                 peer_and_items.item_ids.push_back(item_id_to_fetch);
+               });
+               break;
+             }
+           }
+         }
+         if (!item_fetched)
+           ++item_iter;
+       }
      }

      // we've figured out which peer will be providing each item, now send the messages.
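The restructured loop first discards items older than `oldest_timestamp_to_fetch` (computed earlier as now minus block interval times the message-cache duration) before trying any peer. The cutoff arithmetic in isolation (both constants here are assumed stand-ins, not the real GRAPHENE_* values):

   #include <chrono>
   #include <iostream>

   int main()
   {
      using namespace std::chrono;
      const int block_interval_seconds = 5;   // stand-in for the cached block interval
      const int cache_duration_blocks  = 20;  // stand-in for GRAPHENE_NET_MESSAGE_CACHE_DURATION_IN_BLOCKS
      const auto oldest_timestamp_to_fetch =
         system_clock::now() - seconds(block_interval_seconds * cache_duration_blocks);
      // Items advertised before this instant are assumed expired from peers' caches.
      std::cout << "cutoff: " << (block_interval_seconds * cache_duration_blocks) << "s ago\n";
   }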
@@ -1295,7 +1312,7 @@ namespace graphene { namespace net { namespace detail {
      std::list<peer_connection_ptr> peers_to_send_keep_alive;
      std::list<peer_connection_ptr> peers_to_terminate;

-     uint8_t current_block_interval_in_seconds = _delegate->get_current_block_interval_in_seconds();
+     _recent_block_interval_in_seconds = _delegate->get_current_block_interval_in_seconds();

      // Disconnect peers that haven't sent us any data recently
      // These numbers are just guesses and we need to think through how this works better.
@@ -1334,7 +1351,7 @@ namespace graphene { namespace net { namespace detail {
      }

      // timeout for any active peers is two block intervals
-     uint32_t active_disconnect_timeout = 10 * current_block_interval_in_seconds;
+     uint32_t active_disconnect_timeout = 10 * _recent_block_interval_in_seconds;
      uint32_t active_send_keepalive_timeout = active_disconnect_timeout / 2;

      // set the ignored request time out to 1 second. When we request a block
@@ -2843,13 +2860,22 @@ namespace graphene { namespace net { namespace detail {
        }
        else
        {
-         auto insert_result = _items_to_fetch.insert(prioritized_item_id(advertised_item_id, _items_to_fetch_sequence_counter++));
-         if (insert_result.second)
+         auto items_to_fetch_iter = _items_to_fetch.get<item_id_index>().find(advertised_item_id);
+         if (items_to_fetch_iter == _items_to_fetch.get<item_id_index>().end())
          {
+           // it's new to us
+           _items_to_fetch.insert(prioritized_item_id(advertised_item_id, _items_to_fetch_sequence_counter++));
            dlog("adding item ${item_hash} from inventory message to our list of items to fetch",
                 ("item_hash", item_hash));
            trigger_fetch_items_loop();
          }
+         else
+         {
+           // another peer has told us about this item already, but this peer just told us it has the item
+           // too, we can expect it to be around in this peer's cache for longer, so update its timestamp
+           _items_to_fetch.get<item_id_index>().modify(items_to_fetch_iter,
+              [](prioritized_item_id& item) { item.timestamp = fc::time_point::now(); });
+         }
        }
      }
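The handler thus switches from insert-and-check to find-then-insert-or-modify, so a duplicate advertisement refreshes the existing item's timestamp instead of being dropped. A reduced model of that pattern with boost::multi_index (pending_item and the string id are simplifications of the real prioritized_item_id/item_id types):

   #include <boost/multi_index_container.hpp>
   #include <boost/multi_index/hashed_index.hpp>
   #include <boost/multi_index/ordered_index.hpp>
   #include <boost/multi_index/member.hpp>
   #include <boost/multi_index/tag.hpp>
   #include <ctime>
   #include <string>

   struct pending_item
   {
      std::string id;
      unsigned    sequence;
      std::time_t timestamp;
   };

   struct item_id_index {}; // index tag, mirroring the get<item_id_index>() calls above

   using pending_items = boost::multi_index_container<
      pending_item,
      boost::multi_index::indexed_by<
         boost::multi_index::ordered_unique<
            boost::multi_index::member<pending_item, unsigned, &pending_item::sequence>>,
         boost::multi_index::hashed_unique<
            boost::multi_index::tag<item_id_index>,
            boost::multi_index::member<pending_item, std::string, &pending_item::id>>>>;

   // Insert if new to us; otherwise refresh the timestamp of the existing entry.
   void note_advertised_item(pending_items& items, const std::string& id, unsigned& sequence_counter)
   {
      auto& by_id = items.get<item_id_index>();
      auto iter = by_id.find(id);
      if (iter == by_id.end())
         items.insert(pending_item{id, sequence_counter++, std::time(nullptr)});
      else
         by_id.modify(iter, [](pending_item& item) { item.timestamp = std::time(nullptr); });
   }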
@@ -115,7 +115,7 @@ BOOST_AUTO_TEST_CASE( valid_symbol_test )

    BOOST_CHECK( is_valid_symbol( "AAA" ) );
    BOOST_CHECK( !is_valid_symbol( "AaA" ) );
-   BOOST_CHECK( !is_valid_symbol( "A0A" ) );
+   BOOST_CHECK( is_valid_symbol( "A0A" ) );
    BOOST_CHECK( is_valid_symbol( "A.A" ) );

    BOOST_CHECK( !is_valid_symbol( "A..A" ) );
@@ -126,6 +126,8 @@ BOOST_AUTO_TEST_CASE( valid_symbol_test )
    BOOST_CHECK( !is_valid_symbol( "AAAAAAAAAAAAAAAAA" ) );
    BOOST_CHECK( is_valid_symbol( "A.AAAAAAAAAAAAAA" ) );
    BOOST_CHECK( !is_valid_symbol( "A.AAAAAAAAAAAA.A" ) );
+
+   BOOST_CHECK( is_valid_symbol( "AAA000AAA" ) );
 }

 BOOST_AUTO_TEST_CASE( price_test )
@@ -291,7 +291,7 @@ BOOST_AUTO_TEST_CASE( black_swan_issue_346 )

    auto setup_asset = [&]() -> const asset_object&
    {
-      const asset_object& bitusd = create_bitasset("BITUSD"+fc::to_string(trial), feeder_id);
+      const asset_object& bitusd = create_bitasset("BITUSD"+fc::to_string(trial)+"X", feeder_id);
       update_feed_producers( bitusd, {feeder.id} );
       BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() );
       trial++;
@@ -362,7 +362,11 @@ BOOST_AUTO_TEST_CASE( black_swan_issue_346 )
       limit_order_id_type oid_019 = create_sell_order( seller, bitusd.amount(39), core.amount(2000) )->id; // this order is at $0.019, we should not be able to match against it
       limit_order_id_type oid_020 = create_sell_order( seller, bitusd.amount(40), core.amount(2000) )->id; // this order is at $0.020, we should be able to match against it
       set_price( bitusd, bitusd.amount(21) / core.amount(1000) ); // $0.021
-      BOOST_CHECK( !bitusd.bitasset_data(db).has_settlement() );
+      //
+      // We attempt to match against $0.019 order and black swan,
+      // and this is intended behavior. See discussion in ticket.
+      //
+      BOOST_CHECK( bitusd.bitasset_data(db).has_settlement() );
       BOOST_CHECK( db.find_object( oid_019 ) != nullptr );
       BOOST_CHECK( db.find_object( oid_020 ) == nullptr );
    }