Merge branch 'hotfix/bookie2024' into 'master'

Bookie 2024

See merge request PBSA/peerplays!259
Bobinson K B 2023-12-18 06:17:23 +00:00
commit d3d967a2d7
17 changed files with 305 additions and 281 deletions

View file

@ -210,8 +210,8 @@ network_node_api::network_node_api(application &a) :
}
/*
* Remove expired transactions from pending_transactions
*/
for (const auto &transaction : _pending_transactions) {
if (transaction.second.expiration < block.timestamp) {
auto transaction_it = _pending_transactions.find(transaction.second.id());
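The hunk above touches the comment over network_node_api's expiration sweep, which walks _pending_transactions and drops entries whose expiration precedes the new block's timestamp. As a standalone illustration of that pruning pattern only (hypothetical types, not the graphene code), here is a minimal sketch using the erase-returns-iterator idiom so the loop stays valid while erasing:

#include <cstdint>
#include <map>
#include <string>

// Hypothetical stand-ins for the real transaction bookkeeping types.
struct pending_tx {
   std::string id;
   uint32_t expiration; // seconds since epoch
};
using pending_map = std::map<std::string, pending_tx>;

// Drop every pending transaction that expired before the new block's timestamp.
void prune_expired(pending_map &pending, uint32_t block_timestamp) {
   for (auto it = pending.begin(); it != pending.end();) {
      if (it->second.expiration < block_timestamp)
         it = pending.erase(it); // erase returns the next valid iterator
      else
         ++it;
   }
}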

View file

@ -391,8 +391,8 @@ public:
}
/**
* If delegate has the item, the network has no need to fetch it.
*/
virtual bool has_item(const net::item_id &id) override {
try {
if (id.item_type == graphene::net::block_message_type)
@ -404,13 +404,13 @@ public:
}
/**
* @brief allows the application to validate an item prior to broadcasting to peers.
*
* @param sync_mode true if the message was fetched through the sync process, false during normal operation
* @returns true if this message caused the blockchain to switch forks, false if it did not
*
* @throws exception if error validating the item, otherwise the item is safe to broadcast on.
*/
virtual bool handle_block(const graphene::net::block_message &blk_msg, bool sync_mode,
std::vector<fc::uint160_t> &contained_transaction_message_ids) override {
@ -498,14 +498,14 @@ public:
}
/**
* Assuming all data elements are ordered in some way, this method should
* return up to limit ids that occur *after* the last ID in synopsis that
* we recognize.
*
* On return, remaining_item_count will be set to the number of items
* in our blockchain after the last item returned in the result,
* or 0 if the result contains the last item in the blockchain
*/
virtual std::vector<item_hash_t> get_block_ids(const std::vector<item_hash_t> &blockchain_synopsis,
uint32_t &remaining_item_count,
uint32_t limit) override {
@ -552,8 +552,8 @@ public:
}
/**
* Given the hash of the requested data, fetch the body.
*/
virtual message get_item(const item_id &id) override {
try {
// ilog("Request for item ${id}", ("id", id));
@ -576,63 +576,63 @@ public:
}
/**
* Returns a synopsis of the blockchain used for syncing. This consists of a list of
* block hashes at intervals exponentially increasing towards the genesis block.
* When syncing to a peer, the peer uses this data to determine if we're on the same
* fork as they are, and if not, what blocks they need to send us to get us on their
* fork.
*
 * In the over-simplified case, this is a straightforward synopsis of our current
* preferred blockchain; when we first connect up to a peer, this is what we will be sending.
* It looks like this:
* If the blockchain is empty, it will return the empty list.
* If the blockchain has one block, it will return a list containing just that block.
* If it contains more than one block:
* the first element in the list will be the hash of the highest numbered block that
* we cannot undo
* the second element will be the hash of an item at the half way point in the undoable
* segment of the blockchain
* the third will be ~3/4 of the way through the undoable segment of the block chain
* the fourth will be at ~7/8...
* &c.
* the last item in the list will be the hash of the most recent block on our preferred chain
* so if the blockchain had 26 blocks labeled a - z, the synopsis would be:
* a n u x z
* the idea being that by sending a small (<30) number of block ids, we can summarize a huge
 * blockchain. The block ids are more dense near the end of the chain because we are
* more likely to be almost in sync when we first connect, and forks are likely to be short.
* If the peer we're syncing with in our example is on a fork that started at block 'v',
* then they will reply to our synopsis with a list of all blocks starting from block 'u',
* the last block they know that we had in common.
*
* In the real code, there are several complications.
*
* First, as an optimization, we don't usually send a synopsis of the entire blockchain, we
* send a synopsis of only the segment of the blockchain that we have undo data for. If their
* fork doesn't build off of something in our undo history, we would be unable to switch, so there's
* no reason to fetch the blocks.
*
* Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think
* we are missing, they only send a chunk of a few thousand blocks at once. After we get those
* block ids, we need to request more blocks by sending another synopsis (we can't just say "send me
* the next 2000 ids" because they may have switched forks themselves and they don't track what
* they've sent us). For faster performance, we want to get a fairly long list of block ids first,
* then start downloading the blocks.
 * The peer doesn't handle these follow-up block id requests any differently from the initial request;
* it treats the synopsis we send as our blockchain and bases its response entirely off that. So to
* get the response we want (the next chunk of block ids following the last one they sent us, or,
* failing that, the shortest fork off of the last list of block ids they sent), we need to construct
* a synopsis as if our blockchain was made up of:
* 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork)
* 2. the blocks we've already pushed from their fork (if there's a fork)
* 3. the block ids they've previously sent us
 * Segment 3 is handled in the p2p code; it just tells us the number of blocks it has (in
* number_of_blocks_after_reference_point) so we can leave space in the synopsis for them.
* We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and
* fork database. The reference_point parameter is the last block from that peer that has been
* successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on
* the main chain.
*/
virtual std::vector<item_hash_t> get_blockchain_synopsis(const item_hash_t &reference_point,
uint32_t number_of_blocks_after_reference_point) override {
try {
@ -733,26 +733,26 @@ public:
low_block_num += (true_high_block_num - low_block_num + 2) / 2;
} while (low_block_num <= high_block_num);
//idump((synopsis));
// idump((synopsis));
return synopsis;
}
FC_CAPTURE_AND_RETHROW()
}
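The synopsis description above pairs with the interval update shown a few lines earlier (low_block_num += (true_high_block_num - low_block_num + 2) / 2). As a self-contained illustration of that spacing rule only, here is a sketch that uses plain block numbers instead of block hashes and ignores the undo-history and fork handling; for a 26-block chain it reproduces the "a n u x z" example from the comment.

#include <cstdint>
#include <iostream>
#include <vector>

// Compute the block numbers a synopsis would sample, given the lowest and
// highest block numbers of the segment we are allowed to report on.
std::vector<uint32_t> synopsis_block_numbers(uint32_t low_block_num, uint32_t high_block_num) {
   std::vector<uint32_t> numbers;
   if (low_block_num == 0 || low_block_num > high_block_num)
      return numbers; // empty chain, or nothing to report
   do {
      numbers.push_back(low_block_num);
      // Same stride as the loop above: halve the remaining distance each
      // iteration, so the samples get denser toward the chain head.
      low_block_num += (high_block_num - low_block_num + 2) / 2;
   } while (low_block_num <= high_block_num);
   return numbers;
}

int main() {
   // 26 blocks labeled a..z correspond to block numbers 1..26.
   for (uint32_t n : synopsis_block_numbers(1, 26))
      std::cout << static_cast<char>('a' + n - 1) << ' '; // prints: a n u x z
   std::cout << '\n';
   return 0;
}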
/**
* Call this after the call to handle_message succeeds.
*
* @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call
* @param item_count the number of items known to the node that haven't been sent to handle_item() yet.
* After `item_count` more calls to handle_item(), the node will be in sync
*/
virtual void sync_status(uint32_t item_type, uint32_t item_count) override {
// any status reports to GUI go here
}
/**
* Call any time the number of connected peers changes.
*/
virtual void connection_count_changed(uint32_t c) override {
// any status reports to GUI go here
}
@ -769,9 +769,9 @@ public:
}
/**
* Returns the time a block was produced (if block_id = 0, returns genesis time).
* If we don't know about the block, returns time_point_sec::min()
*/
virtual fc::time_point_sec get_block_time(const item_hash_t &block_id) override {
try {
auto opt_block = _chain_db->fetch_block_by_id(block_id);

View file

@ -313,7 +313,7 @@ public:
uint32_t api_limit_get_trade_history = 100;
uint32_t api_limit_get_trade_history_by_sequence = 100;
//private:
// private:
const account_object *get_account_from_string(const std::string &name_or_id,
bool throw_if_not_found = true) const;
const asset_object *get_asset_from_string(const std::string &symbol_or_id,
@ -470,7 +470,7 @@ void database_api::set_subscribe_callback(std::function<void(const variant &)> c
}
void database_api_impl::set_subscribe_callback(std::function<void(const variant &)> cb, bool notify_remove_create) {
//edump((clear_filter));
// edump((clear_filter));
_subscribe_callback = cb;
_notify_remove_create = notify_remove_create;
_subscribed_accounts.clear();
@ -2197,7 +2197,7 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::committee: {
auto itr = committee_idx.find(id);
if (itr != committee_idx.end())
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 2)); // Depth of committee_member_object is 1, add 1 to be safe
else
result.emplace_back(variant());
break;
@ -2205,7 +2205,7 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::witness: {
auto itr = witness_idx.find(id);
if (itr != witness_idx.end())
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 2)); // Depth of witness_object is 1, add 1 here to be safe
else
result.emplace_back(variant());
break;
@ -2213,11 +2213,15 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::worker: {
auto itr = for_worker_idx.find(id);
if (itr != for_worker_idx.end()) {
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 4)); // Depth of worker_object is 3, add 1 here to be safe.
// If we want to extract the balance object inside,
// need to increase this value
} else {
auto itr = against_worker_idx.find(id);
if (itr != against_worker_idx.end()) {
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 4)); // Depth of worker_object is 3, add 1 here to be safe.
// If we want to extract the balance object inside,
// need to increase this value
} else {
result.emplace_back(variant());
}
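The depth bumps above (1 to 2 for committee members and witnesses, 1 to 4 for workers) reflect that converting an object to a variant is recursion-limited: each level of nested objects consumes one unit of the depth budget, so an object that embeds other objects needs a higher limit, as the inline comments note. The toy sketch below is not fc code; it only mimics a depth-limited serializer to show why an object with nested members fails when the budget is too small.

#include <cstdint>
#include <memory>
#include <stdexcept>
#include <string>

// Toy nested object: each node may embed another object, the way a worker
// object embeds further objects (e.g. a vesting balance) several levels deep.
struct node {
   std::string name;
   std::unique_ptr<node> child;
};

// Depth-limited serialization: every nesting level costs one unit of budget,
// analogous in spirit to passing a maximum depth when building a variant.
std::string serialize(const node &n, uint32_t max_depth) {
   if (max_depth == 0)
      throw std::runtime_error("depth exceeded while serializing " + n.name);
   std::string out = "{" + n.name;
   if (n.child)
      out += "," + serialize(*n.child, max_depth - 1);
   return out + "}";
}

int main() {
   node worker{"worker", std::make_unique<node>(node{"balance", nullptr})};
   serialize(worker, 2);    // fine: two levels of nesting, budget of 2
   // serialize(worker, 1); // would throw: the embedded object needs one more level
   return 0;
}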
@ -2903,7 +2907,7 @@ graphene::app::gpos_info database_api::get_gpos_info(const account_id_type accou
}
graphene::app::gpos_info database_api_impl::get_gpos_info(const account_id_type account) const {
FC_ASSERT(_db.head_block_time() > HARDFORK_GPOS_TIME); //Can be deleted after GPOS hardfork time
FC_ASSERT(_db.head_block_time() > HARDFORK_GPOS_TIME); // Can be deleted after GPOS hardfork time
gpos_info result;
result.vesting_factor = _db.calculate_vesting_factor(account(_db));
@ -3550,9 +3554,9 @@ void database_api_impl::handle_object_changed(bool force_notify, bool full_objec
/// pushing the future back / popping the prior future if it is complete.
/// if a connection hangs then this could get backed up and result in
/// a failure to exit cleanly.
//fc::async([capture_this,this,updates,market_broadcast_queue](){
//if( _subscribe_callback )
// _subscribe_callback( updates );
// fc::async([capture_this,this,updates,market_broadcast_queue](){
// if( _subscribe_callback )
// _subscribe_callback( updates );
for (auto id : ids) {
if (id.is<call_order_object>()) {

View file

@ -85,10 +85,10 @@ struct asset_holders {
};
/**
* @brief The history_api class implements the RPC API for account history
*
* This API contains methods to access account histories
*/
class history_api {
public:
history_api(application &app) :
@ -97,27 +97,27 @@ public:
}
/**
 * @brief Get operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(const std::string account_id_or_name,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type()) const;
/**
 * @brief Get only the requested type of operations relevant to the specified account
 * @param account_id_or_name The account ID or name whose history should be queried
 * @param operation_id The type ID of the operations to retrieve (0 = transfer, 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(const std::string account_id_or_name,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
@ -125,17 +125,17 @@ public:
unsigned limit = 100) const;
/**
 * @brief Get operations relevant to the specified account, referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history(const std::string account_id_or_name,
uint32_t stop = 0,
unsigned limit = 100,
@ -156,8 +156,8 @@ private:
};
/**
* @brief Block api
*/
class block_api {
public:
block_api(graphene::chain::database &db);
@ -170,8 +170,8 @@ private:
};
/**
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api> {
public:
network_broadcast_api(application &a);
@ -186,36 +186,36 @@ public:
typedef std::function<void(variant /*transaction_confirmation*/)> confirmation_callback;
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
void broadcast_transaction(const signed_transaction &trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback(confirmation_callback cb, const signed_transaction &trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction &trx);
void broadcast_block(const signed_block &block);
/**
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
*/
void on_applied_block(const signed_block &b);
private:
@ -225,60 +225,60 @@ private:
};
/**
* @brief The network_node_api class allows maintenance of p2p connections.
*/
class network_node_api {
public:
network_node_api(application &a);
/**
* @brief Return general network information, such as p2p port
*/
fc::variant_object get_info() const;
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
void add_node(const fc::ip::endpoint &ep);
/**
* @brief Get status of all current connections to peers
*/
std::vector<net::peer_status> get_connected_peers() const;
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object &params);
/**
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
 * @param callback a functional object which will be called when a new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant &)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
@ -290,33 +290,33 @@ private:
};
/**
* @brief
*/
class asset_api {
public:
asset_api(graphene::app::application &app);
~asset_api();
/**
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum limit must not exceed 100
* @return A list of asset holders for the specified asset
*/
vector<account_asset_balance> get_asset_holders(std::string asset, uint32_t start, uint32_t limit) const;
/**
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
int get_asset_holders_count(std::string asset) const;
/**
* @brief Get all asset holders
* @return A list of all asset holders
*/
vector<asset_holders> get_all_asset_holders() const;
uint32_t api_limit_get_asset_holders = 100;
@ -337,24 +337,24 @@ extern template class fc::api<graphene::debug_witness::debug_api>;
namespace graphene { namespace app {
/**
* @brief The login_api class implements the bottom layer of the RPC API
*
* All other APIs must be requested from this API.
*/
class login_api {
public:
login_api(application &a);
~login_api();
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
 * has successfully authenticated.
*/
bool login(const string &user, const string &password);
/// @brief Retrieve the network block API
fc::api<block_api> block() const;
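The network_broadcast_api comments above spell out the confirmation flow: broadcast_transaction_with_callback remembers a callback keyed by transaction id, and on_applied_block (registered to receive the chain database's applied_block signal) fires that callback once the transaction appears in a block, reporting the transaction id, block number, and position in the block. The sketch below is only a schematic of that bookkeeping with simplified stand-in types, not the actual implementation:

#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <utility>
#include <vector>

using tx_id = std::string; // stand-in for transaction_id_type
using confirmation_callback = std::function<void(const tx_id &, uint32_t /*block_num*/, uint32_t /*trx_num*/)>;

struct applied_block {
   uint32_t block_num = 0;
   std::vector<tx_id> transaction_ids; // ids of the transactions the block contains
};

class confirmation_registry {
public:
   // Remember which callback to fire when this transaction id is seen in a block.
   void register_callback(const tx_id &id, confirmation_callback cb) {
      _callbacks[id] = std::move(cb);
   }

   // Called once per applied block: notify and drop any matching callbacks.
   void on_applied_block(const applied_block &b) {
      for (uint32_t trx_num = 0; trx_num < b.transaction_ids.size(); ++trx_num) {
         auto it = _callbacks.find(b.transaction_ids[trx_num]);
         if (it != _callbacks.end()) {
            it->second(it->first, b.block_num, trx_num);
            _callbacks.erase(it);
         }
      }
   }

private:
   std::map<tx_id, confirmation_callback> _callbacks;
};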

View file

@ -198,10 +198,10 @@ public:
optional<block_header> get_block_header(uint32_t block_num) const;
/**
 * @brief Retrieve multiple block headers by block numbers
 * @param block_num vector containing heights of the blocks whose headers should be returned
* @return array of headers of the referenced blocks, or null if no matching block was found
*/
map<uint32_t, optional<block_header>> get_block_header_batch(const vector<uint32_t> block_nums) const;
/**
@ -279,12 +279,12 @@ public:
vector<vector<account_id_type>> get_key_references(vector<public_key_type> key) const;
/**
* Determine whether a textual representation of a public key
* (in Base-58 format) is *currently* linked
* to any *registered* (i.e. non-stealth) account on the blockchain
* @param public_key Public key
* @return Whether a public key is known
*/
bool is_public_key_registered(string public_key) const;
//////////////

View file

@ -386,23 +386,14 @@ vector<uint64_t> database::get_random_numbers(uint64_t minimum, uint64_t maximum
bool database::is_asset_creation_allowed(const string &symbol)
{
time_point_sec now = head_block_time();
std::unordered_set<std::string> post_son_hf_symbols = {"ETH", "USDT", "BNB", "ADA", "DOGE", "XRP", "USDC", "DOT", "UNI", "BUSD", "BCH", "LTC", "SOL", "LINK", "MATIC", "THETA",
"WBTC", "XLM", "ICP", "DAI", "VET", "ETC", "TRX", "FIL", "XMR", "EGR", "EOS", "SHIB", "AAVE", "CRO", "ALGO", "AMP", "BTCB",
"BSV", "KLAY", "CAKE", "FTT", "LEO", "XTZ", "TFUEL", "MIOTA", "LUNA", "NEO", "ATOM", "MKR", "FEI", "WBNB", "UST", "AVAX",
"STEEM", "HIVE", "HBD", "SBD", "BTS"};
if (symbol == "BTC")
{
if (now < HARDFORK_SON_TIME)
return false;
}
if (post_son_hf_symbols.find(symbol) != post_son_hf_symbols.end())
{
if (now >= HARDFORK_SON_TIME)
if (head_block_time() < HARDFORK_SON_TIME)
return false;
}
return true;
}
} }
}
}

View file

@ -170,6 +170,7 @@ struct worker_pay_visitor
worker.pay_worker(pay, db);
}
};
void database::update_worker_votes()
{
auto& idx = get_index_type<worker_index>();
@ -185,6 +186,28 @@ void database::update_worker_votes()
}
}
void database::hotfix_2024()
{
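   // One-off ownership correction guarded by the hotfix time: on the chain whose id matches
   // the hard-coded hash below (presumably the Peerplays mainnet), vesting balance object 388
   // is reassigned from account 14786 to account 0 (the committee account in Graphene-based
   // chains) if it still has the old owner. The specific ids come from the constants used here.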
if (head_block_time() >= HARDFORK_HOTFIX_2024_TIME)
{
if (get_chain_id().str() == "6b6b5f0ce7a36d323768e534f3edb41c6d6332a541a95725b98e28d140850134")
{
const auto& vb_idx = get_index_type<vesting_balance_index>().indices().get<by_id>();
auto vbo = vb_idx.find(vesting_balance_id_type(388));
if (vbo != vb_idx.end())
{
if (vbo->owner == account_id_type(14786))
{
modify(*vbo, [&]( vesting_balance_object& _vbo)
{
_vbo.owner = account_id_type(0);
});
}
}
}
}
}
void database::pay_sons_before_hf_ethereum()
{
const auto now = head_block_time();
@ -2510,6 +2533,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g
update_active_committee_members();
update_active_sons();
update_worker_votes();
hotfix_2024();
const dynamic_global_property_object& dgpo = get_dynamic_global_properties();

View file

@ -0,0 +1,7 @@
#ifndef HARDFORK_HOTFIX_2024_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#else
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#endif
#endif

View file

@ -594,6 +594,7 @@ namespace graphene { namespace chain {
const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_son_wallet( const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_worker_votes();
void hotfix_2024();
public:
double calculate_vesting_factor(const account_object& stake_account);

View file

@ -152,7 +152,7 @@ void bitcoin_transaction_builder::add_in(payment_type type, tx_in txin, const by
txin.scriptSig = script_code;
break;
default: {
if (txin.prevout.hash == fc::sha256("0000000000000000000000000000000000000000000000000000000000000000")) { //coinbase
if (txin.prevout.hash == fc::sha256("0000000000000000000000000000000000000000000000000000000000000000")) { // coinbase
FC_ASSERT(script_code != bytes());
txin.scriptSig = script_code;
}

View file

@ -384,7 +384,7 @@ rpc_client::~rpc_client() {
if (connection_selection_task.valid())
connection_selection_task.cancel_and_wait(__FUNCTION__);
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}

View file

@ -22,15 +22,15 @@ typedef fc::ecc::private_key private_key_type;
typedef fc::sha256 chain_id_type;
typedef std::string account_name_type;
typedef fc::ripemd160 block_id_type;
//typedef fc::ripemd160 checksum_type;
// typedef fc::ripemd160 checksum_type;
typedef fc::ripemd160 transaction_id_type;
typedef fc::sha256 digest_type;
typedef fc::ecc::compact_signature signature_type;
typedef fc::safe<int64_t> share_type;
//typedef safe<uint64_t> ushare_type;
//typedef uint16_t weight_type;
//typedef uint32_t contribution_id_type;
//typedef fixed_string<32> custom_id_type;
// typedef safe<uint64_t> ushare_type;
// typedef uint16_t weight_type;
// typedef uint32_t contribution_id_type;
// typedef fixed_string<32> custom_id_type;
struct public_key_type {

View file

@ -145,7 +145,7 @@ peerplays_sidechain_plugin_impl::~peerplays_sidechain_plugin_impl() {
if (_heartbeat_task.valid())
_heartbeat_task.cancel_and_wait(__FUNCTION__);
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}
@ -156,7 +156,7 @@ peerplays_sidechain_plugin_impl::~peerplays_sidechain_plugin_impl() {
_son_processing_task.at(active_sidechain_type).wait();
}
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}
@ -540,11 +540,11 @@ void peerplays_sidechain_plugin_impl::son_processing(sidechain_type sidechain) {
return;
}
//fc::time_point now_fine = fc::time_point::now();
//fc::time_point_sec now = now_fine + fc::microseconds(500000);
//if (plugin.database().get_slot_time(1) < now) {
// return; // Not synced
//}
// fc::time_point now_fine = fc::time_point::now();
// fc::time_point_sec now = now_fine + fc::microseconds(500000);
// if (plugin.database().get_slot_time(1) < now) {
// return; // Not synced
// }
const fc::time_point now_fine = fc::time_point::now();
const fc::time_point_sec now = now_fine - fc::milliseconds(3000);
@ -873,15 +873,12 @@ void peerplays_sidechain_plugin_impl::settle_sidechain_transactions(sidechain_ty
void peerplays_sidechain_plugin_impl::on_applied_block(const signed_block &b) {
if (first_block_skipped) {
if(son_processing_enabled) {
if (son_processing_enabled) {
schedule_son_processing();
}
else
{
} else {
const fc::time_point now_fine = fc::time_point::now();
const fc::time_point_sec now = now_fine + fc::microseconds( 500000 );
if( plugin.database().get_slot_time(1) >= now )
{
const fc::time_point_sec now = now_fine + fc::microseconds(500000);
if (plugin.database().get_slot_time(1) >= now) {
son_processing_enabled = true;
schedule_son_processing();
}

View file

@ -184,11 +184,11 @@ void sidechain_net_handler::sidechain_event_data_received(const sidechain_event_
bool enable_peerplays_asset_deposits = false;
#ifdef ENABLE_PEERPLAYS_ASSET_DEPOSITS
//enable_peerplays_asset_deposits = (sed.sidechain == sidechain_type::peerplays) &&
// (sed.sidechain_currency.compare("BTC") != 0) &&
// (sed.sidechain_currency.compare("ETH") != 0) &&
// (sed.sidechain_currency.compare("HBD") != 0) &&
// (sed.sidechain_currency.compare("HIVE") != 0);
// enable_peerplays_asset_deposits = (sed.sidechain == sidechain_type::peerplays) &&
// (sed.sidechain_currency.compare("BTC") != 0) &&
// (sed.sidechain_currency.compare("ETH") != 0) &&
// (sed.sidechain_currency.compare("HBD") != 0) &&
// (sed.sidechain_currency.compare("HIVE") != 0);
#endif
const bool deposit_condition = (sed.peerplays_to == gpo.parameters.son_account()) &&
@ -451,7 +451,7 @@ void sidechain_net_handler::process_deposits() {
if (swdo.id == object_id_type(0, 0, 0) || !plugin.can_son_participate(sidechain, chain::operation::tag<chain::son_wallet_deposit_process_operation>::value, swdo.id)) {
return;
}
//Ignore the deposits which are not valid anymore, considered refunds.
// Ignore the deposits which are not valid anymore, considered refunds.
const auto &sidechain_addresses_idx = database.get_index_type<sidechain_address_index>().indices().get<by_sidechain_and_deposit_address_and_expires>();
const auto &addr_itr = sidechain_addresses_idx.find(std::make_tuple(sidechain, swdo.sidechain_from, time_point_sec::maximum()));
if (addr_itr == sidechain_addresses_idx.end()) {

View file

@ -757,8 +757,8 @@ bool sidechain_net_handler_ethereum::settle_sidechain_transaction(const sidechai
if ("0x1" == json_receipt.get<std::string>("result.status")) {
count += 1;
//! Fixme - compare data somehow?
//if( sto.transaction == entry_receipt.second.get<std::string>("data") ) {
//}
// if( sto.transaction == entry_receipt.second.get<std::string>("data") ) {
// }
}
}

View file

@ -858,14 +858,14 @@ bool sidechain_net_handler_hive::settle_sidechain_transaction(const sidechain_tr
boost::property_tree::ptree tx_json;
boost::property_tree::read_json(ss_tx, tx_json);
//const chain::global_property_object &gpo = database.get_global_properties();
// const chain::global_property_object &gpo = database.get_global_properties();
std::string tx_txid = tx_json.get<std::string>("result.transaction_id");
uint32_t tx_block_num = tx_json.get<uint32_t>("result.block_num");
const uint32_t last_irreversible_block = std::stoul(rpc_client->get_last_irreversible_block_num());
//std::string tx_address = addr.get_address();
//int64_t tx_amount = -1;
// std::string tx_address = addr.get_address();
// int64_t tx_amount = -1;
if (tx_block_num <= last_irreversible_block) {
if (sto.object_id.is<son_wallet_withdraw_id_type>()) {
@ -918,15 +918,15 @@ void sidechain_net_handler_hive::hive_listener_loop() {
}
}
//std::string reply = rpc_client->get_last_irreversible_block_num();
//if (!reply.empty()) {
// uint64_t last_irreversible_block = std::stoul(reply);
// if (last_irreversible_block != last_block_received) {
// std::string event_data = std::to_string(last_irreversible_block);
// handle_event(event_data);
// last_block_received = last_irreversible_block;
// }
//}
// std::string reply = rpc_client->get_last_irreversible_block_num();
// if (!reply.empty()) {
// uint64_t last_irreversible_block = std::stoul(reply);
// if (last_irreversible_block != last_block_received) {
// std::string event_data = std::to_string(last_irreversible_block);
// handle_event(event_data);
// last_block_received = last_irreversible_block;
// }
// }
}
void sidechain_net_handler_hive::handle_event(const std::string &event_data) {
@ -960,7 +960,7 @@ void sidechain_net_handler_hive::handle_event(const std::string &event_data) {
const auto &amount_child = op_value.get_child("amount");
uint64_t amount = amount_child.get<uint64_t>("amount");
//uint64_t precision = amount_child.get<uint64_t>("precision");
// uint64_t precision = amount_child.get<uint64_t>("precision");
std::string nai = amount_child.get<std::string>("nai");
std::string sidechain_currency = "";
price sidechain_currency_price = {};

View file

@ -24,15 +24,15 @@ namespace graphene { namespace peerplays_sidechain {
sidechain_net_handler_peerplays::sidechain_net_handler_peerplays(peerplays_sidechain_plugin &_plugin, const boost::program_options::variables_map &options) :
sidechain_net_handler(sidechain_type::peerplays, _plugin, options) {
//const auto &assets_by_symbol = database.get_index_type<asset_index>().indices().get<by_symbol>();
//const auto get_asset_id = [&assets_by_symbol](const string &symbol) {
// auto asset_itr = assets_by_symbol.find(symbol);
// FC_ASSERT(asset_itr != assets_by_symbol.end(), "Unable to find asset '${sym}'", ("sym", symbol));
// return asset_itr->get_id();
//};
//tracked_assets.push_back(get_asset_id("PBTC"));
//tracked_assets.push_back(get_asset_id("PETH"));
//tracked_assets.push_back(get_asset_id("PEOS"));
// const auto &assets_by_symbol = database.get_index_type<asset_index>().indices().get<by_symbol>();
// const auto get_asset_id = [&assets_by_symbol](const string &symbol) {
// auto asset_itr = assets_by_symbol.find(symbol);
// FC_ASSERT(asset_itr != assets_by_symbol.end(), "Unable to find asset '${sym}'", ("sym", symbol));
// return asset_itr->get_id();
// };
// tracked_assets.push_back(get_asset_id("PBTC"));
// tracked_assets.push_back(get_asset_id("PETH"));
// tracked_assets.push_back(get_asset_id("PEOS"));
if (options.count("peerplays-private-key")) {
const std::vector<std::string> pub_priv_keys = options["peerplays-private-key"].as<std::vector<std::string>>();
@ -284,8 +284,8 @@ bool sidechain_net_handler_peerplays::settle_sidechain_transaction(const sidecha
}
if (sto.object_id.is<son_wallet_deposit_id_type>()) {
//auto swdo = database.get<son_wallet_deposit_object>(sto.object_id);
//settle_amount = asset(swdo.sidechain_amount, swdo.sidechain_currency);
// auto swdo = database.get<son_wallet_deposit_object>(sto.object_id);
// settle_amount = asset(swdo.sidechain_amount, swdo.sidechain_currency);
}
if (sto.object_id.is<son_wallet_withdraw_id_type>()) {