Compare commits

...

27 commits

Author | SHA1 | Message | Date
Bobinson K B | d3d967a2d7 | Merge branch 'hotfix/bookie2024' into 'master' (Bookie 2024; see merge request PBSA/peerplays!259) | 2023-12-18 06:17:23 +00:00
serkixenos | 178756bd34 | Bookie 2024 | 2023-12-18 06:17:22 +00:00
Bobinson K B | 1f70857d64 | Merge branch 'beatrice' into 'master' (Mainnet release; see merge request PBSA/peerplays!251) | 2023-10-06 10:50:31 +00:00
Vlad Dobromyslov | 97e85a849d | Merge branch 'develop' into 'beatrice' (Set HARDFORK_SON_FOR_ETHEREUM_TIME to 24 of October; see merge request PBSA/peerplays!250) | 2023-10-04 16:51:45 +00:00
Vlad Dobromyslov | dc4cdd6e4b | Set HARDFORK_SON_FOR_ETHEREUM_TIME to 24 of October | 2023-10-04 16:51:45 +00:00
Vlad Dobromyslov | 1472066af6 | Merge branch 'develop' into 'beatrice' (Fixes for 1.5.25-beta; see merge request PBSA/peerplays!249) | 2023-10-03 16:24:00 +00:00
Vlad Dobromyslov | a641b8e93f | Fixes for 1.5.25-beta | 2023-10-03 16:23:59 +00:00
Vlad Dobromyslov | aa099f960f | Merge branch 'develop' into 'beatrice' (Fixes for 1.5.24-beta; see merge request PBSA/peerplays!246) | 2023-08-23 14:31:39 +00:00
Vlad Dobromyslov | f0654e5ffd | Fixes for 1.5.24-beta | 2023-08-23 14:31:38 +00:00
Vlad Dobromyslov | 9fe351300b | Merge branch 'develop' into 'beatrice' (Set test-e2e as manual; see merge request PBSA/peerplays!242) | 2023-07-17 12:49:53 +00:00
Vlad Dobromyslov | 5fd79c3e78 | Set test-e2e as manual | 2023-07-17 12:49:53 +00:00
Vlad Dobromyslov | b56818b8ae | Merge branch 'develop' into 'beatrice' (Change DB_VERSION to PPY2.5; see merge request PBSA/peerplays!240) | 2023-07-13 16:44:03 +00:00
Vlad Dobromyslov | bc0fbeb707 | Change DB_VERSION to PPY2.5 | 2023-07-13 16:44:03 +00:00
Vlad Dobromyslov | accd334a86 | Merge branch 'develop' into 'beatrice' (NEW HARDFORK TIME FOR SON ETH; see merge request PBSA/peerplays!238) | 2023-07-07 12:10:44 +00:00
Vlad Dobromyslov | 84a66c6722 | NEW HARDFORK TIME FOR SON ETH | 2023-07-07 12:10:44 +00:00
Vlad Dobromyslov | abd446d80b | Merge branch 'develop' into 'beatrice' (Fix balance discrepancies in 1.5.23-beta; see merge request PBSA/peerplays!235) | 2023-07-06 05:31:28 +00:00
Vlad Dobromyslov | 93fb57c080 | Fix balance discrepancies in 1.5.23-beta | 2023-07-06 05:31:28 +00:00
Bobinson K B | a8845ffde9 | Merge branch 'develop' into 'beatrice' (Fix issue with balance discrepancies in 1.5.23-beta; see merge request PBSA/peerplays!232) | 2023-06-20 07:37:24 +00:00
Vlad Dobromyslov | 435c1f8e96 | Fix issue with balance discrepancies in 1.5.23-beta | 2023-06-20 07:37:24 +00:00
Bobinson K B | 1123ff6f93 | Merge branch 'develop' into 'beatrice' (Fixes for public testnet; see merge request PBSA/peerplays!230) | 2023-06-09 08:10:26 +00:00
Vlad Dobromyslov | c34415b403 | Fixes for public testnet | 2023-06-09 08:10:26 +00:00
Christopher Sanborn | daca2813ef | Merge branch 'testnet-set-hf-dates' into 'beatrice' (Set Hard Fork dates for testnet and mainnet) | 2023-05-25 13:25:09 -04:00
Christopher Sanborn | 0b37a48b02 | Set Hard Fork dates for testnet and main net. | 2023-05-25 13:23:05 -04:00
Bobinson K B | e3b10cf1ec | Merge branch 'testnet-builds' into 'beatrice' (Updated build rules for mainnet and testnet; see merge request PBSA/peerplays!223) | 2023-05-17 16:25:03 +00:00
Rily Dunlap | f5c6a6310b | Updated build rules for mainnet and testnet | 2023-05-17 14:50:10 +00:00
Bobinson K B | 75ee6fbed3 | Merge branch 'develop' into 'beatrice' (New set of functionality; see merge request PBSA/peerplays!220) | 2023-05-16 11:46:25 +00:00
Vlad Dobromyslov | 7516126d01 | New set of functionality | 2023-05-16 11:46:25 +00:00
35 changed files with 806 additions and 514 deletions

View file

@ -9,6 +9,8 @@ stages:
- build
- test
- dockerize
- python-test
- deploy
build-mainnet:
stage: build
@ -48,6 +50,7 @@ dockerize-mainnet:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker builder prune -a -f
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
@ -56,8 +59,6 @@ dockerize-mainnet:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h
@ -78,12 +79,27 @@ build-testnet:
- build/libraries/
- build/programs/
- build/tests/
when: manual
tags:
- builder
when:
manual
timeout:
3h
deploy-testnet:
stage: deploy
dependencies:
- build-testnet
script:
- sudo systemctl stop witness
- rm $WORK_DIR/peerplays/witness_node || true
- cp build/programs/witness_node/witness_node $WORK_DIR/peerplays/
- sudo systemctl restart witness
rules:
- if: $CI_COMMIT_BRANCH == "master"
when: always
environment:
name: devnet
url: $DEVNET_URL
tags:
- devnet
test-testnet:
stage: test
@ -119,3 +135,37 @@ dockerize-testnet:
manual
timeout:
3h
test-e2e:
stage: python-test
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- git clone https://gitlab.com/PBSA/tools-libs/peerplays-utils.git
- cd peerplays-utils/peerplays-qa-environment
- git checkout origin/feature/python-e2e-tests-for-CI
- cd e2e-tests/
- python3 -m venv venv
- source venv/bin/activate
- pip3 install -r requirements.txt
- docker-compose down --remove-orphans
- docker ps -a
- docker pull $IMAGE
- docker tag $IMAGE peerplays-base:latest
- docker image ls -a
- docker-compose build
- python3 main.py --start all
- docker ps -a
- python3 -m pytest test_btc_init_state.py test_hive_inital_state.py test_pp_inital_state.py
- python3 main.py --stop
- deactivate
- docker ps -a
after_script:
- docker rmi $(docker images -a | grep -v 'hive-for-peerplays\|ethereum-for-peerplays\|bitcoin-for-peerplays\|ubuntu-for-peerplays' | awk '{print $3}')
tags:
- python-tests
when:
manual

View file

@ -134,11 +134,8 @@ RUN \
libsodium-dev
RUN \
git clone https://github.com/libbitcoin/libbitcoin-build.git && \
cd libbitcoin-build && \
git reset --hard 92c215fc1ffa272bab4d485d369d0306db52d69d && \
./generate3.sh && \
cd ../libbitcoin-explorer && \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*
@ -190,7 +187,6 @@ ADD . peerplays
RUN \
cd peerplays && \
git submodule update --init --recursive && \
git symbolic-ref --short HEAD && \
git log --oneline -n 5 && \
mkdir build && \
cd build && \

View file

@ -134,11 +134,8 @@ RUN \
libsodium-dev
RUN \
git clone https://github.com/libbitcoin/libbitcoin-build.git && \
cd libbitcoin-build && \
git reset --hard 92c215fc1ffa272bab4d485d369d0306db52d69d && \
./generate3.sh && \
cd ../libbitcoin-explorer && \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*

View file

@ -77,11 +77,8 @@ sudo ldconfig
libbitcoin-explorer setup:
```
git clone https://github.com/libbitcoin/libbitcoin-build.git
cd libbitcoin-build
git reset --hard 92c215fc1ffa272bab4d485d369d0306db52d69d
./generate3.sh
cd ../libbitcoin-explorer
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git
cd libbitcoin-explorer
sudo ./install.sh
sudo ldconfig
```

View file

@ -210,8 +210,8 @@ network_node_api::network_node_api(application &a) :
}
/*
* Remove expired transactions from pending_transactions
*/
* Remove expired transactions from pending_transactions
*/
for (const auto &transaction : _pending_transactions) {
if (transaction.second.expiration < block.timestamp) {
auto transaction_it = _pending_transactions.find(transaction.second.id());

View file

@ -391,8 +391,8 @@ public:
}
/**
* If delegate has the item, the network has no need to fetch it.
*/
* If delegate has the item, the network has no need to fetch it.
*/
virtual bool has_item(const net::item_id &id) override {
try {
if (id.item_type == graphene::net::block_message_type)
@ -404,13 +404,13 @@ public:
}
/**
* @brief allows the application to validate an item prior to broadcasting to peers.
*
* @param sync_mode true if the message was fetched through the sync process, false during normal operation
* @returns true if this message caused the blockchain to switch forks, false if it did not
*
* @throws exception if error validating the item, otherwise the item is safe to broadcast on.
*/
* @brief allows the application to validate an item prior to broadcasting to peers.
*
* @param sync_mode true if the message was fetched through the sync process, false during normal operation
* @returns true if this message caused the blockchain to switch forks, false if it did not
*
* @throws exception if error validating the item, otherwise the item is safe to broadcast on.
*/
virtual bool handle_block(const graphene::net::block_message &blk_msg, bool sync_mode,
std::vector<fc::uint160_t> &contained_transaction_message_ids) override {
@ -498,14 +498,14 @@ public:
}
/**
* Assuming all data elements are ordered in some way, this method should
* return up to limit ids that occur *after* the last ID in synopsis that
* we recognize.
*
* On return, remaining_item_count will be set to the number of items
* in our blockchain after the last item returned in the result,
* or 0 if the result contains the last item in the blockchain
*/
* Assuming all data elements are ordered in some way, this method should
* return up to limit ids that occur *after* the last ID in synopsis that
* we recognize.
*
* On return, remaining_item_count will be set to the number of items
* in our blockchain after the last item returned in the result,
* or 0 if the result contains the last item in the blockchain
*/
virtual std::vector<item_hash_t> get_block_ids(const std::vector<item_hash_t> &blockchain_synopsis,
uint32_t &remaining_item_count,
uint32_t limit) override {
@ -552,8 +552,8 @@ public:
}
/**
* Given the hash of the requested data, fetch the body.
*/
* Given the hash of the requested data, fetch the body.
*/
virtual message get_item(const item_id &id) override {
try {
// ilog("Request for item ${id}", ("id", id));
@ -576,63 +576,63 @@ public:
}
/**
* Returns a synopsis of the blockchain used for syncing. This consists of a list of
* block hashes at intervals exponentially increasing towards the genesis block.
* When syncing to a peer, the peer uses this data to determine if we're on the same
* fork as they are, and if not, what blocks they need to send us to get us on their
* fork.
*
* In the over-simplified case, this is a straighforward synopsis of our current
* preferred blockchain; when we first connect up to a peer, this is what we will be sending.
* It looks like this:
* If the blockchain is empty, it will return the empty list.
* If the blockchain has one block, it will return a list containing just that block.
* If it contains more than one block:
* the first element in the list will be the hash of the highest numbered block that
* we cannot undo
* the second element will be the hash of an item at the half way point in the undoable
* segment of the blockchain
* the third will be ~3/4 of the way through the undoable segment of the block chain
* the fourth will be at ~7/8...
* &c.
* the last item in the list will be the hash of the most recent block on our preferred chain
* so if the blockchain had 26 blocks labeled a - z, the synopsis would be:
* a n u x z
* the idea being that by sending a small (<30) number of block ids, we can summarize a huge
* blockchain. The block ids are more dense near the end of the chain where because we are
* more likely to be almost in sync when we first connect, and forks are likely to be short.
* If the peer we're syncing with in our example is on a fork that started at block 'v',
* then they will reply to our synopsis with a list of all blocks starting from block 'u',
* the last block they know that we had in common.
*
* In the real code, there are several complications.
*
* First, as an optimization, we don't usually send a synopsis of the entire blockchain, we
* send a synopsis of only the segment of the blockchain that we have undo data for. If their
* fork doesn't build off of something in our undo history, we would be unable to switch, so there's
* no reason to fetch the blocks.
*
* Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think
* we are missing, they only send a chunk of a few thousand blocks at once. After we get those
* block ids, we need to request more blocks by sending another synopsis (we can't just say "send me
* the next 2000 ids" because they may have switched forks themselves and they don't track what
* they've sent us). For faster performance, we want to get a fairly long list of block ids first,
* then start downloading the blocks.
* The peer doesn't handle these follow-up block id requests any different from the initial request;
* it treats the synopsis we send as our blockchain and bases its response entirely off that. So to
* get the response we want (the next chunk of block ids following the last one they sent us, or,
* failing that, the shortest fork off of the last list of block ids they sent), we need to construct
* a synopsis as if our blockchain was made up of:
* 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork)
* 2. the blocks we've already pushed from their fork (if there's a fork)
* 3. the block ids they've previously sent us
* Segment 3 is handled in the p2p code, it just tells us the number of blocks it has (in
* number_of_blocks_after_reference_point) so we can leave space in the synopsis for them.
* We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and
* fork database. The reference_point parameter is the last block from that peer that has been
* successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on
* the main chain.
*/
* Returns a synopsis of the blockchain used for syncing. This consists of a list of
* block hashes at intervals exponentially increasing towards the genesis block.
* When syncing to a peer, the peer uses this data to determine if we're on the same
* fork as they are, and if not, what blocks they need to send us to get us on their
* fork.
*
* In the over-simplified case, this is a straighforward synopsis of our current
* preferred blockchain; when we first connect up to a peer, this is what we will be sending.
* It looks like this:
* If the blockchain is empty, it will return the empty list.
* If the blockchain has one block, it will return a list containing just that block.
* If it contains more than one block:
* the first element in the list will be the hash of the highest numbered block that
* we cannot undo
* the second element will be the hash of an item at the half way point in the undoable
* segment of the blockchain
* the third will be ~3/4 of the way through the undoable segment of the block chain
* the fourth will be at ~7/8...
* &c.
* the last item in the list will be the hash of the most recent block on our preferred chain
* so if the blockchain had 26 blocks labeled a - z, the synopsis would be:
* a n u x z
* the idea being that by sending a small (<30) number of block ids, we can summarize a huge
* blockchain. The block ids are more dense near the end of the chain where because we are
* more likely to be almost in sync when we first connect, and forks are likely to be short.
* If the peer we're syncing with in our example is on a fork that started at block 'v',
* then they will reply to our synopsis with a list of all blocks starting from block 'u',
* the last block they know that we had in common.
*
* In the real code, there are several complications.
*
* First, as an optimization, we don't usually send a synopsis of the entire blockchain, we
* send a synopsis of only the segment of the blockchain that we have undo data for. If their
* fork doesn't build off of something in our undo history, we would be unable to switch, so there's
* no reason to fetch the blocks.
*
* Second, when a peer replies to our initial synopsis and gives us a list of the blocks they think
* we are missing, they only send a chunk of a few thousand blocks at once. After we get those
* block ids, we need to request more blocks by sending another synopsis (we can't just say "send me
* the next 2000 ids" because they may have switched forks themselves and they don't track what
* they've sent us). For faster performance, we want to get a fairly long list of block ids first,
* then start downloading the blocks.
* The peer doesn't handle these follow-up block id requests any different from the initial request;
* it treats the synopsis we send as our blockchain and bases its response entirely off that. So to
* get the response we want (the next chunk of block ids following the last one they sent us, or,
* failing that, the shortest fork off of the last list of block ids they sent), we need to construct
* a synopsis as if our blockchain was made up of:
* 1. the blocks in our block chain up to the fork point (if there is a fork) or the head block (if no fork)
* 2. the blocks we've already pushed from their fork (if there's a fork)
* 3. the block ids they've previously sent us
* Segment 3 is handled in the p2p code, it just tells us the number of blocks it has (in
* number_of_blocks_after_reference_point) so we can leave space in the synopsis for them.
* We're responsible for constructing the synopsis of Segments 1 and 2 from our active blockchain and
* fork database. The reference_point parameter is the last block from that peer that has been
* successfully pushed to the blockchain, so that tells us whether the peer is on a fork or on
* the main chain.
*/
virtual std::vector<item_hash_t> get_blockchain_synopsis(const item_hash_t &reference_point,
uint32_t number_of_blocks_after_reference_point) override {
try {
@ -733,26 +733,26 @@ public:
low_block_num += (true_high_block_num - low_block_num + 2) / 2;
} while (low_block_num <= high_block_num);
//idump((synopsis));
// idump((synopsis));
return synopsis;
}
FC_CAPTURE_AND_RETHROW()
}
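As a rough illustration of the exponential spacing described in the comment above, here is a small standalone Python sketch (not part of the Peerplays codebase; block numbers are made up) that reproduces the halving step visible in this hunk, `low_block_num += (true_high_block_num - low_block_num + 2) / 2`:

```
def synopsis_block_numbers(low_block_num, high_block_num):
    """Pick the block numbers that would go into a sync synopsis.

    low_block_num is the oldest block we still describe (e.g. the last
    irreversible block), high_block_num is the current head block.
    """
    numbers = []
    while low_block_num <= high_block_num:
        numbers.append(low_block_num)
        # Same step as the loop above: move roughly half way toward the head,
        # so the selected blocks get denser near the end of the chain.
        low_block_num += (high_block_num - low_block_num + 2) // 2
    return numbers

# For a 26-block chain labelled a..z this picks blocks 1, 14, 21, 24, 26,
# i.e. the "a n u x z" example from the comment.
letters = "abcdefghijklmnopqrstuvwxyz"
print([letters[n - 1] for n in synopsis_block_numbers(1, 26)])
```

The real implementation returns block ids (hashes) rather than numbers and also has to account for fork points and the number_of_blocks_after_reference_point reserved for the peer, as described in the comment.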
/**
* Call this after the call to handle_message succeeds.
*
* @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call
* @param item_count the number of items known to the node that haven't been sent to handle_item() yet.
* After `item_count` more calls to handle_item(), the node will be in sync
*/
* Call this after the call to handle_message succeeds.
*
* @param item_type the type of the item we're synchronizing, will be the same as item passed to the sync_from() call
* @param item_count the number of items known to the node that haven't been sent to handle_item() yet.
* After `item_count` more calls to handle_item(), the node will be in sync
*/
virtual void sync_status(uint32_t item_type, uint32_t item_count) override {
// any status reports to GUI go here
}
/**
* Call any time the number of connected peers changes.
*/
* Call any time the number of connected peers changes.
*/
virtual void connection_count_changed(uint32_t c) override {
// any status reports to GUI go here
}
@ -769,9 +769,9 @@ public:
}
/**
* Returns the time a block was produced (if block_id = 0, returns genesis time).
* If we don't know about the block, returns time_point_sec::min()
*/
* Returns the time a block was produced (if block_id = 0, returns genesis time).
* If we don't know about the block, returns time_point_sec::min()
*/
virtual fc::time_point_sec get_block_time(const item_hash_t &block_id) override {
try {
auto opt_block = _chain_db->fetch_block_by_id(block_id);

View file

@ -313,7 +313,7 @@ public:
uint32_t api_limit_get_trade_history = 100;
uint32_t api_limit_get_trade_history_by_sequence = 100;
//private:
// private:
const account_object *get_account_from_string(const std::string &name_or_id,
bool throw_if_not_found = true) const;
const asset_object *get_asset_from_string(const std::string &symbol_or_id,
@ -470,7 +470,7 @@ void database_api::set_subscribe_callback(std::function<void(const variant &)> c
}
void database_api_impl::set_subscribe_callback(std::function<void(const variant &)> cb, bool notify_remove_create) {
//edump((clear_filter));
// edump((clear_filter));
_subscribe_callback = cb;
_notify_remove_create = notify_remove_create;
_subscribed_accounts.clear();
@ -2197,7 +2197,7 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::committee: {
auto itr = committee_idx.find(id);
if (itr != committee_idx.end())
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 2)); // Depth of committee_member_object is 1, add 1 to be safe
else
result.emplace_back(variant());
break;
@ -2205,7 +2205,7 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::witness: {
auto itr = witness_idx.find(id);
if (itr != witness_idx.end())
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 2)); // Depth of witness_object is 1, add 1 here to be safe
else
result.emplace_back(variant());
break;
@ -2213,11 +2213,15 @@ vector<variant> database_api_impl::lookup_vote_ids(const vector<vote_id_type> &v
case vote_id_type::worker: {
auto itr = for_worker_idx.find(id);
if (itr != for_worker_idx.end()) {
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 4)); // Depth of worker_object is 3, add 1 here to be safe.
// If we want to extract the balance object inside,
// need to increase this value
} else {
auto itr = against_worker_idx.find(id);
if (itr != against_worker_idx.end()) {
result.emplace_back(variant(*itr, 1));
result.emplace_back(variant(*itr, 4)); // Depth of worker_object is 3, add 1 here to be safe.
// If we want to extract the balance object inside,
// need to increase this value
} else {
result.emplace_back(variant());
}
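The depth arguments changed above (variant(*itr, 1) becoming variant(*itr, 2) or variant(*itr, 4)) follow the rule spelled out in the new comments: the limit must exceed the nesting depth of the object being serialized, otherwise conversion stops before reaching the innermost members. A toy Python model of a depth-limited conversion, purely for illustration (this is not the fc implementation, and the sample object is hypothetical):

```
def to_variant(obj, max_depth):
    """Toy model of depth-limited object-to-variant conversion."""
    if max_depth <= 0:
        raise RuntimeError("nesting too deep")
    if isinstance(obj, dict):
        return {k: to_variant(v, max_depth - 1) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [to_variant(v, max_depth - 1) for v in obj]
    return obj

# Three levels of nested sub-objects need max_depth >= 4 here: with 3 the
# innermost field can no longer be converted, which is the same spirit as
# the "depth of worker_object is 3, add 1 to be safe" comments above.
worker_like = {"id": "1.14.0", "vesting": {"balance": {"amount": 100}}}
print(to_variant(worker_like, 4))
```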
@ -2903,7 +2907,7 @@ graphene::app::gpos_info database_api::get_gpos_info(const account_id_type accou
}
graphene::app::gpos_info database_api_impl::get_gpos_info(const account_id_type account) const {
FC_ASSERT(_db.head_block_time() > HARDFORK_GPOS_TIME); //Can be deleted after GPOS hardfork time
FC_ASSERT(_db.head_block_time() > HARDFORK_GPOS_TIME); // Can be deleted after GPOS hardfork time
gpos_info result;
result.vesting_factor = _db.calculate_vesting_factor(account(_db));
@ -3550,9 +3554,9 @@ void database_api_impl::handle_object_changed(bool force_notify, bool full_objec
/// pushing the future back / popping the prior future if it is complete.
/// if a connection hangs then this could get backed up and result in
/// a failure to exit cleanly.
//fc::async([capture_this,this,updates,market_broadcast_queue](){
//if( _subscribe_callback )
// _subscribe_callback( updates );
// fc::async([capture_this,this,updates,market_broadcast_queue](){
// if( _subscribe_callback )
// _subscribe_callback( updates );
for (auto id : ids) {
if (id.is<call_order_object>()) {

View file

@ -85,10 +85,10 @@ struct asset_holders {
};
/**
* @brief The history_api class implements the RPC API for account history
*
* This API contains methods to access account histories
*/
* @brief The history_api class implements the RPC API for account history
*
* This API contains methods to access account histories
*/
class history_api {
public:
history_api(application &app) :
@ -97,27 +97,27 @@ public:
}
/**
* @brief Get operations relevant to the specificed account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
* @brief Get operations relevant to the specificed account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(const std::string account_id_or_name,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type()) const;
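For reference, the parameters documented above map one-to-one onto an RPC call. A minimal sketch, assuming the usual Graphene websocket conventions; the endpoint and account name are placeholders, and depending on node configuration a login_api handshake may be required before the history API can be addressed by name:

```
import json
from websocket import create_connection  # pip install websocket-client

ws = create_connection("wss://example-peerplays-node/ws")  # placeholder endpoint

# get_account_history(account_id_or_name, stop, limit, start);
# "1.11.0" is the default operation_history_id_type, i.e. no bound.
request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "call",
    "params": ["history", "get_account_history", ["some-account", "1.11.0", 10, "1.11.0"]],
}
ws.send(json.dumps(request))
print(json.loads(ws.recv()).get("result"))  # operations, most recent first
ws.close()
```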
/**
* @brief Get only asked operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param operation_id The ID of the operation we want to get operations in the account( 0 = transfer , 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
* @brief Get only asked operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param operation_id The ID of the operation we want to get operations in the account( 0 = transfer , 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(const std::string account_id_or_name,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
@ -125,17 +125,17 @@ public:
unsigned limit = 100) const;
/**
* @breif Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
* @breif Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history(const std::string account_id_or_name,
uint32_t stop = 0,
unsigned limit = 100,
@ -156,8 +156,8 @@ private:
};
/**
* @brief Block api
*/
* @brief Block api
*/
class block_api {
public:
block_api(graphene::chain::database &db);
@ -170,8 +170,8 @@ private:
};
/**
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api> {
public:
network_broadcast_api(application &a);
@ -186,36 +186,36 @@ public:
typedef std::function<void(variant /*transaction_confirmation*/)> confirmation_callback;
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
void broadcast_transaction(const signed_transaction &trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback(confirmation_callback cb, const signed_transaction &trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction &trx);
void broadcast_block(const signed_block &block);
/**
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
*/
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
*/
void on_applied_block(const signed_block &b);
private:
@ -225,60 +225,60 @@ private:
};
/**
* @brief The network_node_api class allows maintenance of p2p connections.
*/
* @brief The network_node_api class allows maintenance of p2p connections.
*/
class network_node_api {
public:
network_node_api(application &a);
/**
* @brief Return general network information, such as p2p port
*/
* @brief Return general network information, such as p2p port
*/
fc::variant_object get_info() const;
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
void add_node(const fc::ip::endpoint &ep);
/**
* @brief Get status of all current connections to peers
*/
* @brief Get status of all current connections to peers
*/
std::vector<net::peer_status> get_connected_peers() const;
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object &params);
/**
* @brief Return list of potential peers
*/
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Return list of pending transactions.
*/
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when new transaction is created.
*/
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant &)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
@ -290,33 +290,33 @@ private:
};
/**
* @brief
*/
* @brief
*/
class asset_api {
public:
asset_api(graphene::app::application &app);
~asset_api();
/**
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum limit must not exceed 100
* @return A list of asset holders for the specified asset
*/
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum limit must not exceed 100
* @return A list of asset holders for the specified asset
*/
vector<account_asset_balance> get_asset_holders(std::string asset, uint32_t start, uint32_t limit) const;
/**
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
int get_asset_holders_count(std::string asset) const;
/**
* @brief Get all asset holders
* @return A list of all asset holders
*/
* @brief Get all asset holders
* @return A list of all asset holders
*/
vector<asset_holders> get_all_asset_holders() const;
uint32_t api_limit_get_asset_holders = 100;
@ -337,24 +337,24 @@ extern template class fc::api<graphene::debug_witness::debug_api>;
namespace graphene { namespace app {
/**
* @brief The login_api class implements the bottom layer of the RPC API
*
* All other APIs must be requested from this API.
*/
* @brief The login_api class implements the bottom layer of the RPC API
*
* All other APIs must be requested from this API.
*/
class login_api {
public:
login_api(application &a);
~login_api();
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has sucessfully authenticated.
*/
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has sucessfully authenticated.
*/
bool login(const string &user, const string &password);
/// @brief Retrieve the network block API
fc::api<block_api> block() const;

View file

@ -198,10 +198,10 @@ public:
optional<block_header> get_block_header(uint32_t block_num) const;
/**
* @brief Retrieve multiple block header by block numbers
* @param block_num vector containing heights of the block whose header should be returned
* @return array of headers of the referenced blocks, or null if no matching block was found
*/
* @brief Retrieve multiple block header by block numbers
* @param block_num vector containing heights of the block whose header should be returned
* @return array of headers of the referenced blocks, or null if no matching block was found
*/
map<uint32_t, optional<block_header>> get_block_header_batch(const vector<uint32_t> block_nums) const;
/**
@ -279,12 +279,12 @@ public:
vector<vector<account_id_type>> get_key_references(vector<public_key_type> key) const;
/**
* Determine whether a textual representation of a public key
* (in Base-58 format) is *currently* linked
* to any *registered* (i.e. non-stealth) account on the blockchain
* @param public_key Public key
* @return Whether a public key is known
*/
* Determine whether a textual representation of a public key
* (in Base-58 format) is *currently* linked
* to any *registered* (i.e. non-stealth) account on the blockchain
* @param public_key Public key
* @return Whether a public key is known
*/
bool is_public_key_registered(string public_key) const;
//////////////

View file

@ -33,45 +33,163 @@ namespace graphene { namespace chain {
void_result transfer_to_blind_evaluator::do_evaluate( const transfer_to_blind_operation& o )
{ try {
return void_result();
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME )
{
const auto& atype = o.amount.asset_id(d);
FC_ASSERT( atype.allow_confidential() );
FC_ASSERT( !atype.is_transfer_restricted() );
FC_ASSERT( !(atype.options.flags & white_list) );
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_to_blind_evaluator::do_apply( const transfer_to_blind_operation& o )
{ try {
return void_result();
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.from, -o.amount);
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply += o.amount.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.amount.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_to_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_to_blind);
else
generic_evaluator::pay_fee();
}
}
void_result transfer_from_blind_evaluator::do_evaluate( const transfer_from_blind_operation& o )
{ try {
return void_result();
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_from_blind_evaluator::do_apply( const transfer_from_blind_operation& o )
{ try {
return void_result();
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee);
db().adjust_balance(o.to, o.amount);
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
db().remove(*itr);
}
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.amount.amount + o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_from_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_from_blind);
else
generic_evaluator::pay_fee();
}
}
void_result blind_transfer_evaluator::do_evaluate( const blind_transfer_operation& o )
{ try {
return void_result();
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &out : o.outputs) {
for (const auto &a : out.owner.account_auths)
a.first(d); // verify all accounts exist and are valid
}
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result blind_transfer_evaluator::do_apply( const blind_transfer_operation& o )
{ try {
return void_result();
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee); // deposit the fee to the temp account
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
db().remove(*itr);
}
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.fee.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
const auto &add = o.fee.asset_id(db()).dynamic_asset_data_id(db());
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void blind_transfer_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_blind_transfer);
else
generic_evaluator::pay_fee();
}
}
} } // graphene::chain

View file

@ -739,13 +739,11 @@ void database::_apply_block( const signed_block& next_block )
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM) {
update_witness_schedule(next_block);
bool need_to_update_son_schedule = false;
for(const auto& active_sons : global_props.active_sons){
if(!active_sons.second.empty())
need_to_update_son_schedule = true;
}
if(need_to_update_son_schedule) {
update_son_schedule(next_block);
for(const auto& active_sons : global_props.active_sons) {
if(!active_sons.second.empty()) {
update_son_schedule(active_sons.first, next_block);
}
}
}
@ -783,15 +781,11 @@ void database::_apply_block( const signed_block& next_block )
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM) {
update_witness_schedule();
bool need_update_son_schedule = false;
for(const auto& active_sidechain_type : active_sidechain_types(dynamic_global_props.time)) {
if(global_props.active_sons.at(active_sidechain_type).size() > 0) {
need_update_son_schedule = true;
update_son_schedule(active_sidechain_type);
}
}
if(need_update_son_schedule) {
update_son_schedule();
}
}
if( !_node_property_object.debug_updates.empty() )

View file

@ -305,16 +305,14 @@ bool database::is_son_dereg_valid( son_id_type son_id )
}
bool status_son_dereg_valid = true;
for(const auto& status : son->statuses)
{
const auto& sidechain = status.first;
if(status.second != son_status::in_maintenance)
for (const auto &active_sidechain_type : active_sidechain_types(head_block_time())) {
if(son->statuses.at(active_sidechain_type) != son_status::in_maintenance)
status_son_dereg_valid = false;
if(status_son_dereg_valid)
{
if(son->statistics(*this).last_active_timestamp.contains(sidechain)) {
if (head_block_time() - son->statistics(*this).last_active_timestamp.at(sidechain) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
if(son->statistics(*this).last_active_timestamp.contains(active_sidechain_type)) {
if (head_block_time() - son->statistics(*this).last_active_timestamp.at(active_sidechain_type) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
status_son_dereg_valid = false;
}
}
@ -388,23 +386,14 @@ vector<uint64_t> database::get_random_numbers(uint64_t minimum, uint64_t maximum
bool database::is_asset_creation_allowed(const string &symbol)
{
time_point_sec now = head_block_time();
std::unordered_set<std::string> post_son_hf_symbols = {"ETH", "USDT", "BNB", "ADA", "DOGE", "XRP", "USDC", "DOT", "UNI", "BUSD", "BCH", "LTC", "SOL", "LINK", "MATIC", "THETA",
"WBTC", "XLM", "ICP", "DAI", "VET", "ETC", "TRX", "FIL", "XMR", "EGR", "EOS", "SHIB", "AAVE", "CRO", "ALGO", "AMP", "BTCB",
"BSV", "KLAY", "CAKE", "FTT", "LEO", "XTZ", "TFUEL", "MIOTA", "LUNA", "NEO", "ATOM", "MKR", "FEI", "WBNB", "UST", "AVAX",
"STEEM", "HIVE", "HBD", "SBD", "BTS"};
if (symbol == "BTC")
{
if (now < HARDFORK_SON_TIME)
return false;
}
if (post_son_hf_symbols.find(symbol) != post_son_hf_symbols.end())
{
if (now >= HARDFORK_SON_TIME)
if (head_block_time() < HARDFORK_SON_TIME)
return false;
}
return true;
}
} }
}
}

View file

@ -170,6 +170,7 @@ struct worker_pay_visitor
worker.pay_worker(pay, db);
}
};
void database::update_worker_votes()
{
auto& idx = get_index_type<worker_index>();
@ -185,13 +186,131 @@ void database::update_worker_votes()
}
}
void database::pay_sons()
void database::hotfix_2024()
{
if (head_block_time() >= HARDFORK_HOTFIX_2024_TIME)
{
if (get_chain_id().str() == "6b6b5f0ce7a36d323768e534f3edb41c6d6332a541a95725b98e28d140850134")
{
const auto& vb_idx = get_index_type<vesting_balance_index>().indices().get<by_id>();
auto vbo = vb_idx.find(vesting_balance_id_type(388));
if (vbo != vb_idx.end())
{
if (vbo->owner == account_id_type(14786))
{
modify(*vbo, [&]( vesting_balance_object& _vbo)
{
_vbo.owner = account_id_type(0);
});
}
}
}
}
}
void database::pay_sons_before_hf_ethereum()
{
const auto now = head_block_time();
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
// Current requirement is that we have to pay every 24 hours, so the following check
if( dpo.son_budget.value > 0 && ((now - dpo.last_son_payout_time) >= fc::seconds(get_global_properties().parameters.son_pay_time())))
{
const sidechain_type st = sidechain_type::bitcoin;
const auto sons = sort_votable_objects<son_index>(st, get_global_properties().parameters.maximum_son_count());
// After SON2 HF
uint64_t total_votes = 0;
for( const son_object& son : sons )
{
FC_ASSERT(son.get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", son));
total_votes += _vote_tally_buffer[*son.get_sidechain_vote_id(st)];
}
const int8_t bits_to_drop = std::max(int(boost::multiprecision::detail::find_msb(total_votes)) - 15, 0);
auto get_weight = [&bits_to_drop]( uint64_t son_votes ) {
const uint16_t weight = std::max((son_votes >> bits_to_drop), uint64_t(1) );
return weight;
};
// Before SON2 HF
auto get_weight_before_son2_hf = []( uint64_t son_votes ) {
const int8_t bits_to_drop = std::max(int(boost::multiprecision::detail::find_msb(son_votes)) - 15, 0);
const uint16_t weight = std::max((son_votes >> bits_to_drop), uint64_t(1) );
return weight;
};
uint64_t weighted_total_txs_signed = 0;
const share_type son_budget = dpo.son_budget;
get_index_type<son_stats_index>().inspect_all_objects([this, &weighted_total_txs_signed, &get_weight, &now, &get_weight_before_son2_hf, &st](const object& o) {
const son_statistics_object& s = static_cast<const son_statistics_object&>(o);
const auto& idx = get_index_type<son_index>().indices().get<by_id>();
const auto son_obj = idx.find( s.owner );
uint16_t son_weight = 0;
FC_ASSERT(son_obj->get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", *son_obj));
if( now >= HARDFORK_SON2_TIME ) {
son_weight += get_weight(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
else {
son_weight += get_weight_before_son2_hf(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
const uint64_t txs_signed_bitcoin = s.txs_signed.contains(sidechain_type::bitcoin) ? s.txs_signed.at(sidechain_type::bitcoin) : 0;
const uint64_t txs_signed_hive = s.txs_signed.contains(sidechain_type::hive) ? s.txs_signed.at(sidechain_type::hive) : 0;
weighted_total_txs_signed += ((txs_signed_bitcoin + txs_signed_hive) * son_weight);
});
// Now pay off each SON proportional to the number of transactions signed.
get_index_type<son_stats_index>().inspect_all_objects([this, &weighted_total_txs_signed, &dpo, &son_budget, &get_weight, &get_weight_before_son2_hf, &now, &st](const object& o) {
const son_statistics_object& s = static_cast<const son_statistics_object&>(o);
const uint64_t txs_signed_bitcoin = s.txs_signed.contains(sidechain_type::bitcoin) ? s.txs_signed.at(sidechain_type::bitcoin) : 0;
const uint64_t txs_signed_hive = s.txs_signed.contains(sidechain_type::hive) ? s.txs_signed.at(sidechain_type::hive) : 0;
if(txs_signed_bitcoin > 0 || txs_signed_hive > 0) {
const auto& idx = get_index_type<son_index>().indices().get<by_id>();
auto son_obj = idx.find( s.owner );
uint16_t son_weight = 0;
FC_ASSERT(son_obj->get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", *son_obj));
if( now >= HARDFORK_SON2_TIME ) {
son_weight += get_weight(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
else {
son_weight += get_weight_before_son2_hf(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
const share_type pay = ((txs_signed_bitcoin + txs_signed_hive) * son_weight * son_budget.value)/weighted_total_txs_signed;
modify( *son_obj, [&]( son_object& _son_obj)
{
_son_obj.pay_son_fee(pay, *this);
});
//Remove the amount paid out to SON from global SON Budget
modify( dpo, [&]( dynamic_global_property_object& _dpo )
{
_dpo.son_budget -= pay;
} );
//Reset the tx counter in each son statistics object
modify( s, [&]( son_statistics_object& _s)
{
if(_s.txs_signed.contains(sidechain_type::bitcoin))
_s.txs_signed.at(sidechain_type::bitcoin) = 0;
if(_s.txs_signed.contains(sidechain_type::hive))
_s.txs_signed.at(sidechain_type::hive) = 0;
});
}
});
//Note the last son pay out time
modify( dpo, [&]( dynamic_global_property_object& _dpo )
{
_dpo.last_son_payout_time = now;
});
}
}
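Both pay_sons_* variants distribute the SON budget with the same arithmetic: each SON's vote total is compressed into a 16-bit weight (bits_to_drop comes from the most significant bit of the summed votes, and a weight never drops below 1), and the budget is split in proportion to txs_signed * weight; after the Ethereum hardfork the same computation simply runs per active sidechain with a per-sidechain bits_to_drop. A small standalone Python sketch of that arithmetic with made-up vote and transaction counts:

```
def son_weight(votes, bits_to_drop):
    # Same idea as the get_weight lambdas: shift out high bits, but never
    # let a SON's weight fall to zero.
    return max(votes >> bits_to_drop, 1)

def pay_sons(son_stats, budget):
    """son_stats maps SON -> (votes, txs_signed); returns SON -> payout."""
    total_votes = sum(votes for votes, _ in son_stats.values())
    # msb(total_votes) - 15, floored at 0, so shifted vote counts fit in 16 bits.
    bits_to_drop = max(total_votes.bit_length() - 1 - 15, 0)
    weighted_total = sum(txs * son_weight(votes, bits_to_drop)
                         for votes, txs in son_stats.values())
    return {name: txs * son_weight(votes, bits_to_drop) * budget // weighted_total
            for name, (votes, txs) in son_stats.items()}

# Hypothetical numbers, purely for illustration.
print(pay_sons({"son-a": (70_000, 12), "son-b": (70_000, 4), "son-c": (5_000, 4)}, 10_000))
```

Whatever is not paid out (integer division rounds down) stays in dpo.son_budget and rolls into the next budget, as the surrounding comments note.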
void database::pay_sons_after_hf_ethereum()
{
const time_point_sec now = head_block_time();
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
// Current requirement is that we have to pay every 24 hours, so the following check
if( dpo.son_budget.value > 0 && ((now - dpo.last_son_payout_time) >= fc::seconds(get_global_properties().parameters.son_pay_time())))
{
flat_map<sidechain_type, int8_t> bits_to_drop;
for(const auto& active_sidechain_type : active_sidechain_types(now))
{
assert( _son_count_histogram_buffer.at(active_sidechain_type).size() > 0 );
@ -209,93 +328,72 @@ void database::pay_sons()
}
}
const sidechain_type st = [&now, &active_sidechain_type]{
if( now < HARDFORK_SON_FOR_ETHEREUM_TIME )
return sidechain_type::bitcoin;
else
return active_sidechain_type;
}();
const auto sons = sort_votable_objects<son_index>(st,
(std::max(son_count*2+1, (size_t)get_chain_properties().immutable_parameters.min_son_count))
);
const auto sons = sort_votable_objects<son_index>(active_sidechain_type, (std::max(son_count*2+1, (size_t)get_chain_properties().immutable_parameters.min_son_count)));
// After SON2 HF
uint64_t total_votes = 0;
for( const son_object& son : sons )
{
FC_ASSERT(son.get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", son));
total_votes += _vote_tally_buffer[*son.get_sidechain_vote_id(st)];
FC_ASSERT(son.get_sidechain_vote_id(active_sidechain_type).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", active_sidechain_type)("son", son));
total_votes += _vote_tally_buffer[*son.get_sidechain_vote_id(active_sidechain_type)];
}
const int8_t bits_to_drop = std::max(int(boost::multiprecision::detail::find_msb(total_votes)) - 15, 0);
auto get_weight = [&bits_to_drop]( uint64_t son_votes ) {
const uint16_t weight = std::max((son_votes >> bits_to_drop), uint64_t(1) );
return weight;
};
// Before SON2 HF
auto get_weight_before_son2_hf = []( uint64_t son_votes ) {
const int8_t bits_to_drop = std::max(int(boost::multiprecision::detail::find_msb(son_votes)) - 15, 0);
const uint16_t weight = std::max((son_votes >> bits_to_drop), uint64_t(1) );
return weight;
};
uint64_t weighted_total_txs_signed = 0;
const share_type son_budget = dpo.son_budget;
get_index_type<son_stats_index>().inspect_all_objects([this, &weighted_total_txs_signed, &get_weight, &now, &get_weight_before_son2_hf, &active_sidechain_type, &st](const object& o) {
const son_statistics_object& s = static_cast<const son_statistics_object&>(o);
const auto& idx = get_index_type<son_index>().indices().get<by_id>();
const auto son_obj = idx.find( s.owner );
uint16_t son_weight = 0;
FC_ASSERT(son_obj->get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", *son_obj));
if( now >= HARDFORK_SON2_TIME ) {
son_weight += get_weight(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
else {
son_weight += get_weight_before_son2_hf(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
bits_to_drop[active_sidechain_type] = std::max(int(boost::multiprecision::detail::find_msb(total_votes)) - 15, 0);
}
auto get_weight = [&bits_to_drop]( sidechain_type sidechain, uint64_t son_votes ) {
const uint16_t weight = std::max((son_votes >> bits_to_drop.at(sidechain)), uint64_t(1) );
return weight;
};
// Calculate weighted_total_txs_signed
uint64_t weighted_total_txs_signed = 0;
get_index_type<son_stats_index>().inspect_all_objects([this, &weighted_total_txs_signed, &get_weight, &now](const object& o) {
for(const auto& active_sidechain_type : active_sidechain_types(now)) {
const son_statistics_object &s = static_cast<const son_statistics_object &>(o);
const auto &idx = get_index_type<son_index>().indices().get<by_id>();
const auto son_obj = idx.find(s.owner);
FC_ASSERT(son_obj->get_sidechain_vote_id(active_sidechain_type).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", active_sidechain_type)("son", *son_obj));
const uint16_t son_weight = get_weight(active_sidechain_type, _vote_tally_buffer[*son_obj->get_sidechain_vote_id(active_sidechain_type)]);
const uint64_t txs_signed = s.txs_signed.contains(active_sidechain_type) ? s.txs_signed.at(active_sidechain_type) : 0;
weighted_total_txs_signed += (txs_signed * son_weight);
});
}
});
// Now pay off each SON proportional to the number of transactions signed
const share_type son_budget = dpo.son_budget;
get_index_type<son_stats_index>().inspect_all_objects([this, &now, &get_weight, &weighted_total_txs_signed, &dpo, &son_budget](const object& o) {
for(const auto& active_sidechain_type : active_sidechain_types(now)) {
const son_statistics_object &s = static_cast<const son_statistics_object &>(o);
// Now pay off each SON proportional to the number of transactions signed.
get_index_type<son_stats_index>().inspect_all_objects([this, &weighted_total_txs_signed, &dpo, &son_budget, &get_weight, &get_weight_before_son2_hf, &now, &active_sidechain_type, &st](const object& o) {
const son_statistics_object& s = static_cast<const son_statistics_object&>(o);
const uint64_t txs_signed = s.txs_signed.contains(active_sidechain_type) ? s.txs_signed.at(active_sidechain_type) : 0;
if(txs_signed > 0) {
const auto& idx = get_index_type<son_index>().indices().get<by_id>();
auto son_obj = idx.find( s.owner );
if (txs_signed > 0) {
const auto &idx = get_index_type<son_index>().indices().get<by_id>();
auto son_obj = idx.find(s.owner);
uint16_t son_weight = 0;
FC_ASSERT(son_obj->get_sidechain_vote_id(st).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", st)("son", *son_obj));
if( now >= HARDFORK_SON2_TIME ) {
son_weight += get_weight(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
else {
son_weight += get_weight_before_son2_hf(_vote_tally_buffer[*son_obj->get_sidechain_vote_id(st)]);
}
const share_type pay = (txs_signed * son_weight * son_budget.value)/weighted_total_txs_signed;
modify( *son_obj, [&]( son_object& _son_obj)
{
_son_obj.pay_son_fee(pay, *this);
FC_ASSERT(son_obj->get_sidechain_vote_id(active_sidechain_type).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", active_sidechain_type)("son", *son_obj));
son_weight += get_weight(active_sidechain_type, _vote_tally_buffer[*son_obj->get_sidechain_vote_id(active_sidechain_type)]);
const share_type pay = (txs_signed * son_weight * son_budget.value) / weighted_total_txs_signed;
modify(*son_obj, [&](son_object &_son_obj) {
_son_obj.pay_son_fee(pay, *this);
});
//Remove the amount paid out to SON from global SON Budget
modify( dpo, [&]( dynamic_global_property_object& _dpo )
{
_dpo.son_budget -= pay;
} );
//Reset the tx counter in each son statistics object
modify( s, [&]( son_statistics_object& _s)
{
if(_s.txs_signed.contains(active_sidechain_type))
// Remove the amount paid out to SON from global SON Budget
modify(dpo, [&](dynamic_global_property_object &_dpo) {
_dpo.son_budget -= pay;
});
// Reset the tx counter in each son statistics object
modify(s, [&](son_statistics_object &_s) {
if (_s.txs_signed.contains(active_sidechain_type))
_s.txs_signed.at(active_sidechain_type) = 0;
});
}
});
//Note the last son pay out time
modify( dpo, [&]( dynamic_global_property_object& _dpo )
{
_dpo.last_son_payout_time = now;
});
}
}
});
//Note the last son pay out time
modify( dpo, [&]( dynamic_global_property_object& _dpo )
{
_dpo.last_son_payout_time = now;
});
}
}
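For context on the weighting introduced above: bits_to_drop is derived from the most significant bit of the combined vote tally so that every per-SON weight fits in 16 bits, and each SON still gets a weight of at least 1. A self-contained sketch of that scaling with illustrative vote counts (the local find_msb stands in for boost::multiprecision::detail::find_msb):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Position of the highest set bit, mirroring boost::multiprecision::detail::find_msb.
static int find_msb(uint64_t v) {
   int msb = -1;
   while (v) { v >>= 1; ++msb; }
   return msb;
}

int main() {
   const uint64_t total_votes = 5000000;                              // combined tally, illustrative
   const int bits_to_drop = std::max(find_msb(total_votes) - 15, 0);
   const auto get_weight = [&](uint64_t son_votes) {
      return static_cast<uint16_t>(std::max(son_votes >> bits_to_drop, uint64_t(1)));
   };
   // find_msb(5000000) == 22, so 7 low bits are dropped: 1000000 votes -> weight 7812.
   std::cout << bits_to_drop << ' ' << get_weight(1000000) << ' ' << get_weight(3) << "\n";   // 7 7812 1
}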
@ -2152,7 +2250,7 @@ void database::perform_son_tasks()
});
}
// create HIVE asset here because son_account is the issuer of the HIVE
if (gpo.parameters.hive_asset() == asset_id_type() && head_block_time() >= HARDFORK_SON_FOR_HIVE_TIME)
{
const asset_dynamic_data_object& dyn_asset =
create<asset_dynamic_data_object>([](asset_dynamic_data_object& a) {
@ -2192,7 +2290,10 @@ void database::perform_son_tasks()
// Before making a budget we should pay out SONs
// This function should check if its time to pay sons
// and modify the global son funds accordingly, whatever is left is passed on to next budget
pay_sons();
if(head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME)
pay_sons_before_hf_ethereum();
else
pay_sons_after_hf_ethereum();
}
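Keeping both payout paths is what lets a fresh node replay pre-hardfork blocks deterministically: the branch is taken on head_block_time() at every maintenance interval, so historical blocks always run the old routine. A toy illustration of the guard (time_point_sec is replaced by a plain epoch count; the constant is the mainnet value from the hardfork file further below):

#include <cstdint>
#include <iostream>

using time_point_sec = uint32_t;                                      // stand-in for fc::time_point_sec
constexpr time_point_sec HARDFORK_SON_FOR_ETHEREUM = 1698148800;      // 2023-10-24T12:00:00 UTC

void pay_sons(time_point_sec head_block_time) {
   if (head_block_time < HARDFORK_SON_FOR_ETHEREUM)
      std::cout << "pay_sons_before_hf_ethereum\n";   // old weighting, used while replaying history
   else
      std::cout << "pay_sons_after_hf_ethereum\n";    // new per-sidechain weighting incl. Ethereum
}

int main() {
   pay_sons(1690000000);   // block from July 2023 -> old path
   pay_sons(1700000000);   // block from November 2023 -> new path
}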
// Split vote_ids
@ -2205,7 +2306,7 @@ void database::perform_son_tasks()
// Add vote_ids for HIVE and ETHEREUM to all existing SONs
const auto &all_sons = get_index_type<son_index>().indices().get<by_id>();
for (const son_object &son : all_sons) {
vote_id_type existing_vote_id_bitcoin;
const auto existing_vote_id_bitcoin = son.get_bitcoin_vote_id();
vote_id_type new_vote_id_hive;
vote_id_type new_vote_id_eth;
@ -2222,7 +2323,7 @@ void database::perform_son_tasks()
// Duplicate all votes from bitcoin to hive
const auto &all_accounts = get_index_type<account_index>().indices().get<by_id>();
for (const auto &account : all_accounts) {
if (account.options.votes.count(existing_vote_id_bitcoin) != 0) {
if (existing_vote_id_bitcoin.valid() && account.options.votes.count(*existing_vote_id_bitcoin) != 0) {
modify(account, [new_vote_id_hive](account_object &a) {
a.options.votes.insert(new_vote_id_hive);
});
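Since get_bitcoin_vote_id() now returns an optional after the vote-id split, the duplication loop above only copies an account's vote when the SON actually carried a Bitcoin vote id. A minimal sketch of that guarded copy (std::optional and std::set stand in for fc::optional and flat_set):

#include <cstdint>
#include <optional>
#include <set>

using vote_id_type = uint32_t;   // stand-in for graphene::chain::vote_id_type

// Copy the existing bitcoin vote to the newly created hive vote id, but only if it exists
// and the account was in fact voting for it.
void duplicate_vote(const std::optional<vote_id_type>& existing_vote_id_bitcoin,
                    vote_id_type new_vote_id_hive,
                    std::set<vote_id_type>& account_votes) {
   if (existing_vote_id_bitcoin && account_votes.count(*existing_vote_id_bitcoin) != 0)
      account_votes.insert(new_vote_id_hive);
}

int main() {
   std::set<vote_id_type> votes{42};
   duplicate_vote(42u, 77u, votes);            // copies: account ends up with {42, 77}
   duplicate_vote(std::nullopt, 99u, votes);   // no bitcoin vote id, nothing copied
   return votes.count(77) && !votes.count(99) ? 0 : 1;
}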
@ -2432,6 +2533,7 @@ void database::perform_chain_maintenance(const signed_block& next_block, const g
update_active_committee_members();
update_active_sons();
update_worker_votes();
hotfix_2024();
const dynamic_global_property_object& dgpo = get_dynamic_global_properties();


@ -200,44 +200,41 @@ void database::update_witness_schedule()
}
}
void database::update_son_schedule()
void database::update_son_schedule(sidechain_type type)
{
const global_property_object& gpo = get_global_properties();
for(const auto& active_sidechain_type : active_sidechain_types(head_block_time()))
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
if( gpo.active_sons.at(type).size() != 0 &&
head_block_num() % gpo.active_sons.at(type).size() == 0)
{
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(active_sidechain_type)));
if( gpo.active_sons.at(active_sidechain_type).size() != 0 &&
head_block_num() % gpo.active_sons.at(active_sidechain_type).size() == 0)
modify( sidechain_sso, [&]( son_schedule_object& _sso )
{
modify( sidechain_sso, [&]( son_schedule_object& _sso )
_sso.current_shuffled_sons.clear();
_sso.current_shuffled_sons.reserve( gpo.active_sons.at(type).size() );
for ( const auto &w : gpo.active_sons.at(type) ) {
_sso.current_shuffled_sons.push_back(w.son_id);
}
auto now_hi = uint64_t(head_block_time().sec_since_epoch()) << 32;
for (uint32_t i = 0; i < _sso.current_shuffled_sons.size(); ++i)
{
_sso.current_shuffled_sons.clear();
_sso.current_shuffled_sons.reserve( gpo.active_sons.at(active_sidechain_type).size() );
/// High performance random generator
/// http://xorshift.di.unimi.it/
uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
k ^= (k >> 12);
k ^= (k << 25);
k ^= (k >> 27);
k *= 2685821657736338717ULL;
for ( const auto &w : gpo.active_sons.at(active_sidechain_type) ) {
_sso.current_shuffled_sons.push_back(w.son_id);
}
auto now_hi = uint64_t(head_block_time().sec_since_epoch()) << 32;
for (uint32_t i = 0; i < _sso.current_shuffled_sons.size(); ++i)
{
/// High performance random generator
/// http://xorshift.di.unimi.it/
uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
k ^= (k >> 12);
k ^= (k << 25);
k ^= (k >> 27);
k *= 2685821657736338717ULL;
uint32_t jmax = _sso.current_shuffled_sons.size() - i;
uint32_t j = i + k % jmax;
std::swap(_sso.current_shuffled_sons[i],
_sso.current_shuffled_sons[j]);
}
});
}
uint32_t jmax = _sso.current_shuffled_sons.size() - i;
uint32_t j = i + k % jmax;
std::swap(_sso.current_shuffled_sons[i],
_sso.current_shuffled_sons[j]);
}
});
}
}
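With the sidechain_type parameter, each call now reshuffles a single schedule object instead of looping over every active sidechain inside the function; the shuffle itself is unchanged. A toy reproduction of that xorshift-seeded pass over a plain vector of ids (deterministic for a given timestamp):

#include <cstdint>
#include <iostream>
#include <vector>

// Toy version of the shuffle applied to current_shuffled_sons above.
void shuffle_sons(std::vector<uint32_t>& sons, uint64_t now_sec) {
   const uint64_t now_hi = now_sec << 32;
   for (uint32_t i = 0; i < sons.size(); ++i) {
      uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
      k ^= (k >> 12);
      k ^= (k << 25);
      k ^= (k >> 27);
      k *= 2685821657736338717ULL;
      const uint32_t jmax = sons.size() - i;
      const uint32_t j = i + k % jmax;
      std::swap(sons[i], sons[j]);
   }
}

int main() {
   std::vector<uint32_t> sons{1, 2, 3, 4, 5};
   shuffle_sons(sons, 1700000000);                  // head_block_time() in seconds
   for (auto id : sons) std::cout << id << ' ';     // same order every run for this timestamp
}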
@ -321,23 +318,15 @@ void database::update_witness_schedule(const signed_block& next_block)
idump( ( double(total_time/1000000.0)/calls) );
}
void database::update_son_schedule(const signed_block& next_block)
void database::update_son_schedule(sidechain_type type, const signed_block& next_block)
{
auto start = fc::time_point::now();
#ifndef NDEBUG
const son_schedule_object& sso = get(son_schedule_id_type());
#endif
const global_property_object& gpo = get_global_properties();
const flat_map<sidechain_type, uint32_t> schedule_needs_filled = [&gpo]()
{
flat_map<sidechain_type, uint32_t> schedule_needs_filled;
for(const auto& sidechain_active_sons : gpo.active_sons)
{
schedule_needs_filled[sidechain_active_sons.first] = sidechain_active_sons.second.size();
}
return schedule_needs_filled;
}();
uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
const uint32_t schedule_needs_filled = gpo.active_sons.at(type).size();
const uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
// We shouldn't be able to generate _pending_block with timestamp
// in the past, and incoming blocks from the network with timestamp
@ -351,46 +340,43 @@ void database::update_son_schedule(const signed_block& next_block)
assert( dpo.random.data_size() == witness_scheduler_rng::seed_length );
assert( witness_scheduler_rng::seed_length == sso.rng_seed.size() );
for(const auto& active_sidechain_type : active_sidechain_types(head_block_time()))
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
son_id_type first_son;
bool slot_is_near = sidechain_sso.scheduler.get_slot( schedule_slot-1, first_son );
son_id_type son_id;
modify(sidechain_sso, [&](son_schedule_object& _sso)
{
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(active_sidechain_type)));
son_id_type first_son;
bool slot_is_near = sidechain_sso.scheduler.get_slot( schedule_slot-1, first_son );
son_id_type son_id;
_sso.slots_since_genesis += schedule_slot;
witness_scheduler_rng rng(_sso.rng_seed.data, _sso.slots_since_genesis);
modify(sidechain_sso, [&](son_schedule_object& _sso)
{
_sso.slots_since_genesis += schedule_slot;
witness_scheduler_rng rng(_sso.rng_seed.data, _sso.slots_since_genesis);
_sso.scheduler._min_token_count = std::max(int(gpo.active_sons.at(type).size()) / 2, 1);
_sso.scheduler._min_token_count = std::max(int(gpo.active_sons.at(active_sidechain_type).size()) / 2, 1);
if( slot_is_near )
if( slot_is_near )
{
uint32_t drain = schedule_slot;
while( drain > 0 )
{
uint32_t drain = schedule_slot;
while( drain > 0 )
{
if( _sso.scheduler.size() == 0 )
break;
_sso.scheduler.consume_schedule();
--drain;
}
if( _sso.scheduler.size() == 0 )
break;
_sso.scheduler.consume_schedule();
--drain;
}
else
{
_sso.scheduler.reset_schedule( first_son );
}
while( !_sso.scheduler.get_slot(schedule_needs_filled.at(active_sidechain_type), son_id) )
{
if( _sso.scheduler.produce_schedule(rng) & emit_turn )
memcpy(_sso.rng_seed.begin(), dpo.random.data(), dpo.random.data_size());
}
_sso.last_scheduling_block = next_block.block_num();
_sso.recent_slots_filled = (
(_sso.recent_slots_filled << 1)
+ 1) << (schedule_slot - 1);
});
}
}
else
{
_sso.scheduler.reset_schedule( first_son );
}
while( !_sso.scheduler.get_slot(schedule_needs_filled, son_id) )
{
if( _sso.scheduler.produce_schedule(rng) & emit_turn )
memcpy(_sso.rng_seed.begin(), dpo.random.data(), dpo.random.data_size());
}
_sso.last_scheduling_block = next_block.block_num();
_sso.recent_slots_filled = (
(_sso.recent_slots_filled << 1)
+ 1) << (schedule_slot - 1);
});
auto end = fc::time_point::now();
static uint64_t total_time = 0;


@ -0,0 +1,7 @@
#ifndef HARDFORK_HOTFIX_2024_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#else
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#endif
#endif
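Both the testnet and mainnet builds pin HARDFORK_HOTFIX_2024_TIME to the same instant here; like the other hardfork macros it is only consumed through head-block-time comparisons, so nodes replaying pre-fork history take exactly the old code path. A small illustration of that guard (time_point_sec replaced by a plain epoch count; the body of hotfix_2024() itself is not shown in this diff):

#include <cstdint>

using time_point_sec = uint32_t;                                  // stand-in for fc::time_point_sec
constexpr time_point_sec HARDFORK_HOTFIX_2024_TIME = 1703030400;  // 2023-12-20T00:00:00 UTC, as defined above

// Sketch: the maintenance step would be expected to short-circuit before the fork point.
bool hotfix_2024_active(time_point_sec head_block_time) {
   return head_block_time >= HARDFORK_HOTFIX_2024_TIME;
}

int main() {
   return hotfix_2024_active(1700000000) ? 1 : 0;   // mid-November 2023 block: not yet active
}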


@ -1,7 +1,7 @@
#ifndef HARDFORK_SON_FOR_ETHEREUM_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-01-24T00:00:00"))
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-07-17T12:00:00"))
#else
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-03-24T00:00:00"))
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-10-24T12:00:00"))
#endif
#endif


@ -158,7 +158,7 @@
#define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4
#define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3
#define GRAPHENE_CURRENT_DB_VERSION "PPY2.4"
#define GRAPHENE_CURRENT_DB_VERSION "PPY2.5"
#define GRAPHENE_IRREVERSIBLE_THRESHOLD (70 * GRAPHENE_1_PERCENT)


@ -292,8 +292,8 @@ namespace graphene { namespace chain {
vector<witness_id_type> get_near_witness_schedule()const;
void update_witness_schedule();
void update_witness_schedule(const signed_block& next_block);
void update_son_schedule();
void update_son_schedule(const signed_block& next_block);
void update_son_schedule(sidechain_type type);
void update_son_schedule(sidechain_type type, const signed_block& next_block);
void check_lottery_end_by_participants( asset_id_type asset_id );
void check_ending_lotteries();
@ -579,7 +579,8 @@ namespace graphene { namespace chain {
void initialize_budget_record( fc::time_point_sec now, budget_record& rec )const;
void process_budget();
void pay_workers( share_type& budget );
void pay_sons();
void pay_sons_before_hf_ethereum();
void pay_sons_after_hf_ethereum();
void perform_son_tasks();
void perform_chain_maintenance(const signed_block& next_block, const global_property_object& global_props);
void update_active_witnesses();
@ -593,6 +594,7 @@ namespace graphene { namespace chain {
const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_son_wallet( const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_worker_votes();
void hotfix_2024();
public:
double calculate_vesting_factor(const account_object& stake_account);


@ -182,6 +182,9 @@ namespace graphene { namespace chain {
GRAPHENE_DECLARE_OP_BASE_EXCEPTIONS( override_transfer );
GRAPHENE_DECLARE_OP_EVALUATE_EXCEPTION( not_permitted, override_transfer, 1, "not permitted" )
GRAPHENE_DECLARE_OP_BASE_EXCEPTIONS( blind_transfer );
GRAPHENE_DECLARE_OP_EVALUATE_EXCEPTION( unknown_commitment, blind_transfer, 1, "Attempting to claim an unknown prior commitment" );
/*
FC_DECLARE_DERIVED_EXCEPTION( addition_overflow, graphene::chain::chain_exception, 30002, "addition overflow" )
FC_DECLARE_DERIVED_EXCEPTION( subtraction_overflow, graphene::chain::chain_exception, 30003, "subtraction overflow" )


@ -158,9 +158,7 @@ struct transfer_to_blind_operation : public base_operation
blind_factor_type blinding_factor;
vector<blind_output> outputs;
account_id_type fee_payer()const { return account_id_type{}; }
//account_id_type fee_payer()const { return from; }
account_id_type fee_payer()const { return from; }
//void validate()const;
//share_type calculate_fee(const fee_parameters_type& )const;
};
@ -181,9 +179,7 @@ struct transfer_from_blind_operation : public base_operation
blind_factor_type blinding_factor;
vector<blind_input> inputs;
account_id_type fee_payer()const { return account_id_type{}; }
//account_id_type fee_payer()const { return GRAPHENE_TEMP_ACCOUNT; }
account_id_type fee_payer()const { return GRAPHENE_TEMP_ACCOUNT; }
//void validate()const;
//void get_required_authorities( vector<authority>& a )const
//{
@ -246,10 +242,8 @@ struct blind_transfer_operation : public base_operation
vector<blind_input> inputs;
vector<blind_output> outputs;
account_id_type fee_payer()const { return account_id_type{}; }
/** graphene TEMP account */
//account_id_type fee_payer()const;
account_id_type fee_payer()const { return GRAPHENE_TEMP_ACCOUNT; }
//void validate()const;
//share_type calculate_fee( const fee_parameters_type& k )const;
//void get_required_authorities( vector<authority>& a )const


@ -36,6 +36,15 @@ namespace graphene { namespace chain {
deposit_address(""),
withdraw_public_key(""),
withdraw_address("") {}
inline string get_deposit_address() const {
if(sidechain_type::ethereum != sidechain)
return deposit_address;
auto deposit_address_lower = deposit_address;
std::transform(deposit_address_lower.begin(), deposit_address_lower.end(), deposit_address_lower.begin(), ::tolower);
return deposit_address_lower;
}
};
struct by_account;
@ -76,7 +85,7 @@ namespace graphene { namespace chain {
ordered_non_unique< tag<by_sidechain_and_deposit_address_and_expires>,
composite_key<sidechain_address_object,
member<sidechain_address_object, sidechain_type, &sidechain_address_object::sidechain>,
member<sidechain_address_object, string, &sidechain_address_object::deposit_address>,
const_mem_fun<sidechain_address_object, string, &sidechain_address_object::get_deposit_address>,
member<sidechain_address_object, time_point_sec, &sidechain_address_object::expires>
>
>
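The new get_deposit_address() accessor normalises Ethereum deposit addresses to lowercase before they are used as an index key (via const_mem_fun), so checksummed and lowercase spellings of the same address resolve to the same sidechain_address_object; other sidechains keep the stored string untouched. A standalone sketch of the normalisation (the address literal is a dummy value):

#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>

// Same transform that sidechain_address_object::get_deposit_address() applies for ethereum.
std::string normalise_eth_address(std::string addr) {
   std::transform(addr.begin(), addr.end(), addr.begin(),
                  [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
   return addr;
}

int main() {
   const std::string checksummed = "0xAbCdEf0123456789AbCdEf0123456789AbCdEf01";
   const std::string lowercase   = "0xabcdef0123456789abcdef0123456789abcdef01";
   std::cout << (normalise_eth_address(checksummed) == lowercase) << "\n";   // prints 1
}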


@ -97,10 +97,18 @@ void_result update_son_wallet_evaluator::do_evaluate(const son_wallet_update_ope
FC_ASSERT(db().head_block_time() >= HARDFORK_SON_TIME, "Not allowed until SON HARDFORK");
FC_ASSERT( op.payer == db().get_global_properties().parameters.son_account(), "SON paying account must be set as payer." );
const son_wallet_id_type son_wallet_id = [&]{
if(db().head_block_time() >= HARDFORK_SON_FOR_ETHEREUM_TIME)
{
const auto ast = active_sidechain_types(db().head_block_time());
const auto id = (op.son_wallet_id.instance.value - std::distance(ast.begin(), ast.find(op.sidechain))) / ast.size();
return son_wallet_id_type{ id };
}
return op.son_wallet_id;
}();
const auto& idx = db().get_index_type<son_wallet_index>().indices().get<by_id>();
const auto ast = active_sidechain_types(db().head_block_time());
const auto id = (op.son_wallet_id.instance.value - std::distance(ast.begin(), ast.find(op.sidechain))) / ast.size();
const son_wallet_id_type son_wallet_id{ id };
FC_ASSERT( idx.find(son_wallet_id) != idx.end() );
//auto itr = idx.find(op.son_wallet_id);
//FC_ASSERT( itr->addresses.find(op.sidechain) == itr->addresses.end() ||
@ -110,10 +118,18 @@ void_result update_son_wallet_evaluator::do_evaluate(const son_wallet_update_ope
object_id_type update_son_wallet_evaluator::do_apply(const son_wallet_update_operation& op)
{ try {
const son_wallet_id_type son_wallet_id = [&]{
if(db().head_block_time() >= HARDFORK_SON_FOR_ETHEREUM_TIME)
{
const auto ast = active_sidechain_types(db().head_block_time());
const auto id = (op.son_wallet_id.instance.value - std::distance(ast.begin(), ast.find(op.sidechain))) / ast.size();
return son_wallet_id_type{ id };
}
return op.son_wallet_id;
}();
const auto& idx = db().get_index_type<son_wallet_index>().indices().get<by_id>();
const auto ast = active_sidechain_types(db().head_block_time());
const auto id = (op.son_wallet_id.instance.value - std::distance(ast.begin(), ast.find(op.sidechain))) / ast.size();
const son_wallet_id_type son_wallet_id{ id };
auto itr = idx.find(son_wallet_id);
if (itr != idx.end())
{
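The lambda above maps the per-sidechain id carried by the operation back onto a single shared son_wallet object once the Ethereum hardfork is active: subtract the sidechain's position within the active set, then divide by the number of active sidechains. A worked example with an illustrative ordering of {bitcoin, hive, ethereum} (the real ordering comes from active_sidechain_types()):

#include <array>
#include <cstddef>
#include <cstdint>
#include <iostream>

enum class sidechain_type { bitcoin, hive, ethereum };   // illustrative subset and ordering

// Mirrors the id arithmetic in update_son_wallet_evaluator for the post-hardfork path.
uint64_t shared_wallet_id(uint64_t op_instance, sidechain_type sc) {
   const std::array<sidechain_type, 3> ast{sidechain_type::bitcoin, sidechain_type::hive, sidechain_type::ethereum};
   std::size_t offset = 0;
   while (ast[offset] != sc) ++offset;                   // std::distance(ast.begin(), ast.find(sc))
   return (op_instance - offset) / ast.size();
}

int main() {
   // Instances 3, 4 and 5 (one per sidechain, created in the same cycle) all resolve to wallet 1.
   std::cout << shared_wallet_id(3, sidechain_type::bitcoin) << ' '
             << shared_wallet_id(4, sidechain_type::hive) << ' '
             << shared_wallet_id(5, sidechain_type::ethereum) << "\n";
}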


@ -152,7 +152,7 @@ void bitcoin_transaction_builder::add_in(payment_type type, tx_in txin, const by
txin.scriptSig = script_code;
break;
default: {
if (txin.prevout.hash == fc::sha256("0000000000000000000000000000000000000000000000000000000000000000")) { //coinbase
if (txin.prevout.hash == fc::sha256("0000000000000000000000000000000000000000000000000000000000000000")) { // coinbase
FC_ASSERT(script_code != bytes());
txin.scriptSig = script_code;
}


@ -384,7 +384,7 @@ rpc_client::~rpc_client() {
if (connection_selection_task.valid())
connection_selection_task.cancel_and_wait(__FUNCTION__);
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}


@ -137,8 +137,9 @@ std::string rlp_encoder::encode_length(int len, int offset) {
std::string rlp_encoder::hex2bytes(const std::string &s) {
std::string dest;
dest.resize(s.size() / 2);
hex2bin(s.c_str(), &dest[0]);
const auto s_final = s.size() % 2 == 0 ? s : "0" + s;
dest.resize(s_final.size() / 2);
hex2bin(s_final.c_str(), &dest[0]);
return dest;
}
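hex2bytes previously assumed even-length input: an odd-length string lost its leading nibble because dest was sized to size()/2 before decoding. The fix left-pads the string with a single '0' first. A standalone illustration using a local decoder in place of the project's hex2bin:

#include <cstddef>
#include <iostream>
#include <string>

// Minimal re-implementation of the padded decode; the real code calls rlp_encoder::hex2bin.
std::string hex2bytes_padded(const std::string& s) {
   const std::string s_final = (s.size() % 2 == 0) ? s : "0" + s;
   std::string dest(s_final.size() / 2, '\0');
   for (std::size_t i = 0; i < dest.size(); ++i)
      dest[i] = static_cast<char>(std::stoi(s_final.substr(2 * i, 2), nullptr, 16));
   return dest;
}

int main() {
   // "f" is now decoded as the single byte 0x0f instead of being dropped.
   std::cout << hex2bytes_padded("f").size() << ' '
             << static_cast<int>(static_cast<unsigned char>(hex2bytes_padded("f")[0])) << "\n";   // 1 15
}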


@ -22,15 +22,15 @@ typedef fc::ecc::private_key private_key_type;
typedef fc::sha256 chain_id_type;
typedef std::string account_name_type;
typedef fc::ripemd160 block_id_type;
//typedef fc::ripemd160 checksum_type;
// typedef fc::ripemd160 checksum_type;
typedef fc::ripemd160 transaction_id_type;
typedef fc::sha256 digest_type;
typedef fc::ecc::compact_signature signature_type;
typedef fc::safe<int64_t> share_type;
//typedef safe<uint64_t> ushare_type;
//typedef uint16_t weight_type;
//typedef uint32_t contribution_id_type;
//typedef fixed_string<32> custom_id_type;
// typedef safe<uint64_t> ushare_type;
// typedef uint16_t weight_type;
// typedef uint32_t contribution_id_type;
// typedef fixed_string<32> custom_id_type;
struct public_key_type {


@ -100,6 +100,7 @@ private:
uint16_t retries_threshold = 150;
bool first_block_skipped;
bool son_processing_enabled;
void on_applied_block(const signed_block &b);
};
@ -135,7 +136,8 @@ peerplays_sidechain_plugin_impl::peerplays_sidechain_plugin_impl(peerplays_sidec
}
return net_handlers;
}()),
first_block_skipped(false) {
first_block_skipped(false),
son_processing_enabled(false) {
}
peerplays_sidechain_plugin_impl::~peerplays_sidechain_plugin_impl() {
@ -143,7 +145,7 @@ peerplays_sidechain_plugin_impl::~peerplays_sidechain_plugin_impl() {
if (_heartbeat_task.valid())
_heartbeat_task.cancel_and_wait(__FUNCTION__);
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}
@ -154,7 +156,7 @@ peerplays_sidechain_plugin_impl::~peerplays_sidechain_plugin_impl() {
_son_processing_task.at(active_sidechain_type).wait();
}
} catch (fc::canceled_exception &) {
//Expected exception. Move along.
// Expected exception. Move along.
} catch (fc::exception &e) {
edump((e.to_detail_string()));
}
@ -538,11 +540,11 @@ void peerplays_sidechain_plugin_impl::son_processing(sidechain_type sidechain) {
return;
}
//fc::time_point now_fine = fc::time_point::now();
//fc::time_point_sec now = now_fine + fc::microseconds(500000);
//if (plugin.database().get_slot_time(1) < now) {
// return; // Not synced
//}
// fc::time_point now_fine = fc::time_point::now();
// fc::time_point_sec now = now_fine + fc::microseconds(500000);
// if (plugin.database().get_slot_time(1) < now) {
// return; // Not synced
// }
const fc::time_point now_fine = fc::time_point::now();
const fc::time_point_sec now = now_fine - fc::milliseconds(3000);
@ -871,7 +873,16 @@ void peerplays_sidechain_plugin_impl::settle_sidechain_transactions(sidechain_ty
void peerplays_sidechain_plugin_impl::on_applied_block(const signed_block &b) {
if (first_block_skipped) {
schedule_son_processing();
if (son_processing_enabled) {
schedule_son_processing();
} else {
const fc::time_point now_fine = fc::time_point::now();
const fc::time_point_sec now = now_fine + fc::microseconds(500000);
if (plugin.database().get_slot_time(1) >= now) {
son_processing_enabled = true;
schedule_son_processing();
}
}
} else {
first_block_skipped = true;
}
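The new son_processing_enabled flag keeps SON processing off while the node is still replaying or syncing: after the first block is skipped, processing is only scheduled once the next slot time catches up to roughly half a second ahead of wall-clock time, and from then on it runs on every applied block. A reduced sketch of that gate, with the plugin and fc time types replaced by plain microsecond counters:

#include <cstdint>
#include <iostream>

struct sync_gate {
   bool first_block_skipped = false;
   bool son_processing_enabled = false;

   // Returns true when SON processing should be scheduled for this block.
   bool on_applied_block(uint64_t next_slot_time_us, uint64_t now_us) {
      if (!first_block_skipped) { first_block_skipped = true; return false; }
      if (son_processing_enabled) return true;
      if (next_slot_time_us >= now_us + 500000) {    // next slot at/after now + 0.5s -> synced
         son_processing_enabled = true;
         return true;
      }
      return false;                                  // still replaying old blocks
   }
};

int main() {
   sync_gate g;
   std::cout << g.on_applied_block(1000000, 5000000)          // first block: skipped
             << g.on_applied_block(1500000, 5000000)          // far behind head: not synced yet
             << g.on_applied_block(5600000, 5000000) << "\n"; // caught up: prints 001
}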


@ -184,11 +184,11 @@ void sidechain_net_handler::sidechain_event_data_received(const sidechain_event_
bool enable_peerplays_asset_deposits = false;
#ifdef ENABLE_PEERPLAYS_ASSET_DEPOSITS
//enable_peerplays_asset_deposits = (sed.sidechain == sidechain_type::peerplays) &&
// (sed.sidechain_currency.compare("BTC") != 0) &&
// (sed.sidechain_currency.compare("ETH") != 0) &&
// (sed.sidechain_currency.compare("HBD") != 0) &&
// (sed.sidechain_currency.compare("HIVE") != 0);
// enable_peerplays_asset_deposits = (sed.sidechain == sidechain_type::peerplays) &&
// (sed.sidechain_currency.compare("BTC") != 0) &&
// (sed.sidechain_currency.compare("ETH") != 0) &&
// (sed.sidechain_currency.compare("HBD") != 0) &&
// (sed.sidechain_currency.compare("HIVE") != 0);
#endif
const bool deposit_condition = (sed.peerplays_to == gpo.parameters.son_account()) &&
@ -451,7 +451,7 @@ void sidechain_net_handler::process_deposits() {
if (swdo.id == object_id_type(0, 0, 0) || !plugin.can_son_participate(sidechain, chain::operation::tag<chain::son_wallet_deposit_process_operation>::value, swdo.id)) {
return;
}
//Ignore the deposits which are not valid anymore, considered refunds.
// Ignore the deposits which are not valid anymore, considered refunds.
const auto &sidechain_addresses_idx = database.get_index_type<sidechain_address_index>().indices().get<by_sidechain_and_deposit_address_and_expires>();
const auto &addr_itr = sidechain_addresses_idx.find(std::make_tuple(sidechain, swdo.sidechain_from, time_point_sec::maximum()));
if (addr_itr == sidechain_addresses_idx.end()) {
@ -680,8 +680,7 @@ void sidechain_net_handler::on_applied_block(const signed_block &b) {
const bool is_tracked_asset =
((sidechain == sidechain_type::bitcoin) && (transfer_op.amount.asset_id == gpo.parameters.btc_asset())) ||
((sidechain == sidechain_type::ethereum) && (transfer_op.amount.asset_id == gpo.parameters.eth_asset())) ||
((sidechain == sidechain_type::ethereum) && (transfer_op.amount.asset_id != gpo.parameters.btc_asset())
&& (transfer_op.amount.asset_id != gpo.parameters.hbd_asset()) && (transfer_op.amount.asset_id != gpo.parameters.hive_asset())) ||
((sidechain == sidechain_type::ethereum) && (transfer_op.amount.asset_id != gpo.parameters.btc_asset()) && (transfer_op.amount.asset_id != gpo.parameters.hbd_asset()) && (transfer_op.amount.asset_id != gpo.parameters.hive_asset()) && (transfer_op.amount.asset_id != asset_id_type())) ||
((sidechain == sidechain_type::hive) && (transfer_op.amount.asset_id == gpo.parameters.hbd_asset())) ||
((sidechain == sidechain_type::hive) && (transfer_op.amount.asset_id == gpo.parameters.hive_asset()));
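The reformatted condition also adds one more exclusion for the ethereum handler: besides the native ETH asset, any transferred asset that is not BTC, HBD, HIVE or the core asset is now treated as tracked. Condensed into a small predicate with illustrative asset ids:

#include <cstdint>
#include <iostream>

// Illustrative asset ids; 0 is the core asset, the others are arbitrary.
enum : uint32_t { core = 0, btc = 1, eth = 2, hbd = 3, hive = 4, some_erc20 = 7 };

// Combination of the two ethereum clauses above: native ETH, or anything that is not
// BTC / HBD / HIVE / the core asset.
bool ethereum_tracks(uint32_t asset_id) {
   return asset_id == eth ||
          (asset_id != btc && asset_id != hbd && asset_id != hive && asset_id != core);
}

int main() {
   std::cout << ethereum_tracks(eth) << ethereum_tracks(some_erc20)
             << ethereum_tracks(core) << ethereum_tracks(btc) << "\n";   // prints 1100
}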


@ -684,13 +684,18 @@ std::string sidechain_net_handler_ethereum::send_sidechain_transaction(const sid
const ethereum::signature_encoder encoder{function_signature};
#ifdef SEND_RAW_TRANSACTION
const auto data = encoder.encode(transactions);
const std::string params = "[{\"from\":\"" + ethereum::add_0x(public_key) + "\", \"to\":\"" + wallet_contract_address + "\", \"data\":\"" + data + "\"}]";
ethereum::raw_transaction raw_tr;
raw_tr.nonce = rpc_client->get_nonce(ethereum::add_0x(public_key));
raw_tr.gas_price = rpc_client->get_gas_price();
raw_tr.gas_limit = rpc_client->get_gas_limit();
raw_tr.gas_limit = rpc_client->get_estimate_gas(params);
if (raw_tr.gas_limit.empty())
raw_tr.gas_limit = rpc_client->get_gas_limit();
raw_tr.to = wallet_contract_address;
raw_tr.value = "";
raw_tr.data = encoder.encode(transactions);
raw_tr.data = data;
raw_tr.chain_id = ethereum::add_0x(ethereum::to_hex(chain_id));
const auto sign_tr = raw_tr.sign(get_private_key(public_key));
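Instead of always using the static gas limit, the raw-transaction path now asks the node for an estimate via eth_estimateGas with the encoded call data, and only falls back to get_gas_limit() when the estimate comes back empty. A sketch of that fallback with the RPC layer mocked out (function names here are stand-ins, not the rpc_client API):

#include <functional>
#include <iostream>
#include <string>

std::string pick_gas_limit(const std::function<std::string(const std::string&)>& estimate_gas,
                           const std::function<std::string()>& static_gas_limit,
                           const std::string& params) {
   const std::string estimated = estimate_gas(params);   // eth_estimateGas with from/to/data
   return estimated.empty() ? static_gas_limit() : estimated;
}

int main() {
   auto fixed_limit = [] { return std::string{"0x5208"}; };
   std::cout << pick_gas_limit([](const std::string&) { return std::string{"0x9c40"}; }, fixed_limit, "[...]") << ' '
             << pick_gas_limit([](const std::string&) { return std::string{}; }, fixed_limit, "[...]") << "\n";
   // prints: 0x9c40 0x5208
}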
@ -752,8 +757,8 @@ bool sidechain_net_handler_ethereum::settle_sidechain_transaction(const sidechai
if ("0x1" == json_receipt.get<std::string>("result.status")) {
count += 1;
//! Fixme - compare data somehow?
//if( sto.transaction == entry_receipt.second.get<std::string>("data") ) {
//}
// if( sto.transaction == entry_receipt.second.get<std::string>("data") ) {
// }
}
}
@ -804,7 +809,7 @@ optional<asset> sidechain_net_handler_ethereum::estimate_withdrawal_transaction_
}
const auto &public_key = son->sidechain_public_keys.at(sidechain);
const auto data = ethereum::withdrawal_encoder::encode(public_key, boost::multiprecision::uint256_t{1} * boost::multiprecision::uint256_t{10000000000}, son_wallet_withdraw_id_type{0}.operator object_id_type().operator std::string());
const auto data = ethereum::withdrawal_encoder::encode(public_key, boost::multiprecision::uint256_t{1} * boost::multiprecision::uint256_t{10000000000}, "0");
const std::string params = "[{\"from\":\"" + ethereum::add_0x(public_key) + "\", \"to\":\"" + wallet_contract_address + "\", \"data\":\"" + data + "\"}]";
const auto estimate_gas = ethereum::from_hex<int64_t>(rpc_client->get_estimate_gas(params));
@ -909,8 +914,9 @@ void sidechain_net_handler_ethereum::handle_event(const std::string &block_numbe
const boost::property_tree::ptree tx = tx_child.second;
tx_idx = tx_idx + 1;
const std::string from = tx.get<std::string>("from");
const std::string to = tx.get<std::string>("to");
std::string from = tx.get<std::string>("from");
std::transform(from.begin(), from.end(), from.begin(), ::tolower);
std::string cmp_to = to;
std::transform(cmp_to.begin(), cmp_to.end(), cmp_to.begin(), ::toupper);


@ -858,14 +858,14 @@ bool sidechain_net_handler_hive::settle_sidechain_transaction(const sidechain_tr
boost::property_tree::ptree tx_json;
boost::property_tree::read_json(ss_tx, tx_json);
//const chain::global_property_object &gpo = database.get_global_properties();
// const chain::global_property_object &gpo = database.get_global_properties();
std::string tx_txid = tx_json.get<std::string>("result.transaction_id");
uint32_t tx_block_num = tx_json.get<uint32_t>("result.block_num");
const uint32_t last_irreversible_block = std::stoul(rpc_client->get_last_irreversible_block_num());
//std::string tx_address = addr.get_address();
//int64_t tx_amount = -1;
// std::string tx_address = addr.get_address();
// int64_t tx_amount = -1;
if (tx_block_num <= last_irreversible_block) {
if (sto.object_id.is<son_wallet_withdraw_id_type>()) {
@ -918,15 +918,15 @@ void sidechain_net_handler_hive::hive_listener_loop() {
}
}
//std::string reply = rpc_client->get_last_irreversible_block_num();
//if (!reply.empty()) {
// uint64_t last_irreversible_block = std::stoul(reply);
// if (last_irreversible_block != last_block_received) {
// std::string event_data = std::to_string(last_irreversible_block);
// handle_event(event_data);
// last_block_received = last_irreversible_block;
// }
//}
// std::string reply = rpc_client->get_last_irreversible_block_num();
// if (!reply.empty()) {
// uint64_t last_irreversible_block = std::stoul(reply);
// if (last_irreversible_block != last_block_received) {
// std::string event_data = std::to_string(last_irreversible_block);
// handle_event(event_data);
// last_block_received = last_irreversible_block;
// }
// }
}
void sidechain_net_handler_hive::handle_event(const std::string &event_data) {
@ -960,7 +960,7 @@ void sidechain_net_handler_hive::handle_event(const std::string &event_data) {
const auto &amount_child = op_value.get_child("amount");
uint64_t amount = amount_child.get<uint64_t>("amount");
//uint64_t precision = amount_child.get<uint64_t>("precision");
// uint64_t precision = amount_child.get<uint64_t>("precision");
std::string nai = amount_child.get<std::string>("nai");
std::string sidechain_currency = "";
price sidechain_currency_price = {};


@ -24,15 +24,15 @@ namespace graphene { namespace peerplays_sidechain {
sidechain_net_handler_peerplays::sidechain_net_handler_peerplays(peerplays_sidechain_plugin &_plugin, const boost::program_options::variables_map &options) :
sidechain_net_handler(sidechain_type::peerplays, _plugin, options) {
//const auto &assets_by_symbol = database.get_index_type<asset_index>().indices().get<by_symbol>();
//const auto get_asset_id = [&assets_by_symbol](const string &symbol) {
// auto asset_itr = assets_by_symbol.find(symbol);
// FC_ASSERT(asset_itr != assets_by_symbol.end(), "Unable to find asset '${sym}'", ("sym", symbol));
// return asset_itr->get_id();
//};
//tracked_assets.push_back(get_asset_id("PBTC"));
//tracked_assets.push_back(get_asset_id("PETH"));
//tracked_assets.push_back(get_asset_id("PEOS"));
// const auto &assets_by_symbol = database.get_index_type<asset_index>().indices().get<by_symbol>();
// const auto get_asset_id = [&assets_by_symbol](const string &symbol) {
// auto asset_itr = assets_by_symbol.find(symbol);
// FC_ASSERT(asset_itr != assets_by_symbol.end(), "Unable to find asset '${sym}'", ("sym", symbol));
// return asset_itr->get_id();
// };
// tracked_assets.push_back(get_asset_id("PBTC"));
// tracked_assets.push_back(get_asset_id("PETH"));
// tracked_assets.push_back(get_asset_id("PEOS"));
if (options.count("peerplays-private-key")) {
const std::vector<std::string> pub_priv_keys = options["peerplays-private-key"].as<std::vector<std::string>>();
@ -284,8 +284,8 @@ bool sidechain_net_handler_peerplays::settle_sidechain_transaction(const sidecha
}
if (sto.object_id.is<son_wallet_deposit_id_type>()) {
//auto swdo = database.get<son_wallet_deposit_object>(sto.object_id);
//settle_amount = asset(swdo.sidechain_amount, swdo.sidechain_currency);
// auto swdo = database.get<son_wallet_deposit_object>(sto.object_id);
// settle_amount = asset(swdo.sidechain_amount, swdo.sidechain_currency);
}
if (sto.object_id.is<son_wallet_withdraw_id_type>()) {


@ -2773,12 +2773,21 @@ public:
FC_ASSERT(son_obj, "Account ${son} is not registered as a son", ("son", son));
FC_ASSERT(sidechain == sidechain_type::bitcoin || sidechain == sidechain_type::hive || sidechain == sidechain_type::ethereum, "Unexpected sidechain type");
bool update_vote_time = false;
if (approve)
{
FC_ASSERT(son_obj->get_sidechain_vote_id(sidechain).valid(), "Invalid vote id, sidechain: ${sidechain}, son: ${son}", ("sidechain", sidechain)("son", *son_obj));
account_id_type stake_account = get_account_id(voting_account);
const auto gpos_info = _remote_db->get_gpos_info(stake_account);
const auto vesting_subperiod = _remote_db->get_global_properties().parameters.gpos_subperiod();
const auto gpos_start_time = fc::time_point_sec(_remote_db->get_global_properties().parameters.gpos_period_start());
const auto subperiod_start_time = gpos_start_time.sec_since_epoch() + (gpos_info.current_subperiod - 1) * vesting_subperiod;
auto insert_result = voting_account_object.options.votes.insert(*son_obj->get_sidechain_vote_id(sidechain));
if (!insert_result.second)
FC_THROW("Account ${account} has already voted for son ${son} for sidechain ${sidechain}", ("account", voting_account)("son", son)("sidechain", sidechain));
if (!insert_result.second && (gpos_info.last_voted_time.sec_since_epoch() >= subperiod_start_time))
FC_THROW("Account ${account} was already voting for son ${son} in the current GPOS sub-period", ("account", voting_account)("son", son));
else
update_vote_time = true; // Allow the user to vote in each sub-period (update the voting time, which is referenced when calculating the vesting factor)
}
else
{
@ -2787,9 +2796,11 @@ public:
if (!votes_removed)
FC_THROW("Account ${account} has already unvoted for son ${son} for sidechain ${sidechain}", ("account", voting_account)("son", son)("sidechain", sidechain));
}
account_update_operation account_update_op;
account_update_op.account = voting_account_object.id;
account_update_op.new_options = voting_account_object.options;
account_update_op.extensions.value.update_last_voting_time = update_vote_time;
signed_transaction tx;
tx.operations.push_back( account_update_op );
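The wallet-side change above relaxes SON voting under GPOS: a repeat vote for the same SON is only rejected when the account's last_voted_time already falls inside the current sub-period, and every accepted call asks the chain to refresh the last voting time, which feeds the vesting-factor calculation. The sub-period boundary works out as below (all values illustrative):

#include <cstdint>
#include <iostream>

// Sketch of the sub-period check used above.
bool already_voted_this_subperiod(uint32_t gpos_period_start, uint32_t gpos_subperiod,
                                  uint32_t current_subperiod, uint32_t last_voted_time) {
   const uint32_t subperiod_start_time = gpos_period_start + (current_subperiod - 1) * gpos_subperiod;
   return last_voted_time >= subperiod_start_time;
}

int main() {
   // 30-day sub-periods; the account last voted before sub-period 3 began, so it may vote again.
   const uint32_t period_start = 1690000000, subperiod = 2592000;
   std::cout << already_voted_this_subperiod(period_start, subperiod, 3, 1694000000) << "\n";   // prints 0
}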


@ -40,7 +40,7 @@ public:
{
fixture_.init_nathan();
fixture_.generate_blocks(HARDFORK_SON_FOR_ETHEREUM_TIME);
fixture_.generate_block();
fixture_.generate_maintenance_block();
}
void create_son(const std::string& account_name, const std::string& son_url,


@ -574,7 +574,7 @@ BOOST_AUTO_TEST_CASE( son_pay_test )
BOOST_REQUIRE_EQUAL(son_stats_obj2->total_sidechain_txs_reported.at(sidechain_type::hive), 12);
BOOST_REQUIRE_EQUAL(son_stats_obj2->total_sidechain_txs_reported.at(sidechain_type::ethereum), 18);
// Check that Alice and Bob are paid for signing the transactions in the previous day/cycle
BOOST_REQUIRE_EQUAL(db.get_balance(obj1->son_account, asset_id_type()).amount.value, 80+obj1_balance);
BOOST_REQUIRE_EQUAL(db.get_balance(obj1->son_account, asset_id_type()).amount.value, 79+obj1_balance);
BOOST_REQUIRE_EQUAL(db.get_balance(obj2->son_account, asset_id_type()).amount.value, 120+obj2_balance);
// Check the SON Budget is again allocated after maintenance
BOOST_CHECK( dpo.son_budget.value == 200);
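The expected balance for the first SON drops from 80 to 79 here, consistent with the payout formula pay = txs_signed * weight * son_budget / weighted_total_txs_signed being evaluated with integer (floor) division, so individual shares can lose a fraction and the budget keeps a small remainder. Worked with numbers chosen to reproduce the 79/120 split (they are not the test's actual tallies):

#include <cstdint>
#include <iostream>

// Floor division in the payout formula leaves a remainder in the SON budget.
int main() {
   const uint64_t son_budget = 200;
   const uint64_t weighted_total  = 151;          // sum over all SONs of txs_signed * weight
   const uint64_t alice_weighted  = 60;           // first SON's txs_signed * weight
   const uint64_t bob_weighted    = 91;           // second SON's txs_signed * weight
   std::cout << alice_weighted * son_budget / weighted_total << ' '    // 79 (79.47 floored)
             << bob_weighted   * son_budget / weighted_total << "\n";  // 120
}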