Compare commits

..

2 commits

Author      SHA1        Message                     Date
sierra19XX  7606e33a02  init variables              2020-10-11 15:18:47 +00:00
sierra19XX  cb957f74db  hotfix - son max count fix  2020-10-11 10:55:49 +00:00
344 changed files with 14282 additions and 241834 deletions

.clang-format

@@ -1,5 +1,6 @@
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -3
AlignAfterOpenBracket: Align
AlignConsecutiveMacros: false
@@ -11,7 +12,7 @@ AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: Never
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: None
@@ -56,7 +57,6 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 6
ContinuationIndentWidth: 6
Cpp11BracedListStyle: true
DeriveLineEnding: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
@@ -69,17 +69,12 @@ IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
SortPriority: 0
- Regex: '^(<|"(gtest|gmock|isl|json)/)'
Priority: 3
SortPriority: 0
- Regex: '.*'
Priority: 1
SortPriority: 0
IncludeIsMainRegex: '(Test)?$'
IncludeIsMainSourceRegex: ''
IndentCaseLabels: false
IndentGotoLabels: false
IndentPPDirectives: None
IndentWidth: 3
IndentWrappedFunctionNames: false
@@ -115,22 +110,18 @@ SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceBeforeSquareBrackets: false
Standard: Latest
Standard: Cpp11
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
TabWidth: 3
UseCRLF: false
UseTab: Never
...
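
The file above is the project's clang-format configuration (LLVM-derived style, 3-space indentation); note that the two sides of the hunks target different clang-format versions (e.g. Standard: Latest vs. Standard: Cpp11). A minimal sketch of applying it by hand, assuming a matching clang-format is installed and run from the repository root; the source path is only an example:

# Reformat one file in place using the repository's .clang-format
clang-format -i -style=file libraries/app/application.cpp
# Preview the result without modifying the file
clang-format -style=file libraries/app/application.cpp | diff -u libraries/app/application.cpp -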

.gitignore

@@ -14,7 +14,6 @@ data
CMakeDoxyfile.in
build
build__*
libraries/utilities/git_revision.cpp

.gitlab-ci.yml

@@ -8,20 +8,17 @@ include:
stages:
- build
- test
- dockerize
- python-test
- deploy
build-mainnet:
build:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
- rm -rf .git/modules/* ./docs ./libraries/fc
- git submodule sync
- git submodule update --init --recursive
- rm -rf build
- mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release ..
- cmake ..
- make -j$(nproc)
artifacts:
untracked: true
@@ -32,140 +29,13 @@ build-mainnet:
tags:
- builder
test-mainnet:
test:
stage: test
dependencies:
- build-mainnet
- build
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
- ./build/tests/chain_test --log_level=message
- ./build/tests/cli_test --log_level=message
tags:
- builder
dockerize-mainnet:
stage: dockerize
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker builder prune -a -f
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
timeout:
3h
build-testnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
- git submodule sync
- git submodule update --init --recursive
- rm -rf build
- mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..
- make -j$(nproc)
artifacts:
untracked: true
paths:
- build/libraries/
- build/programs/
- build/tests/
when: manual
tags:
- builder
deploy-testnet:
stage: deploy
dependencies:
- build-testnet
script:
- sudo systemctl stop witness
- rm $WORK_DIR/peerplays/witness_node || true
- cp build/programs/witness_node/witness_node $WORK_DIR/peerplays/
- sudo systemctl restart witness
rules:
- if: $CI_COMMIT_BRANCH == "master"
when: always
environment:
name: devnet
url: $DEVNET_URL
tags:
- devnet
test-testnet:
stage: test
dependencies:
- build-testnet
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
- ./build/tests/chain_test --log_level=message
- ./build/tests/cli_test --log_level=message
tags:
- builder
when:
manual
timeout:
1h
dockerize-testnet:
stage: dockerize
variables:
IMAGE: $CI_REGISTRY_IMAGE/testnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h
test-e2e:
stage: python-test
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- git clone https://gitlab.com/PBSA/tools-libs/peerplays-utils.git
- cd peerplays-utils/peerplays-qa-environment
- git checkout origin/feature/python-e2e-tests-for-CI
- cd e2e-tests/
- python3 -m venv venv
- source venv/bin/activate
- pip3 install -r requirements.txt
- docker-compose down --remove-orphans
- docker ps -a
- docker pull $IMAGE
- docker tag $IMAGE peerplays-base:latest
- docker image ls -a
- docker-compose build
- python3 main.py --start all
- docker ps -a
- python3 -m pytest test_btc_init_state.py test_hive_inital_state.py test_pp_inital_state.py
- python3 main.py --stop
- deactivate
- docker ps -a
after_script:
- docker rmi $(docker images -a | grep -v 'hive-for-peerplays\|ethereum-for-peerplays\|bitcoin-for-peerplays\|ubuntu-for-peerplays' | awk '{print $3}')
tags:
- python-tests
when:
manual
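
The build and test jobs above reduce to an ordinary CMake build followed by the unit-test binaries; a sketch of reproducing them locally with the same commands the jobs run, omitting the runner-specific cleanup of .git/modules (the Release flag appears on one side of the diff, plain "cmake .." on the other):

git submodule sync
git submodule update --init --recursive
rm -rf build && mkdir build
cd build && cmake -DCMAKE_BUILD_TYPE=Release .. && make -j$(nproc) && cd ..
# test job commands, run from the repository root as in CI
./build/libraries/fc/tests/all_tests
./build/tests/betting_test --log_level=message
./build/tests/chain_test --log_level=message
./build/tests/cli_test --log_level=message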

.gitmodules

@@ -1,9 +1,9 @@
[submodule "docs"]
path = docs
url = https://github.com/bitshares/bitshares-core.wiki.git
ignore = dirty
path = docs
url = https://github.com/bitshares/bitshares-core.wiki.git
ignore = dirty
[submodule "libraries/fc"]
path = libraries/fc
url = https://gitlab.com/PBSA/tools-libs/peerplays-fc.git
branch = develop
ignore = dirty
path = libraries/fc
url = https://github.com/peerplays-network/peerplays-fc.git
branch = latest-fc
ignore = dirty
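
The hunk above moves the libraries/fc submodule between two remotes and branches. An existing clone does not pick such a change up automatically; a short sketch of re-syncing it, using the same commands the CI jobs in this compare run:

# copy the URLs recorded in .gitmodules into the local git config
git submodule sync
# check the submodules out again at the commits pinned by the superproject
git submodule update --init --recursive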

sonar-project.properties

@@ -1,10 +0,0 @@
sonar.projectKey=peerplays-network_peerplays
sonar.organization=peerplays-network
# This is the name and version displayed in the SonarCloud UI.
sonar.projectName=peerplays
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
sonar.sources=.
sonar.host.url=https://sonarcloud.io

CMakeLists.txt

@@ -1,11 +1,11 @@
# Defines Peerplays library target.
project( Peerplays )
# Defines BitShares library target.
project( BitShares )
cmake_minimum_required( VERSION 2.8.12 )
set( BLOCKCHAIN_NAME "Peerplays" )
set( BLOCKCHAIN_NAME "BitShares" )
set( CLI_CLIENT_EXECUTABLE_NAME graphene_client )
set( GUI_CLIENT_EXECUTABLE_NAME Peerplays )
set( GUI_CLIENT_EXECUTABLE_NAME BitShares )
set( CUSTOM_URL_SCHEME "gcs" )
set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" )
@@ -22,76 +22,8 @@ endif()
list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules" )
function(get_linux_lsb_release_information)
find_program(LSB_RELEASE_EXEC lsb_release)
if(NOT LSB_RELEASE_EXEC)
message(FATAL_ERROR "Could not detect lsb_release executable, can not gather required information")
endif()
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --id OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --release OUTPUT_VARIABLE LSB_RELEASE_VERSION_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --codename OUTPUT_VARIABLE LSB_RELEASE_CODENAME_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
set(LSB_RELEASE_ID_SHORT "${LSB_RELEASE_ID_SHORT}" PARENT_SCOPE)
set(LSB_RELEASE_VERSION_SHORT "${LSB_RELEASE_VERSION_SHORT}" PARENT_SCOPE)
set(LSB_RELEASE_CODENAME_SHORT "${LSB_RELEASE_CODENAME_SHORT}" PARENT_SCOPE)
endfunction()
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
find_package(cppzmq)
target_link_libraries(cppzmq)
get_linux_lsb_release_information()
message(STATUS "Linux ${LSB_RELEASE_ID_SHORT} ${LSB_RELEASE_VERSION_SHORT} ${LSB_RELEASE_CODENAME_SHORT}")
string(REGEX MATCHALL "([0-9]+)" arg_list ${LSB_RELEASE_VERSION_SHORT})
list( LENGTH arg_list listlen )
if (NOT listlen)
message(FATAL_ERROR "Could not detect Ubuntu version")
endif()
list(GET arg_list 0 output)
message("Ubuntu version is: ${output}")
add_definitions(-DPEERPLAYS_UBUNTU_VERSION=${output})
endif()
# function to help with cUrl
macro(FIND_CURL)
if (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
find_package(OpenSSL REQUIRED)
set (OLD_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
set (CMAKE_FIND_LIBRARY_SUFFIXES .a)
find_package(CURL REQUIRED)
list(APPEND CURL_LIBRARIES ${OPENSSL_LIBRARIES} ${BOOST_THREAD_LIBRARY} ${CMAKE_DL_LIBS})
set (CMAKE_FIND_LIBRARY_SUFFIXES ${OLD_SUFFIXES})
else (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
find_package(CURL REQUIRED)
endif (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
if( WIN32 )
if ( MSVC )
list( APPEND CURL_LIBRARIES Wldap32 )
endif( MSVC )
if( MINGW )
# MinGW requires a specific order of included libraries ( CURL before ZLib )
find_package( ZLIB REQUIRED )
list( APPEND CURL_LIBRARIES ${ZLIB_LIBRARY} pthread )
endif( MINGW )
list( APPEND CURL_LIBRARIES ${PLATFORM_SPECIFIC_LIBS} )
endif( WIN32 )
endmacro()
set(CMAKE_EXPORT_COMPILE_COMMANDS "ON")
if (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis-testnet.json" CACHE PATH "location of the genesis.json to embed in the executable" )
#add_compile_definitions(BUILD_PEERPLAYS_TESTNET=1)
add_definitions(-DBUILD_PEERPLAYS_TESTNET=1)
message ("\n====================\nBuilding for Testnet\n====================\n")
else (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis-mainnet.json" CACHE PATH "location of the genesis.json to embed in the executable" )
message ("\n====================\nBuilding for Mainnet\n====================\n")
endif (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis.json" CACHE PATH "location of the genesis.json to embed in the executable" )
#set (ENABLE_INSTALLER 1)
#set (USE_PCH 1)
@@ -114,6 +46,7 @@ LIST(APPEND BOOST_COMPONENTS thread
system
filesystem
program_options
signals
serialization
chrono
unit_test_framework
@@ -138,7 +71,7 @@ ENDIF()
if( WIN32 )
message( STATUS "Configuring Peerplays on WIN32")
message( STATUS "Configuring BitShares on WIN32")
set( DB_VERSION 60 )
set( BDB_STATIC_LIBS 1 )
@@ -170,13 +103,20 @@ if( WIN32 )
SET(TCL_LIBRARY ${TCL_LIBS})
else( WIN32 ) # Apple AND Linux
find_library(READLINE_LIBRARIES NAMES readline)
find_path(READLINE_INCLUDE_DIR readline/readline.h)
#if(NOT READLINE_INCLUDE_DIR OR NOT READLINE_LIBRARIES)
# MESSAGE(FATAL_ERROR "Could not find lib readline.")
#endif()
if( APPLE )
# Apple Specific Options Here
message( STATUS "Configuring Peerplays on OS X" )
message( STATUS "Configuring BitShares on OS X" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -stdlib=libc++ -Wall" )
else( APPLE )
# Linux Specific Options Here
message( STATUS "Configuring Peerplays on Linux" )
message( STATUS "Configuring BitShares on Linux" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -Wall" )
set( rt_library rt )
#set( pthread_library pthread)
@@ -195,7 +135,7 @@ else( WIN32 ) # Apple AND Linux
endif( APPLE )
if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall" )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp -Wno-parentheses -Wno-terminate -Wno-invalid-offsetof -Wno-sign-compare" )
elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" )
if( CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.0.0 )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization" )
@@ -214,7 +154,7 @@ else( WIN32 ) # Apple AND Linux
endif( WIN32 )
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Peerplays for code coverage analysis")
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build BitShares for code coverage analysis")
if(ENABLE_COVERAGE_TESTING)
SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}")
@@ -223,13 +163,13 @@ endif()
add_subdirectory( libraries )
set(BUILD_PEERPLAYS_PROGRAMS TRUE CACHE BOOL "Build peerplays executables (witness node, cli wallet, etc)")
set(BUILD_BITSHARES_PROGRAMS TRUE CACHE BOOL "Build bitshares executables (witness node, cli wallet, etc)")
add_subdirectory( programs )
set(BUILD_PEERPLAYS_TESTS TRUE CACHE BOOL "Build peerplays unit tests")
if( BUILD_PEERPLAYS_TESTS )
set(BUILD_BITSHARES_TESTS TRUE CACHE BOOL "Build bitshares unit tests")
if( BUILD_BITSHARES_TESTS )
add_subdirectory( tests )
endif( BUILD_PEERPLAYS_TESTS )
endif( BUILD_BITSHARES_TESTS )
if (ENABLE_INSTALLER)
@@ -251,18 +191,18 @@ set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}")
set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}")
set(CPACK_PACKAGE_DESCRIPTION "A client for the Peerplays network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the Peerplays network")
set(CPACK_PACKAGE_DESCRIPTION "A client for the BitShares network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the BitShares network")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Peerplays ${CPACK_PACKAGE_VERSION}")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "BitShares ${CPACK_PACKAGE_VERSION}")
if(WIN32)
SET(CPACK_GENERATOR "ZIP;NSIS")
set(CPACK_PACKAGE_NAME "Peerplays") # override above
set(CPACK_PACKAGE_NAME "BitShares") # override above
set(CPACK_NSIS_EXECUTABLES_DIRECTORY .)
set(CPACK_NSIS_PACKAGE_NAME "Peerplays v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_PACKAGE_NAME "BitShares v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"Peerplays\\\"")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"BitShares\\\"")
# it seems like windows zip files usually don't have a single directory inside them, unix tgz frequently do
SET(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0)
@@ -280,8 +220,3 @@ endif(LINUX)
include(CPack)
endif(ENABLE_INSTALLER)
unset(GRAPHENE_EGENESIS_JSON)
unset(GRAPHENE_EGENESIS_JSON CACHE)
unset(BUILD_PEERPLAYS_TESTNET)
unset(BUILD_PEERPLAYS_TESTNET CACHE)
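
On the side of this diff that keeps the network switch, BUILD_PEERPLAYS_TESTNET selects which genesis file is embedded (genesis-testnet.json instead of genesis-mainnet.json) and adds a BUILD_PEERPLAYS_TESTNET compile definition. A hedged configure sketch, using only flags that appear in this compare and assuming it is run from an empty build directory:

# mainnet build (default): embeds genesis-mainnet.json
cmake -DCMAKE_BUILD_TYPE=Release ..
# testnet build: embeds genesis-testnet.json and defines BUILD_PEERPLAYS_TESTNET
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..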

Dockerfile

@@ -1,218 +1,96 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
MAINTAINER PeerPlays Blockchain Standards Association
#===============================================================================
# Ubuntu setup
#===============================================================================
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LC_ALL en_US.UTF-8
RUN \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
apt-utils \
autoconf \
bash \
bison \
build-essential \
ca-certificates \
cmake \
dnsutils \
expect \
flex \
doxygen \
git \
graphviz \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libsnappy-dev \
libreadline-dev \
libssl-dev \
libtool \
libzip-dev \
libzmq3-dev \
locales \
lsb-release \
mc \
nano \
net-tools \
ntp \
openssh-server \
pkg-config \
python3 \
python3-jinja2 \
sudo \
systemd-coredump \
wget
ENV HOME /home/peerplays
RUN useradd -rm -d /home/peerplays -s /bin/bash -g root -G sudo -u 1000 peerplays
RUN echo "peerplays ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/peerplays
RUN chmod 440 /etc/sudoers.d/peerplays
RUN service ssh start
RUN echo 'peerplays:peerplays' | chpasswd
# SSH
EXPOSE 22
WORKDIR /home/peerplays/src
#===============================================================================
# Boost setup
#===============================================================================
wget \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN \
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz && \
tar -xzf boost_1_72_0.tar.gz && \
cd boost_1_72_0 && \
./bootstrap.sh && \
sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
locale-gen
# Compile Boost
RUN \
BOOST_ROOT=$HOME/boost_1_67_0 && \
wget -c 'http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.gz/download' -O boost_1_67_0.tar.gz &&\
tar -zxvf boost_1_67_0.tar.gz && \
cd boost_1_67_0/ && \
./bootstrap.sh "--prefix=$BOOST_ROOT" && \
./b2 install && \
ldconfig && \
rm -rf /home/peerplays/src/*
cd ..
#===============================================================================
# cmake setup
#===============================================================================
ADD . /peerplays-core
WORKDIR /peerplays-core
# Compile Peerplays
RUN \
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
chmod 755 ./cmake-3.24.2-linux-x86_64.sh && \
./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license && \
cmake --version && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz && \
tar -xzvf v4.3.4.tar.gz && \
cd libzmq-4.3.4 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cppzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz && \
tar -xzvf v4.9.0.tar.gz && \
cd cppzmq-4.9.0 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# gsl setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libpcre3-dev
RUN \
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz && \
tar -xzvf v4.1.4.tar.gz && \
cd gsl-4.1.4 && \
make -j$(nproc) && \
make install && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libbitcoin-build setup
# libbitcoin-explorer setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libsodium-dev
RUN \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# Doxygen setup
#===============================================================================
RUN \
sudo apt install -y bison flex && \
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz && \
tar -xvf Release_1_8_17.tar.gz && \
cd doxygen-Release_1_8_17 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Perl setup
#===============================================================================
RUN \
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz && \
tar -xvf v5.30.0.tar.gz && \
cd perl5-5.30.0 && \
./Configure -des && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Peerplays setup
#===============================================================================
## Clone Peerplays
#RUN \
# git clone https://gitlab.com/PBSA/peerplays.git && \
# cd peerplays && \
# git checkout develop && \
# git submodule update --init --recursive && \
# git branch --show-current && \
# git log --oneline -n 5
# Add local source
ADD . peerplays
# Configure Peerplays
RUN \
cd peerplays && \
BOOST_ROOT=$HOME/boost_1_67_0 && \
git submodule sync --recursive && \
git submodule update --init --recursive && \
git log --oneline -n 5 && \
mkdir build && \
cd build && \
cmake -DCMAKE_BUILD_TYPE=Release ..
mkdir build/release && \
cd build/release && \
cmake \
-DBOOST_ROOT="$BOOST_ROOT" \
-DCMAKE_BUILD_TYPE=Debug \
../.. && \
make witness_node cli_wallet && \
install -s programs/witness_node/witness_node programs/cli_wallet/cli_wallet /usr/local/bin && \
#
# Obtain version
mkdir /etc/peerplays && \
git rev-parse --short HEAD > /etc/peerplays/version && \
cd / && \
rm -rf /peerplays-core
# Build Peerplays
RUN \
cd peerplays/build && \
make -j$(nproc) cli_wallet witness_node
# Home directory $HOME
WORKDIR /
RUN useradd -s /bin/bash -m -d /var/lib/peerplays peerplays
ENV HOME /var/lib/peerplays
RUN chown peerplays:peerplays -R /var/lib/peerplays
WORKDIR /home/peerplays/peerplays-network
# Volume
VOLUME ["/var/lib/peerplays", "/etc/peerplays"]
# Setup Peerplays runimage
RUN \
ln -s /home/peerplays/src/peerplays/build/programs/cli_wallet/cli_wallet ./ && \
ln -s /home/peerplays/src/peerplays/build/programs/witness_node/witness_node ./
RUN ./witness_node --create-genesis-json genesis.json && \
rm genesis.json
RUN chown peerplays:root -R /home/peerplays/peerplays-network
# Peerplays RPC
# rpc service:
EXPOSE 8090
# Peerplays P2P:
EXPOSE 9777
# p2p service:
EXPOSE 1776
# Peerplays
CMD ["./witness_node", "-d", "./witness_node_data_dir"]
# default exec/config files
ADD docker/default_config.ini /etc/peerplays/config.ini
ADD docker/peerplaysentry.sh /usr/local/bin/peerplaysentry.sh
RUN chmod a+x /usr/local/bin/peerplaysentry.sh
# Make Docker send SIGINT instead of SIGTERM to the daemon
STOPSIGNAL SIGINT
# default execute entry
CMD ["/usr/local/bin/peerplaysentry.sh"]


@@ -1,219 +0,0 @@
FROM ubuntu:18.04
#===============================================================================
# Ubuntu setup
#===============================================================================
RUN \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
apt-utils \
autoconf \
bash \
bison \
build-essential \
ca-certificates \
dnsutils \
expect \
flex \
git \
graphviz \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libsnappy-dev \
libssl-dev \
libtool \
libzip-dev \
locales \
lsb-release \
mc \
nano \
net-tools \
ntp \
openssh-server \
pkg-config \
python3 \
python3-jinja2 \
sudo \
systemd-coredump \
wget
ENV HOME /home/peerplays
RUN useradd -rm -d /home/peerplays -s /bin/bash -g root -G sudo -u 1000 peerplays
RUN echo "peerplays ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/peerplays
RUN chmod 440 /etc/sudoers.d/peerplays
RUN service ssh start
RUN echo 'peerplays:peerplays' | chpasswd
# SSH
EXPOSE 22
WORKDIR /home/peerplays/src
#===============================================================================
# Boost setup
#===============================================================================
RUN \
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz && \
tar -xzf boost_1_72_0.tar.gz && \
cd boost_1_72_0 && \
./bootstrap.sh && \
./b2 install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cmake setup
#===============================================================================
RUN \
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
chmod 755 ./cmake-3.24.2-linux-x86_64.sh && \
./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license && \
cmake --version && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz && \
tar -xzvf v4.3.4.tar.gz && \
cd libzmq-4.3.4 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cppzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz && \
tar -xzvf v4.9.0.tar.gz && \
cd cppzmq-4.9.0 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# gsl setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libpcre3-dev
RUN \
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz && \
tar -xzvf v4.1.4.tar.gz && \
cd gsl-4.1.4 && \
make -j$(nproc) && \
make install && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libbitcoin-build setup
# libbitcoin-explorer setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libsodium-dev
RUN \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# Doxygen setup
#===============================================================================
RUN \
sudo apt install -y bison flex && \
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz && \
tar -xvf Release_1_8_17.tar.gz && \
cd doxygen-Release_1_8_17 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Perl setup
#===============================================================================
RUN \
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz && \
tar -xvf v5.30.0.tar.gz && \
cd perl5-5.30.0 && \
./Configure -des && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Peerplays setup
#===============================================================================
## Clone Peerplays
#RUN \
# git clone https://gitlab.com/PBSA/peerplays.git && \
# cd peerplays && \
# git checkout develop && \
# git submodule update --init --recursive && \
# git branch --show-current && \
# git log --oneline -n 5
# Add local source
ADD . peerplays
# Configure Peerplays
RUN \
cd peerplays && \
git submodule update --init --recursive && \
git symbolic-ref --short HEAD && \
git log --oneline -n 5 && \
mkdir build && \
cd build && \
cmake -DCMAKE_BUILD_TYPE=Release ..
# Build Peerplays
RUN \
cd peerplays/build && \
make -j$(nproc) cli_wallet witness_node
WORKDIR /home/peerplays/peerplays-network
# Setup Peerplays runimage
RUN \
ln -s /home/peerplays/src/peerplays/build/programs/cli_wallet/cli_wallet ./ && \
ln -s /home/peerplays/src/peerplays/build/programs/witness_node/witness_node ./
RUN ./witness_node --create-genesis-json genesis.json && \
rm genesis.json
RUN chown peerplays:root -R /home/peerplays/peerplays-network
# Peerplays RPC
EXPOSE 8090
# Peerplays P2P:
EXPOSE 9777
# Peerplays
CMD ["./witness_node", "-d", "./witness_node_data_dir"]

Doxyfile

@@ -1,4 +1,4 @@
# Doxyfile 1.8.17
# Doxyfile 1.8.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@@ -17,11 +17,11 @@
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
@@ -93,14 +93,6 @@ ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
@@ -187,16 +179,6 @@ SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
@@ -244,12 +226,7 @@ TAB_SIZE = 4
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
# newlines.
ALIASES =
@@ -287,26 +264,17 @@ OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat
# .inc files as Fortran files (default is PHP), and .f files as C (default is
# Fortran), use: inc=Fortran f=C.
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
# Fortran. In the later case the parser tries to guess whether the code is fixed
# or free formatted code, this is the default for Fortran type files), VHDL. For
# instance to make doxygen treat .inc files as Fortran files (default is PHP),
# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
@@ -317,7 +285,7 @@ EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
@@ -325,15 +293,6 @@ EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
@@ -359,7 +318,7 @@ BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
@@ -384,13 +343,6 @@ IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
@@ -465,12 +417,6 @@ EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
@@ -525,8 +471,8 @@ HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
@@ -549,7 +495,7 @@ INTERNAL_DOCS = NO
# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# (including Cygwin) ands Mac users are advised to set this option to NO.
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = NO
@@ -736,7 +682,7 @@ LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
@@ -781,18 +727,11 @@ WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
@@ -816,19 +755,15 @@ WARN_LOGFILE =
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = doc/main.dox \
libraries/chain \
libraries/chain/db \
libraries/app \
libraries/wallet
INPUT = doc/main.dox libraries/chain libraries/chain/db libraries/app libraries/wallet
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
@@ -836,19 +771,12 @@ INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
@@ -934,10 +862,6 @@ IMAGE_PATH =
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
@@ -947,10 +871,6 @@ INPUT_FILTER =
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
@@ -1003,7 +923,7 @@ INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
@@ -1035,12 +955,12 @@ SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
@@ -1062,35 +982,6 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
# cost of reduced performance. This can be particularly helpful with template
# rich C++ code for which doxygen's built-in parser lacks the necessary type
# information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the compilation database (see:
# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files
# were built. This is equivalent to specifying the "-p" option to a clang tool,
# such as clang-check. These options will then be passed to the parser.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@@ -1209,7 +1100,7 @@ HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
@@ -1238,23 +1129,11 @@ HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# to NO can help when comparing the output of multiple runs.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
@@ -1279,13 +1158,13 @@ HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: https://developer.apple.com/xcode/), introduced with OSX
# 10.5 (Leopard). To create a documentation set, doxygen will generate a
# environment (see: http://developer.apple.com/tools/xcode/), introduced with
# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
@@ -1324,7 +1203,7 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
@@ -1400,7 +1279,7 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1408,7 +1287,7 @@ QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1417,7 +1296,7 @@ QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1425,7 +1304,7 @@ QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
@@ -1433,7 +1312,7 @@ QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
@@ -1526,7 +1405,7 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# Use the FORMULA_TRANPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
@@ -1537,14 +1416,8 @@ FORMULA_TRANSPARENT = YES
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want to formulas look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
@@ -1571,8 +1444,8 @@ MATHJAX_FORMAT = HTML-CSS
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/.
# MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
@@ -1614,7 +1487,7 @@ MATHJAX_CODEFILE =
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# implemented using a web server instead of a web client using Javascript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
@@ -1633,7 +1506,7 @@ SERVER_BASED_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/).
# Xapian (see: http://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
@@ -1646,7 +1519,7 @@ EXTERNAL_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/). See the section "External Indexing and
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
@@ -1698,35 +1571,21 @@ LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex when enabling
# USE_PDFLATEX the default is pdflatex and when in the later case latex is
# chosen this is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# Note that when enabling USE_PDFLATEX this option is only used for generating
# bitmaps for formulas in the HTML output, but not in the Makefile that is
# written to the output directory.
# The default file is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
@@ -1745,12 +1604,9 @@ COMPACT_LATEX = NO
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# that should be included in the LaTeX output. To get the times font for
# instance you can specify
# EXTRA_PACKAGES=times
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -1847,28 +1703,12 @@ LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
@@ -1908,9 +1748,9 @@ COMPACT_RTF = NO
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
# Load stylesheet definitions from file. Syntax is similar to doxygen's config
# file, i.e. a series of assignments. You only have to provide replacements,
# missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
@ -1919,8 +1759,8 @@ RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
@ -2006,13 +1846,6 @@ XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
@ -2045,9 +1878,9 @@ DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# AutoGen Definitions (see http://autogen.sf.net) file that captures the
# structure of the code including all documentation. Note that this feature is
# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
@ -2214,6 +2047,12 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
# The default file (with absolute path) is: /usr/bin/perl.
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
@ -2227,6 +2066,15 @@ EXTERNAL_PAGES = YES
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see:
# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
@ -2245,7 +2093,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: YES.
# The default value is: NO.
HAVE_DOT = NO
@ -2359,8 +2207,7 @@ INCLUDED_BY_GRAPH = YES
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# functions only using the \callgraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2371,8 +2218,7 @@ CALL_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# functions only using the \callergraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2395,17 +2241,11 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# generated by dot.
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# Possible values are: png, jpg, gif and svg.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2456,11 +2296,6 @@ DIAFILE_DIRS =
PLANTUML_JAR_PATH =
# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for plantuml.
PLANTUML_CFG_FILE =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.

221
README.md
View file

@ -2,193 +2,100 @@ Intro for new developers and witnesses
------------------------
This is a quick introduction to get new developers and witnesses up to speed on the Peerplays blockchain. It is intended for witnesses planning to join a live, already deployed blockchain.
# Building on Ubuntu 18.04 LTS and Installation Instructions
The following dependencies were necessary for a clean install of Ubuntu 18.04 LTS:
```
sudo apt-get install autoconf bash build-essential ca-certificates cmake \
doxygen git graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev \
libreadline-dev libssl-dev libtool libzmq3-dev locales ntp pkg-config \
wget
```
## Build Boost 1.67.0
# Building and Installation Instructions
The officially supported operating systems are Ubuntu 20.04 and Ubuntu 18.04.
## Ubuntu 20.04 and 18.04
The following dependencies are needed for a clean install of Ubuntu 20.04 or Ubuntu 18.04:
```
sudo apt-get install \
autoconf bash bison build-essential ca-certificates dnsutils expect flex git \
graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev libpcre3-dev \
libsnappy-dev libsodium-dev libssl-dev libtool libzip-dev locales lsb-release \
mc nano net-tools ntp openssh-server pkg-config python3 python3-jinja2 sudo \
systemd-coredump wget
mkdir $HOME/src
cd $HOME/src
export BOOST_ROOT=$HOME/src/boost_1_67_0
sudo apt-get update
sudo apt-get install -y autotools-dev build-essential libbz2-dev libicu-dev python-dev
wget -c 'http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.bz2/download'\
-O boost_1_67_0.tar.bz2
tar xjf boost_1_67_0.tar.bz2
cd boost_1_67_0/
./bootstrap.sh "--prefix=$BOOST_ROOT"
./b2 install
```
Boost libraries setup:
```
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz
tar -xzf boost_1_72_0.tar.gz boost_1_72_0
cd boost_1_72_0
./bootstrap.sh
./b2
sudo ./b2 install
sudo ldconfig
```
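A quick sanity check that the expected Boost version was installed (assuming the default `/usr/local` prefix used by the commands above):
```
# Print the installed Boost version string (expects something like "1_72")
grep BOOST_LIB_VERSION /usr/local/include/boost/version.hpp
```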
cmake setup:
```
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh
chmod 755 ./cmake-3.24.2-linux-x86_64.sh
sudo ./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license
cmake --version
```
## Building Peerplays
libzmq setup:
```
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz
tar -xzvf v4.3.4.tar.gz
cd libzmq-4.3.4
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
cppzmq setup:
```
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz
tar -xzvf v4.9.0.tar.gz
cd cppzmq-4.9.0
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
gsl setup:
```
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz
tar -xzvf v4.1.4.tar.gz
cd gsl-4.1.4
make -j$(nproc)
sudo make install
sudo ldconfig
```
libbitcoin-explorer setup:
```
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git
cd libbitcoin-explorer
sudo ./install.sh
sudo ldconfig
```
Doxygen setup:
```
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz
tar -xvf Release_1_8_17.tar.gz
cd doxygen-Release_1_8_17
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
Perl setup:
```
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz
tar -xvf v5.30.0.tar.gz
cd perl5-5.30.0
./Configure -des
make -j$(nproc)
sudo make install
sudo ldconfig
```
Building Peerplays
```
git clone https://gitlab.com/PBSA/peerplays.git
cd $HOME/src
export BOOST_ROOT=$HOME/src/boost_1_67_0
git clone https://github.com/peerplays-network/peerplays.git
cd peerplays
git submodule update --init --recursive
# If you want to build a Mainnet node
cmake -DCMAKE_BUILD_TYPE=Release
# If you want to build a Testnet node
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1
# Update -j flag depending on your current system specs;
# Recommended 4GB of RAM per 1 CPU core
# make -j2 for 8GB RAM
# make -j4 for 16GB RAM
# make -j8 for 32GB RAM
cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release
make -j$(nproc)
sudo make install # installs the executables under /usr/local
make install # installs the executables under /usr/local
```
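After the build finishes, the main executables sit under `programs/` in the build tree (and under `/usr/local` after `make install`). A minimal smoke test, assuming the standard graphene `--help` option, is:
```
# Confirm the freshly built binaries start and print their option summaries
./programs/witness_node/witness_node --help | head -n 5
./programs/cli_wallet/cli_wallet --help | head -n 5
```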
## Docker images
docker build -t peerplays .
## Docker image
Install Docker and add the current user to the docker group.
```
# Install docker
sudo apt install docker.io
sudo usermod -a -G docker $USER
# Add current user to docker group
sudo usermod -a -G docker $USER
# You need to restart your shell session, to apply group membership
# Type 'groups' to verify that you are a member of a docker group
# Build docker image (from the project root, must be a docker group member)
docker build -t peerplays .
# Start docker image
docker start peerplays
# Exposed ports
# # rpc service:
# EXPOSE 8090
# # p2p service:
# EXPOSE 1776
```
### Official docker image for Peerplays Mainnet
```
docker pull datasecuritynode/peerplays:latest
```
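A hypothetical way to run the pulled mainnet image with the documented RPC (8090) and P2P (1776) ports published on the host:
```
# Run the official mainnet image in the background with both service ports published
docker run -d --name peerplays-mainnet \
  -p 8090:8090 -p 1776:1776 \
  datasecuritynode/peerplays:latest
```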
### Building docker images manually
```
# Checkout the code
git clone https://gitlab.com/PBSA/peerplays.git
cd peerplays
# Checkout the branch you want
# E.g.
# git checkout beatrice
# git checkout develop
git checkout master
git submodule update --init --recursive
# Execute from the project root, must be a docker group member
# Build docker image, using Ubuntu 20.04 base
docker build --no-cache -f Dockerfile -t peerplays .
# Build docker image, using Ubuntu 18.04 base
docker build --no-cache -f Dockerfile.18.04 -t peerplays-18-04 .
```
### Start docker image
```
# Start docker image, using Ubuntu 20.04 base
docker run peerplays:latest
# Start docker image, using Ubuntu 18.04 base
docker run peerplays-18-04:latest
```
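For anything longer-lived than a quick test, it usually helps to name the container and follow its output. A sketch using the image tags built above:
```
# Run the locally built image detached, then follow the node's log output
docker run -d --name peerplays-node peerplays:latest
docker logs -f peerplays-node
```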
The rest of the instructions on starting the chain remain the same.
Starting A Peerplays Node
-----------------
For users on Ubuntu 14.04 LTS and later, see
[this guide](https://github.com/cryptonomex/graphene/wiki/build-ubuntu) and
then proceed with:
git clone https://github.com/peerplays-network/peerplays.git
cd peerplays
git submodule update --init --recursive
cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release .
make
./programs/witness_node/witness_node
Launching the witness node creates the required directories. Next, **stop the witness** and continue.
$ vi witness_node_data_dir/config.ini
p2p-endpoint = 0.0.0.0:9777
rpc-endpoint = 127.0.0.1:8090
seed-node = 213.184.225.234:59500
Start the witness back up
./programs/witness_node/witness_node
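Once the node is back up it should serve the websocket RPC endpoint configured above. A minimal sketch of connecting the CLI wallet to it, assuming the usual graphene `-s`/`--server-rpc-endpoint` flag:
```
# Connect the CLI wallet to the local node's websocket RPC (endpoint from config.ini above)
./programs/cli_wallet/cli_wallet -s ws://127.0.0.1:8090
```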
@ -248,7 +155,7 @@ Create your witness (substitute the url for your witness information)
```
create_witness your_witness_username "url" true
```
**Be sure to take note of the block_signing_key**
IMPORTANT (issue below command using block_signing_key just obtained)
```

794
bkup_CMakeCache.txt Normal file
View file

@ -0,0 +1,794 @@
# This is the CMakeCache file.
# For build in directory: /home/pbattu/git/18.04/peerplays
# It was generated by CMake: /usr/bin/cmake
# You can edit this file to change values found and used by cmake.
# If you do not want to change any of the values, simply exit the editor.
# If you do want to change a value, simply edit, save, and exit the editor.
# The syntax for the file is as follows:
# KEY:TYPE=VALUE
# KEY is the name of a variable in the cache.
# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!.
# VALUE is the current value for the KEY.
########################
# EXTERNAL cache entries
########################
//No help, variable specified on the command line.
BOOST_ROOT:PATH=/home/pbattu/git/18.04/boost_1_67_0
//The threading library used by boost-thread
BOOST_THREAD_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libpthread.so
//Build bitshares executables (witness node, cli wallet, etc)
BUILD_BITSHARES_PROGRAMS:BOOL=TRUE
//Build bitshares unit tests
BUILD_BITSHARES_TESTS:BOOL=TRUE
//Build websocketpp examples.
BUILD_EXAMPLES:BOOL=OFF
//Build websocketpp tests.
BUILD_TESTS:BOOL=OFF
//Value Computed by CMake
BitShares_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays
//Value Computed by CMake
BitShares_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays
//Boost chrono library (debug)
Boost_CHRONO_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a
//Boost chrono library (release)
Boost_CHRONO_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a
//Boost context library (debug)
Boost_CONTEXT_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a
//Boost context library (release)
Boost_CONTEXT_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a
//Boost coroutine library (debug)
Boost_COROUTINE_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a
//Boost coroutine library (release)
Boost_COROUTINE_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a
//Boost date_time library (debug)
Boost_DATE_TIME_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a
//Boost date_time library (release)
Boost_DATE_TIME_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a
//The directory containing a CMake configuration file for Boost.
Boost_DIR:PATH=Boost_DIR-NOTFOUND
//Boost filesystem library (debug)
Boost_FILESYSTEM_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a
//Boost filesystem library (release)
Boost_FILESYSTEM_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a
//Path to a file.
Boost_INCLUDE_DIR:PATH=/home/pbattu/git/18.04/boost_1_67_0/include
//Boost iostreams library (debug)
Boost_IOSTREAMS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a
//Boost iostreams library (release)
Boost_IOSTREAMS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a
//Boost library directory
Boost_LIBRARY_DIR:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost library directory DEBUG
Boost_LIBRARY_DIR_DEBUG:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost library directory RELEASE
Boost_LIBRARY_DIR_RELEASE:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost locale library (debug)
Boost_LOCALE_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a
//Boost locale library (release)
Boost_LOCALE_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a
//Boost program_options library (debug)
Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a
//Boost program_options library (release)
Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a
//Boost serialization library (debug)
Boost_SERIALIZATION_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a
//Boost serialization library (release)
Boost_SERIALIZATION_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a
//Boost signals library (debug)
Boost_SIGNALS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a
//Boost signals library (release)
Boost_SIGNALS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a
//Boost system library (debug)
Boost_SYSTEM_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a
//Boost system library (release)
Boost_SYSTEM_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a
//Boost thread library (debug)
Boost_THREAD_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a
//Boost thread library (release)
Boost_THREAD_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a
//Boost unit_test_framework library (debug)
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a
//Boost unit_test_framework library (release)
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a
//ON or OFF
Boost_USE_STATIC_LIBS:STRING=ON
//Path to a program.
CMAKE_AR:FILEPATH=/usr/bin/ar
//Choose the type of build, options are: None(CMAKE_CXX_FLAGS or
// CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.
CMAKE_BUILD_TYPE:STRING=Debug
//Enable/Disable color output during build.
CMAKE_COLOR_MAKEFILE:BOOL=ON
//Configurations
CMAKE_CONFIGURATION_TYPES:STRING=Release;RelWithDebInfo;Debug
//CXX compiler
CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/g++-5
//A wrapper around 'ar' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-5
//A wrapper around 'ranlib' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-5
//Flags used by the compiler during all build types.
CMAKE_CXX_FLAGS:STRING=
//Flags used by the compiler during debug builds.
CMAKE_CXX_FLAGS_DEBUG:STRING=-g
//Flags used by the compiler during release builds for minimum
// size.
CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG
//Flags used by the compiler during release builds.
CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG
//Flags used by the compiler during release builds with debug info.
CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG
//C compiler
CMAKE_C_COMPILER:FILEPATH=/usr/bin/gcc-5
//A wrapper around 'ar' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_C_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-5
//A wrapper around 'ranlib' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_C_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-5
//Flags used by the compiler during all build types.
CMAKE_C_FLAGS:STRING=
//Flags used by the compiler during debug builds.
CMAKE_C_FLAGS_DEBUG:STRING=-g
//Flags used by the compiler during release builds for minimum
// size.
CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG
//Flags used by the compiler during release builds.
CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG
//Flags used by the compiler during release builds with debug info.
CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG
//Flags used by the linker.
CMAKE_EXE_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Enable/Disable output of compile commands during generation.
CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=OFF
//Install path prefix, prepended onto install directories.
CMAKE_INSTALL_PREFIX:PATH=/usr/local
//Path to a program.
CMAKE_LINKER:FILEPATH=/usr/bin/ld
//Path to a program.
CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/make
//Flags used by the linker during the creation of modules.
CMAKE_MODULE_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Path to a program.
CMAKE_NM:FILEPATH=/usr/bin/nm
//Path to a program.
CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy
//Path to a program.
CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump
//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=BitShares
//Path to a program.
CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib
//Flags used by the linker during the creation of dll's.
CMAKE_SHARED_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//If set, runtime paths are not added when installing shared libraries,
// but are added when building.
CMAKE_SKIP_INSTALL_RPATH:BOOL=NO
//If set, runtime paths are not added when using shared libraries.
CMAKE_SKIP_RPATH:BOOL=NO
//Flags used by the linker during the creation of static libraries.
CMAKE_STATIC_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_STATIC_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_STATIC_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Path to a program.
CMAKE_STRIP:FILEPATH=/usr/bin/strip
//If this value is on, makefiles will be generated without the
// .SILENT directive, and all commands will be echoed to the console
// during the make. This is useful for debugging only. With Visual
// Studio IDE projects all commands are done without /nologo.
CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE
//Path to a library.
CURSES_CURSES_LIBRARY:FILEPATH=CURSES_CURSES_LIBRARY-NOTFOUND
//Path to a library.
CURSES_FORM_LIBRARY:FILEPATH=CURSES_FORM_LIBRARY-NOTFOUND
//Path to a file.
CURSES_INCLUDE_PATH:PATH=CURSES_INCLUDE_PATH-NOTFOUND
//Path to a library.
CURSES_NCURSES_LIBRARY:FILEPATH=CURSES_NCURSES_LIBRARY-NOTFOUND
//Dot tool for use with Doxygen
DOXYGEN_DOT_EXECUTABLE:FILEPATH=DOXYGEN_DOT_EXECUTABLE-NOTFOUND
//Doxygen documentation generation tool (http://www.doxygen.org)
DOXYGEN_EXECUTABLE:FILEPATH=DOXYGEN_EXECUTABLE-NOTFOUND
//secp256k1 or openssl or mixed
ECC_IMPL:STRING=secp256k1
//Build BitShares for code coverage analysis
ENABLE_COVERAGE_TESTING:BOOL=FALSE
//Build websocketpp with CPP11 features enabled.
ENABLE_CPP11:BOOL=ON
//TRUE to try to use full zlib for compression, FALSE to use miniz.c
FC_USE_FULL_ZLIB:BOOL=FALSE
//Git command line client
GIT_EXECUTABLE:FILEPATH=/usr/bin/git
//location of the genesis.json to embed in the executable
GRAPHENE_EGENESIS_JSON:PATH=/home/pbattu/git/18.04/peerplays/genesis.json
//The directory containing a CMake configuration file for Gperftools.
Gperftools_DIR:PATH=Gperftools_DIR-NOTFOUND
//Installation directory for CMake files
INSTALL_CMAKE_DIR:PATH=lib/cmake/websocketpp
//Installation directory for header files
INSTALL_INCLUDE_DIR:PATH=include
//Log long API calls over websocket (ON OR OFF)
LOG_LONG_API:BOOL=ON
//Max API execution time in ms
LOG_LONG_API_MAX_MS:STRING=1000
//API execution time in ms at which to warn
LOG_LONG_API_WARN_MS:STRING=750
//Path to a library.
OPENSSL_CRYPTO_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcrypto.a
//Path to a file.
OPENSSL_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
OPENSSL_SSL_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libssl.a
//Path to a program.
PERL_EXECUTABLE:FILEPATH=/usr/bin/perl
//pkg-config executable
PKG_CONFIG_EXECUTABLE:FILEPATH=/usr/bin/pkg-config
//Path to a file.
READLINE_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
READLINE_LIBRARIES:FILEPATH=/usr/lib/x86_64-linux-gnu/libreadline.so
//Path to a file.
Readline_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
Readline_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libreadline.so
//Path to a file.
Readline_ROOT_DIR:PATH=/usr
//OFF
UNITY_BUILD:BOOL=OFF
//Path to a file.
ZLIB_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
ZLIB_LIBRARY_DEBUG:FILEPATH=ZLIB_LIBRARY_DEBUG-NOTFOUND
//Path to a library.
ZLIB_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libz.so
//Value Computed by CMake
fc_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc
//Dependencies for the target
fc_LIB_DEPENDS:STATIC=general;-L/usr/local/lib;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a;general;/usr/lib/x86_64-linux-gnu/libpthread.so;general;/usr/lib/x86_64-linux-gnu/libssl.a;general;/usr/lib/x86_64-linux-gnu/libcrypto.a;general;/usr/lib/x86_64-linux-gnu/libz.so;general;dl;general;rt;general;/usr/lib/x86_64-linux-gnu/libreadline.so;general;secp256k1;
//Value Computed by CMake
fc_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc
//Dependencies for the target
graphene_account_history_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_accounts_list_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_affiliate_stats_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_app_LIB_DEPENDS:STATIC=general;graphene_market_history;general;graphene_account_history;general;graphene_accounts_list;general;graphene_affiliate_stats;general;graphene_chain;general;fc;general;graphene_db;general;graphene_net;general;graphene_time;general;graphene_utilities;general;graphene_debug_witness;general;graphene_bookie;
//Dependencies for the target
graphene_bookie_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_chain_LIB_DEPENDS:STATIC=general;fc;general;graphene_db;
//Dependencies for the target
graphene_db_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_debug_witness_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_delayed_node_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_egenesis_brief_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_egenesis_full_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_egenesis_none_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_generate_genesis_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;general;graphene_time;
//Dependencies for the target
graphene_generate_uia_sharedrop_genesis_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;general;graphene_time;
//Dependencies for the target
graphene_market_history_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_net_LIB_DEPENDS:STATIC=general;fc;general;graphene_db;
//Dependencies for the target
graphene_snapshot_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_time_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_utilities_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_wallet_LIB_DEPENDS:STATIC=general;graphene_app;general;graphene_net;general;graphene_chain;general;graphene_utilities;general;fc;general;dl;
//Dependencies for the target
graphene_witness_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Value Computed by CMake
websocketpp_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc/vendor/websocketpp
//Value Computed by CMake
websocketpp_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc/vendor/websocketpp
########################
# INTERNAL cache entries
########################
//ADVANCED property for variable: BOOST_ROOT
BOOST_ROOT-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CHRONO_LIBRARY_DEBUG
Boost_CHRONO_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CHRONO_LIBRARY_RELEASE
Boost_CHRONO_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_DEBUG
Boost_CONTEXT_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_RELEASE
Boost_CONTEXT_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_COROUTINE_LIBRARY_DEBUG
Boost_COROUTINE_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_COROUTINE_LIBRARY_RELEASE
Boost_COROUTINE_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_DEBUG
Boost_DATE_TIME_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_RELEASE
Boost_DATE_TIME_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DIR
Boost_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_DEBUG
Boost_FILESYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_RELEASE
Boost_FILESYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_INCLUDE_DIR
Boost_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_IOSTREAMS_LIBRARY_DEBUG
Boost_IOSTREAMS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_IOSTREAMS_LIBRARY_RELEASE
Boost_IOSTREAMS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR
Boost_LIBRARY_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR_DEBUG
Boost_LIBRARY_DIR_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR_RELEASE
Boost_LIBRARY_DIR_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LOCALE_LIBRARY_DEBUG
Boost_LOCALE_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LOCALE_LIBRARY_RELEASE
Boost_LOCALE_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG
Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE
Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_DEBUG
Boost_SERIALIZATION_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_RELEASE
Boost_SERIALIZATION_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_DEBUG
Boost_SIGNALS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_RELEASE
Boost_SIGNALS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_DEBUG
Boost_SYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_RELEASE
Boost_SYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_THREAD_LIBRARY_DEBUG
Boost_THREAD_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_THREAD_LIBRARY_RELEASE
Boost_THREAD_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_AR
CMAKE_AR-ADVANCED:INTERNAL=1
//This is the directory where this CMakeCache.txt was created
CMAKE_CACHEFILE_DIR:INTERNAL=/home/pbattu/git/18.04/peerplays
//Major version of cmake used to create the current loaded cache
CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3
//Minor version of cmake used to create the current loaded cache
CMAKE_CACHE_MINOR_VERSION:INTERNAL=10
//Patch version of cmake used to create the current loaded cache
CMAKE_CACHE_PATCH_VERSION:INTERNAL=2
//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE
CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1
//Path to CMake executable.
CMAKE_COMMAND:INTERNAL=/usr/bin/cmake
//Path to cpack program executable.
CMAKE_CPACK_COMMAND:INTERNAL=/usr/bin/cpack
//Path to ctest program executable.
CMAKE_CTEST_COMMAND:INTERNAL=/usr/bin/ctest
//ADVANCED property for variable: CMAKE_CXX_COMPILER
CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_AR
CMAKE_CXX_COMPILER_AR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_RANLIB
CMAKE_CXX_COMPILER_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER
CMAKE_C_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER_AR
CMAKE_C_COMPILER_AR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER_RANLIB
CMAKE_C_COMPILER_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS
CMAKE_C_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//Executable file format
CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS
CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG
CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL
CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELEASE
CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS
CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1
//Name of external makefile project generator.
CMAKE_EXTRA_GENERATOR:INTERNAL=
//Name of generator.
CMAKE_GENERATOR:INTERNAL=Unix Makefiles
//Name of generator platform.
CMAKE_GENERATOR_PLATFORM:INTERNAL=
//Name of generator toolset.
CMAKE_GENERATOR_TOOLSET:INTERNAL=
//Have symbol pthread_create
CMAKE_HAVE_LIBC_CREATE:INTERNAL=
//Have library pthreads
CMAKE_HAVE_PTHREADS_CREATE:INTERNAL=
//Have library pthread
CMAKE_HAVE_PTHREAD_CREATE:INTERNAL=1
//Have include pthread.h
CMAKE_HAVE_PTHREAD_H:INTERNAL=1
//Source directory with the top level CMakeLists.txt file for this
// project
CMAKE_HOME_DIRECTORY:INTERNAL=/home/pbattu/git/18.04/peerplays
//Install .so files without execute permission.
CMAKE_INSTALL_SO_NO_EXE:INTERNAL=1
//ADVANCED property for variable: CMAKE_LINKER
CMAKE_LINKER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MAKE_PROGRAM
CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS
CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG
CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL
CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE
CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_NM
CMAKE_NM-ADVANCED:INTERNAL=1
//number of local generators
CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=37
//ADVANCED property for variable: CMAKE_OBJCOPY
CMAKE_OBJCOPY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_OBJDUMP
CMAKE_OBJDUMP-ADVANCED:INTERNAL=1
//Platform information initialized
CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1
//ADVANCED property for variable: CMAKE_RANLIB
CMAKE_RANLIB-ADVANCED:INTERNAL=1
//Path to CMake installation.
CMAKE_ROOT:INTERNAL=/usr/share/cmake-3.10
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS
CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG
CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL
CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE
CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH
CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SKIP_RPATH
CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS
CMAKE_STATIC_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_DEBUG
CMAKE_STATIC_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL
CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELEASE
CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STRIP
CMAKE_STRIP-ADVANCED:INTERNAL=1
//uname command
CMAKE_UNAME:INTERNAL=/bin/uname
//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE
CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_CURSES_LIBRARY
CURSES_CURSES_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_FORM_LIBRARY
CURSES_FORM_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_INCLUDE_PATH
CURSES_INCLUDE_PATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_NCURSES_LIBRARY
CURSES_NCURSES_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: DOXYGEN_DOT_EXECUTABLE
DOXYGEN_DOT_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: DOXYGEN_EXECUTABLE
DOXYGEN_EXECUTABLE-ADVANCED:INTERNAL=1
//Details about finding OpenSSL
FIND_PACKAGE_MESSAGE_DETAILS_OpenSSL:INTERNAL=[/usr/lib/x86_64-linux-gnu/libcrypto.a][/usr/include][v1.1.0g()]
//Details about finding Perl
FIND_PACKAGE_MESSAGE_DETAILS_Perl:INTERNAL=[/usr/bin/perl][v5.26.1()]
//Details about finding Readline
FIND_PACKAGE_MESSAGE_DETAILS_Readline:INTERNAL=[/usr/include][/usr/lib/x86_64-linux-gnu/libreadline.so][v()]
//Details about finding Threads
FIND_PACKAGE_MESSAGE_DETAILS_Threads:INTERNAL=[TRUE][v()]
//Details about finding ZLIB
FIND_PACKAGE_MESSAGE_DETAILS_ZLIB:INTERNAL=[/usr/lib/x86_64-linux-gnu/libz.so][/usr/include][v1.2.11()]
//ADVANCED property for variable: GIT_EXECUTABLE
GIT_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_CRYPTO_LIBRARY
OPENSSL_CRYPTO_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_INCLUDE_DIR
OPENSSL_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_SSL_LIBRARY
OPENSSL_SSL_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: PERL_EXECUTABLE
PERL_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: PKG_CONFIG_EXECUTABLE
PKG_CONFIG_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_INCLUDE_DIR
Readline_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_LIBRARY
Readline_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_ROOT_DIR
Readline_ROOT_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_INCLUDE_DIR
ZLIB_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_LIBRARY_DEBUG
ZLIB_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_LIBRARY_RELEASE
ZLIB_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//Last used BOOST_ROOT value.
_BOOST_ROOT_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0
//Components requested for this build tree.
_Boost_COMPONENTS_SEARCHED:INTERNAL=chrono;context;coroutine;date_time;filesystem;iostreams;locale;program_options;serialization;signals;system;thread;unit_test_framework
//Last used Boost_INCLUDE_DIR value.
_Boost_INCLUDE_DIR_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/include
//Last used Boost_LIBRARY_DIR_DEBUG value.
_Boost_LIBRARY_DIR_DEBUG_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_LIBRARY_DIR value.
_Boost_LIBRARY_DIR_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_LIBRARY_DIR_RELEASE value.
_Boost_LIBRARY_DIR_RELEASE_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_NAMESPACE value.
_Boost_NAMESPACE_LAST:INTERNAL=boost
//Last used Boost_USE_MULTITHREADED value.
_Boost_USE_MULTITHREADED_LAST:INTERNAL=TRUE
//Last used Boost_USE_STATIC_LIBS value.
_Boost_USE_STATIC_LIBS_LAST:INTERNAL=ON
_OPENSSL_CFLAGS:INTERNAL=
_OPENSSL_CFLAGS_I:INTERNAL=
_OPENSSL_CFLAGS_OTHER:INTERNAL=
_OPENSSL_FOUND:INTERNAL=1
_OPENSSL_INCLUDEDIR:INTERNAL=/usr/include
_OPENSSL_INCLUDE_DIRS:INTERNAL=
_OPENSSL_LDFLAGS:INTERNAL=-lssl;-lcrypto
_OPENSSL_LDFLAGS_OTHER:INTERNAL=
_OPENSSL_LIBDIR:INTERNAL=/usr/lib/x86_64-linux-gnu
_OPENSSL_LIBRARIES:INTERNAL=ssl;crypto
_OPENSSL_LIBRARY_DIRS:INTERNAL=
_OPENSSL_LIBS:INTERNAL=
_OPENSSL_LIBS_L:INTERNAL=
_OPENSSL_LIBS_OTHER:INTERNAL=
_OPENSSL_LIBS_PATHS:INTERNAL=
_OPENSSL_PREFIX:INTERNAL=/usr
_OPENSSL_STATIC_CFLAGS:INTERNAL=
_OPENSSL_STATIC_CFLAGS_I:INTERNAL=
_OPENSSL_STATIC_CFLAGS_OTHER:INTERNAL=
_OPENSSL_STATIC_INCLUDE_DIRS:INTERNAL=
_OPENSSL_STATIC_LDFLAGS:INTERNAL=-lssl;-ldl;-lcrypto;-ldl
_OPENSSL_STATIC_LDFLAGS_OTHER:INTERNAL=
_OPENSSL_STATIC_LIBDIR:INTERNAL=
_OPENSSL_STATIC_LIBRARIES:INTERNAL=ssl;dl;crypto;dl
_OPENSSL_STATIC_LIBRARY_DIRS:INTERNAL=
_OPENSSL_STATIC_LIBS:INTERNAL=
_OPENSSL_STATIC_LIBS_L:INTERNAL=
_OPENSSL_STATIC_LIBS_OTHER:INTERNAL=
_OPENSSL_STATIC_LIBS_PATHS:INTERNAL=
_OPENSSL_VERSION:INTERNAL=1.1.0g
_OPENSSL_openssl_INCLUDEDIR:INTERNAL=
_OPENSSL_openssl_LIBDIR:INTERNAL=
_OPENSSL_openssl_PREFIX:INTERNAL=
_OPENSSL_openssl_VERSION:INTERNAL=
__pkg_config_arguments__OPENSSL:INTERNAL=QUIET;openssl
__pkg_config_checked__OPENSSL:INTERNAL=1
prefix_result:INTERNAL=/usr/lib/x86_64-linux-gnu

View file

@ -1,6 +0,0 @@
#!/bin/bash
find ./libraries/app -regex ".*[c|h]pp" | xargs clang-format -i
find ./libraries/chain/hardfork.d -regex ".*hf" | xargs clang-format -i
find ./libraries/plugins/peerplays_sidechain -regex ".*[c|h]pp" | xargs clang-format -i
find ./programs/cli_wallet -regex ".*[c|h]pp" | xargs clang-format -i

61
docker/default_config.ini Normal file
View file

@ -0,0 +1,61 @@
# Endpoint for P2P node to listen on
p2p-endpoint = 0.0.0.0:9090
# P2P nodes to connect to on startup (may specify multiple times)
# seed-node =
# JSON array of P2P nodes to connect to on startup
# seed-nodes =
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
# Endpoint for websocket RPC to listen on
rpc-endpoint = 0.0.0.0:8090
# Endpoint for TLS websocket RPC to listen on
# rpc-tls-endpoint =
# The TLS certificate file for this server
# server-pem =
# Password for this certificate
# server-pem-password =
# File to read Genesis State from
# genesis-json =
# Block signing key to use for init witnesses, overrides genesis file
# dbg-init-key =
# JSON file specifying API permissions
# api-access =
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
required-participation = false
# ID of witness controlled by this node (e.g. "1.6.5", quotes are required, may specify multiple times)
# witness-id =
# Tuple of [PublicKey, WIF private key] (may specify multiple times)
# private-key = ["BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]
# Account ID to track history for (may specify multiple times)
# track-account =
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
# bucket-size = [15,60,300,3600,86400]
bucket-size = [60,300,900,1800,3600,14400,86400]
# Buckets for 1 min, 5 min, 15 min, 30 min, 1 h, 4 h, and 1 day.
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 1000)
history-per-size = 1000
# Max amount of operations to store in the database, per account (drastically reduces RAM requirements)
max-ops-per-account = 1000
# Remove old operation history # objects from RAM
partial-operations = true
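A hypothetical way to use an edited copy of this file with the Docker image, assuming the image ships it as `/etc/peerplays/config.ini` (which the entrypoint script below implies); 8090 (RPC) and 9090 (P2P) match the endpoints declared in this file:
```
# Bind-mount a locally edited config over the one baked into the image
docker run -d \
  -v "$(pwd)/docker/default_config.ini:/etc/peerplays/config.ini:ro" \
  -p 8090:8090 -p 9090:9090 \
  peerplays:latest
```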

87
docker/peerplaysentry.sh Normal file
View file

@ -0,0 +1,87 @@
#!/bin/bash
PEERPLAYSD="/usr/local/bin/witness_node"
# For blockchain download
VERSION=`cat /etc/peerplays/version`
## Supported Environmental Variables
#
# * $PEERPLAYSD_SEED_NODES
# * $PEERPLAYSD_RPC_ENDPOINT
# * $PEERPLAYSD_PLUGINS
# * $PEERPLAYSD_REPLAY
# * $PEERPLAYSD_RESYNC
# * $PEERPLAYSD_P2P_ENDPOINT
# * $PEERPLAYSD_WITNESS_ID
# * $PEERPLAYSD_PRIVATE_KEY
# * $PEERPLAYSD_DEBUG_PRIVATE_KEY
# * $PEERPLAYSD_TRACK_ACCOUNTS
# * $PEERPLAYSD_PARTIAL_OPERATIONS
# * $PEERPLAYSD_MAX_OPS_PER_ACCOUNT
# * $PEERPLAYSD_TRUSTED_NODE
#
ARGS=""
# Translate environmental variables
if [[ ! -z "$PEERPLAYSD_SEED_NODES" ]]; then
for NODE in $PEERPLAYSD_SEED_NODES ; do
ARGS+=" --seed-node=$NODE"
done
fi
if [[ ! -z "$PEERPLAYSD_RPC_ENDPOINT" ]]; then
ARGS+=" --rpc-endpoint=${PEERPLAYSD_RPC_ENDPOINT}"
fi
if [[ ! -z "$PEERPLAYSD_REPLAY" ]]; then
ARGS+=" --replay-blockchain"
fi
if [[ ! -z "$PEERPLAYSD_RESYNC" ]]; then
ARGS+=" --resync-blockchain"
fi
if [[ ! -z "$PEERPLAYSD_P2P_ENDPOINT" ]]; then
ARGS+=" --p2p-endpoint=${PEERPLAYSD_P2P_ENDPOINT}"
fi
if [[ ! -z "$PEERPLAYSD_WITNESS_ID" ]]; then
ARGS+=" --witness-id=$PEERPLAYSD_WITNESS_ID"
fi
if [[ ! -z "$PEERPLAYSD_PRIVATE_KEY" ]]; then
ARGS+=" --private-key=$PEERPLAYSD_PRIVATE_KEY"
fi
if [[ ! -z "$PEERPLAYSD_DEBUG_PRIVATE_KEY" ]]; then
ARGS+=" --debug-private-key=$PEERPLAYSD_DEBUG_PRIVATE_KEY"
fi
if [[ ! -z "$PEERPLAYSD_TRACK_ACCOUNTS" ]]; then
for ACCOUNT in $PEERPLAYSD_TRACK_ACCOUNTS ; do
ARGS+=" --track-account=$ACCOUNT"
done
fi
if [[ ! -z "$PEERPLAYSD_PARTIAL_OPERATIONS" ]]; then
ARGS+=" --partial-operations=${PEERPLAYSD_PARTIAL_OPERATIONS}"
fi
if [[ ! -z "$PEERPLAYSD_MAX_OPS_PER_ACCOUNT" ]]; then
ARGS+=" --max-ops-per-account=${PEERPLAYSD_MAX_OPS_PER_ACCOUNT}"
fi
if [[ ! -z "$PEERPLAYSD_TRUSTED_NODE" ]]; then
ARGS+=" --trusted-node=${PEERPLAYSD_TRUSTED_NODE}"
fi
## Link the peerplays config file into home
## This link has been created in Dockerfile, already
ln -f -s /etc/peerplays/config.ini /var/lib/peerplays
# Plugins need to be provided in a space-separated list, which
# makes it necessary to write it like this
if [[ ! -z "$PEERPLAYSD_PLUGINS" ]]; then
$PEERPLAYSD --data-dir ${HOME} ${ARGS} ${PEERPLAYSD_ARGS} --plugins "${PEERPLAYSD_PLUGINS}"
else
$PEERPLAYSD --data-dir ${HOME} ${ARGS} ${PEERPLAYSD_ARGS}
fi
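A hedged usage example of this entrypoint: each `PEERPLAYSD_*` variable set on the container is translated into the corresponding `witness_node` argument, so a node can be configured entirely from `docker run` (the endpoint and seed values below are illustrative, taken from elsewhere in this repository):
```
# Configure the containerized node through the environment variables handled above
docker run -d --name peerplays-seed \
  -p 8090:8090 -p 9090:9090 \
  -e PEERPLAYSD_RPC_ENDPOINT=0.0.0.0:8090 \
  -e PEERPLAYSD_P2P_ENDPOINT=0.0.0.0:9090 \
  -e PEERPLAYSD_SEED_NODES="213.184.225.234:59500" \
  -e PEERPLAYSD_PARTIAL_OPERATIONS=true \
  -e PEERPLAYSD_MAX_OPS_PER_ACCOUNT=1000 \
  peerplays:latest
```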

2
docs

@ -1 +1 @@
Subproject commit 1e924950c2f92b166c34ceb294e8b8c4997a6c4e
Subproject commit 8d8b69d82482101279460fa02f814d0e4030966f

File diff suppressed because it is too large

View file

@ -1,11 +1,12 @@
add_subdirectory( app )
add_subdirectory( chain )
add_subdirectory( db )
add_subdirectory( egenesis )
add_subdirectory( fc )
add_subdirectory( db )
#add_subdirectory( deterministic_openssl_rand )
add_subdirectory( chain )
add_subdirectory( egenesis )
add_subdirectory( net )
add_subdirectory( plugins )
add_subdirectory( sha3 )
#add_subdirectory( p2p )
add_subdirectory( time )
add_subdirectory( utilities )
add_subdirectory( app )
add_subdirectory( plugins )
add_subdirectory( wallet )

View file

@ -4,19 +4,16 @@ file(GLOB EGENESIS_HEADERS "../egenesis/include/graphene/app/*.hpp")
add_library( graphene_app
api.cpp
application.cpp
config_util.cpp
database_api.cpp
plugin.cpp
config_util.cpp
${HEADERS}
${EGENESIS_HEADERS}
)
# need to link graphene_debug_witness because plugins aren't sufficiently isolated #246
#target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_chain fc graphene_db graphene_net graphene_utilities graphene_debug_witness )
target_link_libraries( graphene_app
PUBLIC graphene_net graphene_utilities
graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_bookie graphene_debug_witness graphene_elasticsearch graphene_es_objects graphene_generate_genesis graphene_market_history peerplays_sidechain )
target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_chain fc graphene_db graphene_net graphene_time graphene_utilities graphene_debug_witness graphene_bookie graphene_elasticsearch peerplays_sidechain )
target_include_directories( graphene_app
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/../egenesis/include" )
@ -33,25 +30,3 @@ INSTALL( TARGETS
ARCHIVE DESTINATION lib
)
INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/app" )
add_library( graphene_plugin
plugin.cpp
include/graphene/app/plugin.hpp
)
target_link_libraries( graphene_plugin
PUBLIC graphene_net graphene_utilities )
target_include_directories( graphene_plugin
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
INSTALL( TARGETS
graphene_app
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -25,36 +25,36 @@
#include <graphene/app/config_util.hpp>
#include <graphene/chain/config.hpp>
#include <fc/reflect/variant.hpp>
#include <fc/string.hpp>
#include <fc/exception/exception.hpp>
#include <fc/log/console_appender.hpp>
#include <fc/log/file_appender.hpp>
#include <fc/log/logger_config.hpp>
#include <fc/reflect/variant.hpp>
#include <fc/string.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string.hpp>
#include <fstream>
namespace bpo = boost::program_options;
class deduplicator {
class deduplicator
{
public:
deduplicator() :
modifier(nullptr) {
}
deduplicator() : modifier(nullptr) {}
deduplicator(const boost::shared_ptr<bpo::option_description> (*mod_fn)(const boost::shared_ptr<bpo::option_description> &)) :
modifier(mod_fn) {
}
deduplicator(const boost::shared_ptr<bpo::option_description> (*mod_fn)(const boost::shared_ptr<bpo::option_description>&))
: modifier(mod_fn) {}
const boost::shared_ptr<bpo::option_description> next(const boost::shared_ptr<bpo::option_description> &o) {
const boost::shared_ptr<bpo::option_description> next(const boost::shared_ptr<bpo::option_description>& o)
{
const std::string name = o->long_name();
if (seen.find(name) != seen.end())
if( seen.find( name ) != seen.end() )
return nullptr;
seen.insert(name);
return modifier ? modifier(o) : o;
@ -62,14 +62,15 @@ public:
private:
boost::container::flat_set<std::string> seen;
const boost::shared_ptr<bpo::option_description> (*modifier)(const boost::shared_ptr<bpo::option_description> &);
const boost::shared_ptr<bpo::option_description> (*modifier)(const boost::shared_ptr<bpo::option_description>&);
};
// Currently, you can only specify the filenames and logging levels, which
// are all most users would want to change. At a later time, options can
// be added to control rotation intervals, compression, and other seldom-
// used features
static void write_default_logging_config_to_stream(std::ostream &out) {
static void write_default_logging_config_to_stream(std::ostream& out)
{
out << "# declare an appender named \"stderr\" that writes messages to the console\n"
"[log.console_appender.stderr]\n"
"stream=std_error\n\n"
@ -114,22 +115,26 @@ static void write_default_logging_config_to_stream(std::ostream &out) {
// logging config is too complicated to be parsed by boost::program_options,
// so we do it by hand
static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const fc::path &config_ini_filename) {
try {
static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const fc::path& config_ini_filename)
{
try
{
fc::logging_config logging_config;
bool found_logging_config = false;
boost::property_tree::ptree config_ini_tree;
boost::property_tree::ini_parser::read_ini(config_ini_filename.preferred_string().c_str(), config_ini_tree);
for (const auto &section : config_ini_tree) {
const std::string &section_name = section.first;
const boost::property_tree::ptree &section_tree = section.second;
for (const auto& section : config_ini_tree)
{
const std::string& section_name = section.first;
const boost::property_tree::ptree& section_tree = section.second;
const std::string console_appender_section_prefix = "log.console_appender.";
const std::string file_appender_section_prefix = "log.file_appender.";
const std::string logger_section_prefix = "logger.";
if (boost::starts_with(section_name, console_appender_section_prefix)) {
if (boost::starts_with(section_name, console_appender_section_prefix))
{
std::string console_appender_name = section_name.substr(console_appender_section_prefix.length());
std::string stream_name = section_tree.get<std::string>("stream");
@ -137,18 +142,20 @@ static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const
// stdout/stderr will be taken from ini file, everything else hard-coded here
fc::console_appender::config console_appender_config;
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::debug,
fc::console_appender::color::green));
fc::console_appender::level_color(fc::log_level::debug,
fc::console_appender::color::green));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::warn,
fc::console_appender::color::brown));
fc::console_appender::level_color(fc::log_level::warn,
fc::console_appender::color::brown));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::error,
fc::console_appender::color::cyan));
fc::console_appender::level_color(fc::log_level::error,
fc::console_appender::color::cyan));
console_appender_config.stream = fc::variant(stream_name).as<fc::console_appender::stream::type>(GRAPHENE_MAX_NESTED_OBJECTS);
logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config, GRAPHENE_MAX_NESTED_OBJECTS)));
found_logging_config = true;
} else if (boost::starts_with(section_name, file_appender_section_prefix)) {
}
else if (boost::starts_with(section_name, file_appender_section_prefix))
{
std::string file_appender_name = section_name.substr(file_appender_section_prefix.length());
fc::path file_name = section_tree.get<std::string>("filename");
if (file_name.is_relative())
@ -167,7 +174,9 @@ static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const
file_appender_config.rotation_limit = fc::days(limit);
logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", fc::variant(file_appender_config, GRAPHENE_MAX_NESTED_OBJECTS)));
found_logging_config = true;
} else if (boost::starts_with(section_name, logger_section_prefix)) {
}
else if (boost::starts_with(section_name, logger_section_prefix))
{
std::string logger_name = section_name.substr(logger_section_prefix.length());
std::string level_string = section_tree.get<std::string>("level");
std::string appenders_string = section_tree.get<std::string>("appenders");
@ -188,66 +197,74 @@ static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const
FC_RETHROW_EXCEPTIONS(warn, "")
}
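For reference, a minimal logging.ini accepted by this parser could look roughly like the following; the section prefixes and keys come from the parsing code above, while the concrete appender and logger names are only illustrative:

[log.console_appender.stderr]
stream=std_error

[log.file_appender.p2p]
filename=logs/p2p/p2p.log

[logger.default]
level=info
appenders=stderr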
static const boost::shared_ptr<bpo::option_description> new_option_description(const std::string &name, const bpo::value_semantic *value, const std::string &description) {
static const boost::shared_ptr<bpo::option_description> new_option_description( const std::string& name, const bpo::value_semantic* value, const std::string& description )
{
bpo::options_description helper("");
helper.add_options()(name.c_str(), value, description.c_str());
helper.add_options()( name.c_str(), value, description.c_str() );
return helper.options()[0];
}
static void load_config_file(const fc::path &config_ini_path, const bpo::options_description &cfg_options,
bpo::variables_map &options) {
static void load_config_file(const fc::path& config_ini_path, const bpo::options_description& cfg_options,
bpo::variables_map& options )
{
deduplicator dedup;
bpo::options_description unique_options("Graphene Witness Node");
for (const boost::shared_ptr<bpo::option_description> opt : cfg_options.options()) {
for( const boost::shared_ptr<bpo::option_description> opt : cfg_options.options() )
{
const boost::shared_ptr<bpo::option_description> od = dedup.next(opt);
if (!od)
continue;
unique_options.add(od);
if( !od ) continue;
unique_options.add( od );
}
// get the basic options
bpo::store(bpo::parse_config_file<char>(config_ini_path.preferred_string().c_str(),
unique_options, true),
options);
unique_options, true), options);
}
static bool load_logging_config_file(const fc::path &config_ini_path) {
static bool load_logging_config_file(const fc::path& config_ini_path)
{
// try to get logging options from the config file.
try {
try
{
fc::optional<fc::logging_config> logging_config = load_logging_config_from_ini_file(config_ini_path);
if (logging_config) {
if (logging_config)
{
fc::configure_logging(*logging_config);
return true;
}
} catch (const fc::exception &ex) {
}
catch (const fc::exception& ex)
{
wlog("Error parsing logging config from logging config file ${config}, using default config", ("config", config_ini_path.preferred_string()));
}
return false;
}
static void create_new_config_file(const fc::path &config_ini_path, const fc::path &data_dir,
const bpo::options_description &cfg_options) {
static void create_new_config_file(const fc::path& config_ini_path, const fc::path& data_dir,
const bpo::options_description& cfg_options )
{
ilog("Writing new config file at ${path}", ("path", config_ini_path));
if (!fc::exists(data_dir))
if( !fc::exists(data_dir) )
fc::create_directories(data_dir);
auto modify_option_defaults = [](const boost::shared_ptr<bpo::option_description> &o) -> const boost::shared_ptr<bpo::option_description> {
const std::string &name = o->long_name();
if (name == "partial-operations")
return new_option_description(name, bpo::value<bool>()->default_value(true), o->description());
if (name == "max-ops-per-account")
return new_option_description(name, bpo::value<int>()->default_value(100), o->description());
return o;
auto modify_option_defaults = [](const boost::shared_ptr<bpo::option_description>& o) -> const boost::shared_ptr<bpo::option_description> {
const std::string& name = o->long_name();
if( name == "partial-operations" )
return new_option_description(name, bpo::value<bool>()->default_value(true), o->description() );
if( name == "max-ops-per-account" )
return new_option_description(name, bpo::value<int>()->default_value(100), o->description() );
return o;
};
deduplicator dedup(modify_option_defaults);
std::ofstream out_cfg(config_ini_path.preferred_string());
std::string plugin_header_surrounding(78, '=');
for (const boost::shared_ptr<bpo::option_description> opt : cfg_options.options()) {
std::string plugin_header_surrounding( 78, '=' );
for( const boost::shared_ptr<bpo::option_description> opt : cfg_options.options() )
{
const boost::shared_ptr<bpo::option_description> od = dedup.next(opt);
if (!od)
continue;
if( !od ) continue;
if (od->long_name().find("plugin-cfg-header-") == 0) // it's a plugin header
if( od->long_name().find("plugin-cfg-header-") == 0 ) // it's a plugin header
{
out_cfg << "\n";
out_cfg << "# " << plugin_header_surrounding << "\n";
@ -257,21 +274,20 @@ static void create_new_config_file(const fc::path &config_ini_path, const fc::pa
continue;
}
if (!od->description().empty())
if( !od->description().empty() )
out_cfg << "# " << od->description() << "\n";
boost::any store;
if (!od->semantic()->apply_default(store))
if( !od->semantic()->apply_default(store) )
out_cfg << "# " << od->long_name() << " = \n";
else {
auto example = od->format_parameter();
if (example.empty())
if( example.empty() )
// This is a boolean switch
out_cfg << od->long_name() << " = "
<< "false\n";
out_cfg << od->long_name() << " = " << "false\n";
else {
// The string is formatted "arg (=<interesting part>)"
example.erase(0, 6);
example.erase(example.length() - 1);
example.erase(example.length()-1);
out_cfg << od->long_name() << " = " << example << "\n";
}
}
@ -288,9 +304,11 @@ static void create_new_config_file(const fc::path &config_ini_path, const fc::pa
out_cfg.close();
}
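To make the string handling above concrete: for an option declared with a default, Boost's format_parameter() returns text of the form "arg (=<default>)", so dropping the first six characters and the trailing parenthesis leaves only the default value. A standalone sketch (the exact text can vary between Boost versions):

#include <cassert>
#include <string>

int main()
{
   // Roughly what bpo::option_description::format_parameter() yields for
   // bpo::value<int>()->default_value(100).
   std::string example = "arg (=100)";
   example.erase(0, 6);                  // strip the leading "arg (="
   example.erase(example.length() - 1);  // strip the trailing ")"
   assert(example == "100");             // written out as: max-ops-per-account = 100
   return 0;
}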
static void create_logging_config_file(const fc::path &config_ini_path, const fc::path &data_dir) {
static void create_logging_config_file(const fc::path& config_ini_path, const fc::path& data_dir)
{
ilog("Writing new config file at ${path}", ("path", config_ini_path));
if (!exists(data_dir)) {
if (!exists(data_dir))
{
create_directories(data_dir);
}
@ -301,29 +319,36 @@ static void create_logging_config_file(const fc::path &config_ini_path, const fc
namespace graphene { namespace app {
void load_configuration_options(const fc::path &data_dir, const bpo::options_description &cfg_options, bpo::variables_map &options) {
const auto config_ini_path = data_dir / "config.ini";
const auto logging_ini_path = data_dir / "logging.ini";
void load_configuration_options(const fc::path& data_dir, const bpo::options_description& cfg_options, bpo::variables_map& options)
{
const auto config_ini_path = data_dir / "config.ini";
const auto logging_ini_path = data_dir / "logging.ini";
if (!exists(config_ini_path) && fc::exists(logging_ini_path)) {
// this is an uncommon case
create_new_config_file(config_ini_path, data_dir, cfg_options);
} else if (!exists(config_ini_path)) {
// create default config.ini and logging.ini
create_new_config_file(config_ini_path, data_dir, cfg_options);
create_logging_config_file(logging_ini_path, data_dir);
if(!exists(config_ini_path) && fc::exists(logging_ini_path))
{
// this is an uncommon case
create_new_config_file(config_ini_path, data_dir, cfg_options);
}
else if(!exists(config_ini_path))
{
// create default config.ini and logging.ini
create_new_config_file(config_ini_path, data_dir, cfg_options);
create_logging_config_file(logging_ini_path, data_dir);
}
// load witness node configuration
load_config_file(config_ini_path, cfg_options, options);
// load logging configuration
if (fc::exists(logging_ini_path))
{
load_logging_config_file(logging_ini_path);
}
else
{
// this is the legacy config.ini case
load_logging_config_file(config_ini_path);
}
}
// load witness node configuration
load_config_file(config_ini_path, cfg_options, options);
// load logging configuration
if (fc::exists(logging_ini_path)) {
load_logging_config_file(logging_ini_path);
} else {
// this is the legacy config.ini case
load_logging_config_file(config_ini_path);
}
}
}} // namespace graphene::app
} } // graphene::app

File diff suppressed because it is too large

View file

@ -25,23 +25,24 @@
#include <graphene/app/database_api.hpp>
#include <graphene/chain/protocol/confidential.hpp>
#include <graphene/chain/protocol/types.hpp>
#include <graphene/chain/protocol/confidential.hpp>
#include <graphene/market_history/market_history_plugin.hpp>
#include <graphene/accounts_list/accounts_list_plugin.hpp>
#include <graphene/elasticsearch/elasticsearch_plugin.hpp>
#include <graphene/debug_witness/debug_api.hpp>
#include <graphene/affiliate_stats/affiliate_stats_api.hpp>
#include <graphene/bookie/bookie_api.hpp>
#include <graphene/net/node.hpp>
#include <graphene/accounts_list/accounts_list_plugin.hpp>
#include <graphene/affiliate_stats/affiliate_stats_api.hpp>
#include <graphene/bookie/bookie_api.hpp>
#include <graphene/debug_witness/debug_api.hpp>
#include <graphene/elasticsearch/elasticsearch_plugin.hpp>
#include <graphene/market_history/market_history_plugin.hpp>
#include <graphene/peerplays_sidechain/sidechain_api.hpp>
#include <fc/api.hpp>
#include <fc/optional.hpp>
#include <fc/crypto/elliptic.hpp>
#include <fc/network/ip.hpp>
#include <fc/optional.hpp>
#include <boost/container/flat_set.hpp>
@ -51,415 +52,452 @@
#include <vector>
namespace graphene { namespace app {
using namespace graphene::chain;
using namespace graphene::market_history;
using namespace graphene::accounts_list;
using namespace fc::ecc;
using namespace std;
using namespace graphene::chain;
using namespace graphene::market_history;
using namespace graphene::accounts_list;
using namespace fc::ecc;
using namespace std;
class application;
class application;
struct verify_range_result {
bool success;
uint64_t min_val;
uint64_t max_val;
};
struct verify_range_proof_rewind_result {
bool success;
uint64_t min_val;
uint64_t max_val;
uint64_t value_out;
fc::ecc::blind_factor_type blind_out;
string message_out;
};
struct account_asset_balance {
string name;
account_id_type account_id;
share_type amount;
};
struct asset_holders {
asset_id_type asset_id;
int count;
};
/**
* @brief The history_api class implements the RPC API for account history
*
* This API contains methods to access account histories
*/
class history_api {
public:
history_api(application &app) :
_app(app),
database_api(std::ref(*app.chain_database())) {
}
/**
* @brief Get operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(const std::string account_id_or_name,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type()) const;
/**
* @brief Get only asked operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param operation_id The ID of the operation type to filter for (0 = transfer, 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(const std::string account_id_or_name,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100) const;
/**
* @brief Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history(const std::string account_id_or_name,
uint32_t stop = 0,
unsigned limit = 100,
uint32_t start = 0) const;
vector<order_history_object> get_fill_order_history(std::string asset_a, std::string asset_b, uint32_t limit) const;
vector<bucket_object> get_market_history(std::string asset_a, std::string asset_b, uint32_t bucket_seconds,
fc::time_point_sec start, fc::time_point_sec end) const;
vector<account_balance_object> list_core_accounts() const;
flat_set<uint32_t> get_market_history_buckets() const;
uint32_t api_limit_get_account_history_operations = 100;
uint32_t api_limit_get_account_history = 100;
uint32_t api_limit_get_relative_account_history = 100;
private:
application &_app;
graphene::app::database_api database_api;
};
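A hedged usage sketch of the paging parameters above (assumes an already-constructed history_api named `hist`; the account name is a placeholder):

// Most recent operations first; start/stop left at their defaults mean "no bound".
auto recent = hist.get_account_history("example-account",
                                       operation_history_id_type(),  // stop: earliest bound
                                       100,                          // limit, capped at 100
                                       operation_history_id_type()); // start: newest

// Restrict to one operation type, e.g. 0 = transfer as documented above.
auto transfers = hist.get_account_history_operations("example-account", 0);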
/**
* @brief Block api
*/
class block_api {
public:
block_api(graphene::chain::database &db);
~block_api();
vector<optional<signed_block>> get_blocks(uint32_t block_num_from, uint32_t block_num_to) const;
private:
graphene::chain::database &_db;
};
/**
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api> {
public:
network_broadcast_api(application &a);
struct transaction_confirmation {
transaction_id_type id;
uint32_t block_num;
uint32_t trx_num;
processed_transaction trx;
struct verify_range_result
{
bool success;
uint64_t min_val;
uint64_t max_val;
};
struct verify_range_proof_rewind_result
{
bool success;
uint64_t min_val;
uint64_t max_val;
uint64_t value_out;
fc::ecc::blind_factor_type blind_out;
string message_out;
};
typedef std::function<void(variant /*transaction_confirmation*/)> confirmation_callback;
struct account_asset_balance
{
string name;
account_id_type account_id;
share_type amount;
};
struct asset_holders
{
asset_id_type asset_id;
int count;
};
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
* @brief The history_api class implements the RPC API for account history
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
* This API contains methods to access account histories
*/
void broadcast_transaction(const signed_transaction &trx);
class history_api
{
public:
history_api(application& app)
:_app(app), database_api( std::ref(*app.chain_database())) {}
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback(confirmation_callback cb, const signed_transaction &trx);
/**
* @brief Get operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(const std::string account_id_or_name,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type())const;
/** this version of broadcast transaction waits until the transaction has been included in a block and returns
 * the resulting confirmation, which contains the transaction id, block number, and transaction number in the
 * block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction &trx);
/**
* @brief Get only asked operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param operation_id The ID of the operation type to filter for (0 = transfer, 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(const std::string account_id_or_name,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100)const;
void broadcast_block(const signed_block &block);
/**
* @brief Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history( const std::string account_id_or_name,
uint32_t stop = 0,
unsigned limit = 100,
uint32_t start = 0) const;
vector<order_history_object> get_fill_order_history( std::string asset_a, std::string asset_b, uint32_t limit )const;
vector<bucket_object> get_market_history( std::string asset_a, std::string asset_b, uint32_t bucket_seconds,
fc::time_point_sec start, fc::time_point_sec end )const;
vector<account_balance_object> list_core_accounts()const;
flat_set<uint32_t> get_market_history_buckets()const;
private:
application& _app;
graphene::app::database_api database_api;
};
/**
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
* @brief Block api
*/
void on_applied_block(const signed_block &b);
class block_api
{
public:
block_api(graphene::chain::database& db);
~block_api();
private:
boost::signals2::scoped_connection _applied_block_connection;
map<transaction_id_type, confirmation_callback> _callbacks;
application &_app;
};
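A minimal sketch of the callback variant described above (assumes a network_broadcast_api instance `bcast` and a signed transaction `trx`; per the typedef comment, the variant handed to the callback carries a transaction_confirmation):

bcast.broadcast_transaction_with_callback(
      [](fc::variant v) {
         // Invoked once the transaction has been included in a block.
         auto conf = v.as<network_broadcast_api::transaction_confirmation>(GRAPHENE_MAX_NESTED_OBJECTS);
         ilog("tx ${id} in block ${b}, trx #${n}",
              ("id", conf.id)("b", conf.block_num)("n", conf.trx_num));
      },
      trx);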
vector<optional<signed_block>> get_blocks(uint32_t block_num_from, uint32_t block_num_to)const;
private:
graphene::chain::database& _db;
};
/**
* @brief The network_node_api class allows maintenance of p2p connections.
*/
class network_node_api {
public:
network_node_api(application &a);
/**
* @brief Return general network information, such as p2p port
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
fc::variant_object get_info() const;
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api>
{
public:
network_broadcast_api(application& a);
struct transaction_confirmation
{
transaction_id_type id;
uint32_t block_num;
uint32_t trx_num;
processed_transaction trx;
};
typedef std::function<void(variant/*transaction_confirmation*/)> confirmation_callback;
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
void broadcast_transaction(const signed_transaction& trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback( confirmation_callback cb, const signed_transaction& trx);
/** this version of broadcast transaction waits until the transaction has been included in a block and returns
 * the resulting confirmation, which contains the transaction id, block number, and transaction number in the
 * block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction& trx);
void broadcast_block( const signed_block& block );
/**
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
*/
void on_applied_block( const signed_block& b );
private:
boost::signals2::scoped_connection _applied_block_connection;
map<transaction_id_type,confirmation_callback> _callbacks;
application& _app;
};
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
* @brief The network_node_api class allows maintenance of p2p connections.
*/
void add_node(const fc::ip::endpoint &ep);
class network_node_api
{
public:
network_node_api(application& a);
/**
* @brief Return general network information, such as p2p port
*/
fc::variant_object get_info() const;
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
void add_node(const fc::ip::endpoint& ep);
/**
* @brief Get status of all current connections to peers
*/
std::vector<net::peer_status> get_connected_peers() const;
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object& params);
/**
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when a new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant&)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
application& _app;
map<transaction_id_type, signed_transaction> _pending_transactions;
boost::signals2::scoped_connection _pending_trx_connection;
boost::signals2::scoped_connection _applied_block_connection;
std::function<void(const variant&)> _on_pending_transaction;
};
class crypto_api
{
public:
crypto_api();
fc::ecc::commitment_type blind( const fc::ecc::blind_factor_type& blind, uint64_t value );
fc::ecc::blind_factor_type blind_sum( const std::vector<blind_factor_type>& blinds_in, uint32_t non_neg );
bool verify_sum( const std::vector<commitment_type>& commits_in, const std::vector<commitment_type>& neg_commits_in, int64_t excess );
verify_range_result verify_range( const fc::ecc::commitment_type& commit, const std::vector<char>& proof );
std::vector<char> range_proof_sign( uint64_t min_value,
const commitment_type& commit,
const blind_factor_type& commit_blind,
const blind_factor_type& nonce,
int8_t base10_exp,
uint8_t min_bits,
uint64_t actual_value );
verify_range_proof_rewind_result verify_range_proof_rewind( const blind_factor_type& nonce,
const fc::ecc::commitment_type& commit,
const std::vector<char>& proof );
range_proof_info range_get_info( const std::vector<char>& proof );
};
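These calls expose Pedersen-commitment arithmetic; a rough sketch, with placeholder blinding factors, of committing to a value and checking that inputs balance outputs (details of the underlying scheme are outside this header):

crypto_api crypto;

// Placeholder blinding factors; a wallet would normally derive these deterministically.
fc::ecc::blind_factor_type in_blind  = fc::sha256::hash(std::string("in"));
fc::ecc::blind_factor_type out_blind = crypto.blind_sum({in_blind}, 1); // one positive factor

fc::ecc::commitment_type in_commit  = crypto.blind(in_blind, 1000);
fc::ecc::commitment_type out_commit = crypto.blind(out_blind, 1000);

// Same value and equal blinding on both sides, so the sums balance with zero excess.
bool balanced = crypto.verify_sum({in_commit}, {out_commit}, 0);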
/**
* @brief Get status of all current connections to peers
* @brief
*/
std::vector<net::peer_status> get_connected_peers() const;
class asset_api
{
public:
asset_api(graphene::app::application& app);
~asset_api();
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum number of holders to retrieve (must not exceed 100)
* @return A list of asset holders for the specified asset
*/
vector<account_asset_balance> get_asset_holders( std::string asset, uint32_t start, uint32_t limit )const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object &params);
/**
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
int get_asset_holders_count( std::string asset )const;
/**
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Get all asset holders
* @return A list of all asset holders
*/
vector<asset_holders> get_all_asset_holders() const;
/**
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when a new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant &)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
application &_app;
map<transaction_id_type, signed_transaction> _pending_transactions;
boost::signals2::scoped_connection _pending_trx_connection;
boost::signals2::scoped_connection _applied_block_connection;
std::function<void(const variant &)> _on_pending_transaction;
};
/**
* @brief
*/
class asset_api {
public:
asset_api(graphene::app::application &app);
~asset_api();
/**
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum number of holders to retrieve (must not exceed 100)
* @return A list of asset holders for the specified asset
*/
vector<account_asset_balance> get_asset_holders(std::string asset, uint32_t start, uint32_t limit) const;
/**
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
int get_asset_holders_count(std::string asset) const;
/**
* @brief Get all asset holders
* @return A list of all asset holders
*/
vector<asset_holders> get_all_asset_holders() const;
uint32_t api_limit_get_asset_holders = 100;
private:
graphene::app::application &_app;
graphene::chain::database &_db;
graphene::app::database_api database_api;
};
}} // namespace graphene::app
private:
graphene::app::application& _app;
graphene::chain::database& _db;
graphene::app::database_api database_api;
};
} } // graphene::app
extern template class fc::api<graphene::app::block_api>;
extern template class fc::api<graphene::app::network_broadcast_api>;
extern template class fc::api<graphene::app::network_node_api>;
extern template class fc::api<graphene::app::history_api>;
extern template class fc::api<graphene::app::crypto_api>;
extern template class fc::api<graphene::app::asset_api>;
extern template class fc::api<graphene::debug_witness::debug_api>;
namespace graphene { namespace app {
/**
* @brief The login_api class implements the bottom layer of the RPC API
*
* All other APIs must be requested from this API.
*/
class login_api {
public:
login_api(application &a);
~login_api();
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
* @brief The login_api class implements the bottom layer of the RPC API
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has successfully authenticated.
* All other APIs must be requested from this API.
*/
bool login(const string &user, const string &password);
/// @brief Retrieve the network block API
fc::api<block_api> block() const;
/// @brief Retrieve the network broadcast API
fc::api<network_broadcast_api> network_broadcast() const;
/// @brief Retrieve the database API
fc::api<database_api> database() const;
/// @brief Retrieve the history API
fc::api<history_api> history() const;
/// @brief Retrieve the network node API
fc::api<network_node_api> network_node() const;
/// @brief Retrieve the asset API
fc::api<asset_api> asset() const;
/// @brief Retrieve the debug API (if available)
fc::api<graphene::debug_witness::debug_api> debug() const;
/// @brief Retrieve the bookie API (if available)
fc::api<graphene::bookie::bookie_api> bookie() const;
/// @brief Retrieve the affiliate_stats API (if available)
fc::api<graphene::affiliate_stats::affiliate_stats_api> affiliate_stats() const;
/// @brief Retrieve the sidechain_api API (if available)
fc::api<graphene::peerplays_sidechain::sidechain_api> sidechain() const;
class login_api
{
public:
login_api(application& a);
~login_api();
/// @brief Called to enable an API, not reflected.
void enable_api(const string &api_name);
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has successfully authenticated.
*/
bool login(const string& user, const string& password);
/// @brief Retrieve the network block API
fc::api<block_api> block()const;
/// @brief Retrieve the network broadcast API
fc::api<network_broadcast_api> network_broadcast()const;
/// @brief Retrieve the database API
fc::api<database_api> database()const;
/// @brief Retrieve the history API
fc::api<history_api> history()const;
/// @brief Retrieve the network node API
fc::api<network_node_api> network_node()const;
/// @brief Retrieve the cryptography API
fc::api<crypto_api> crypto()const;
/// @brief Retrieve the asset API
fc::api<asset_api> asset()const;
/// @brief Retrieve the debug API (if available)
fc::api<graphene::debug_witness::debug_api> debug()const;
/// @brief Retrieve the bookie API (if available)
fc::api<graphene::bookie::bookie_api> bookie()const;
/// @brief Retrieve the affiliate_stats API (if available)
fc::api<graphene::affiliate_stats::affiliate_stats_api> affiliate_stats()const;
private:
application &_app;
optional<fc::api<block_api>> _block_api;
optional<fc::api<database_api>> _database_api;
optional<fc::api<network_broadcast_api>> _network_broadcast_api;
optional<fc::api<network_node_api>> _network_node_api;
optional<fc::api<history_api>> _history_api;
optional<fc::api<asset_api>> _asset_api;
optional<fc::api<graphene::debug_witness::debug_api>> _debug_api;
optional<fc::api<graphene::bookie::bookie_api>> _bookie_api;
optional<fc::api<graphene::affiliate_stats::affiliate_stats_api>> _affiliate_stats_api;
optional<fc::api<graphene::peerplays_sidechain::sidechain_api>> _sidechain_api;
};
/// @brief Called to enable an API, not reflected.
void enable_api( const string& api_name );
private:
}} // namespace graphene::app
application& _app;
optional< fc::api<block_api> > _block_api;
optional< fc::api<database_api> > _database_api;
optional< fc::api<network_broadcast_api> > _network_broadcast_api;
optional< fc::api<network_node_api> > _network_node_api;
optional< fc::api<history_api> > _history_api;
optional< fc::api<crypto_api> > _crypto_api;
optional< fc::api<asset_api> > _asset_api;
optional< fc::api<graphene::debug_witness::debug_api> > _debug_api;
optional< fc::api<graphene::bookie::bookie_api> > _bookie_api;
optional< fc::api<graphene::affiliate_stats::affiliate_stats_api> > _affiliate_stats_api;
};
}} // graphene::app
extern template class fc::api<graphene::app::login_api>;
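A hedged sketch of the intended call sequence, assuming an fc::api<graphene::app::login_api> handle (e.g. obtained over a websocket RPC connection) and placeholder credentials:

void use_node(fc::api<graphene::app::login_api> login)
{
   if (!login->login("example-user", "example-password"))  // must precede all other API requests
      FC_THROW("login failed");

   auto db    = login->database();           // fc::api<database_api>
   auto hist  = login->history();            // fc::api<history_api>
   auto bcast = login->network_broadcast();  // fc::api<network_broadcast_api>
}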
// clang-format off
FC_REFLECT( graphene::app::network_broadcast_api::transaction_confirmation,
(id)(block_num)(trx_num)(trx) )
FC_REFLECT( graphene::app::verify_range_result,
(success)(min_val)(max_val) )
FC_REFLECT( graphene::app::verify_range_proof_rewind_result,
(success)(min_val)(max_val)(value_out)(blind_out)(message_out) )
//FC_REFLECT_TYPENAME( fc::ecc::compact_signature );
//FC_REFLECT_TYPENAME( fc::ecc::commitment_type );
FC_REFLECT(graphene::app::network_broadcast_api::transaction_confirmation,
(id)(block_num)(trx_num)(trx))
FC_REFLECT(graphene::app::verify_range_result,
(success)(min_val)(max_val))
FC_REFLECT(graphene::app::verify_range_proof_rewind_result,
(success)(min_val)(max_val)(value_out)(blind_out)(message_out))
FC_REFLECT(graphene::app::account_asset_balance,
(name)(account_id)(amount));
FC_REFLECT(graphene::app::asset_holders,
(asset_id)(count));
FC_REFLECT( graphene::app::account_asset_balance, (name)(account_id)(amount) );
FC_REFLECT( graphene::app::asset_holders, (asset_id)(count) );
FC_API(graphene::app::history_api,
(get_account_history)
(get_account_history_operations)
(get_relative_account_history)
(get_fill_order_history)
(get_market_history)
(get_market_history_buckets)
(list_core_accounts))
(get_account_history)
(get_account_history_operations)
(get_relative_account_history)
(get_fill_order_history)
(get_market_history)
(get_market_history_buckets)
(list_core_accounts)
)
FC_API(graphene::app::block_api,
(get_blocks))
(get_blocks)
)
FC_API(graphene::app::network_broadcast_api,
(broadcast_transaction)
(broadcast_transaction_with_callback)
(broadcast_transaction_synchronous)
(broadcast_block))
(broadcast_transaction)
(broadcast_transaction_with_callback)
(broadcast_transaction_synchronous)
(broadcast_block)
)
FC_API(graphene::app::network_node_api,
(get_info)
(add_node)
(get_connected_peers)
(get_potential_peers)
(get_advanced_node_parameters)
(set_advanced_node_parameters)
(list_pending_transactions)
(subscribe_to_pending_transactions)
(unsubscribe_from_pending_transactions))
(get_info)
(add_node)
(get_connected_peers)
(get_potential_peers)
(get_advanced_node_parameters)
(set_advanced_node_parameters)
(list_pending_transactions)
(subscribe_to_pending_transactions)
(unsubscribe_from_pending_transactions)
)
FC_API(graphene::app::crypto_api,
(blind)
(blind_sum)
(verify_sum)
(verify_range)
(range_proof_sign)
(verify_range_proof_rewind)
(range_get_info)
)
FC_API(graphene::app::asset_api,
(get_asset_holders)
(get_asset_holders_count)
(get_all_asset_holders))
(get_asset_holders)
(get_asset_holders_count)
(get_all_asset_holders)
)
FC_API(graphene::app::login_api,
(login)
(block)
(network_broadcast)
(database)
(history)
(network_node)
(asset)
(debug)
(bookie)
(affiliate_stats)
(sidechain))
// clang-format on
(login)
(block)
(network_broadcast)
(database)
(history)
(network_node)
(crypto)
(asset)
(debug)
(bookie)
(affiliate_stats)
)

View file

@ -31,26 +31,26 @@
namespace graphene { namespace app {
struct api_access_info {
struct api_access_info
{
std::string password_hash_b64;
std::string password_salt_b64;
std::vector<std::string> allowed_apis;
std::vector< std::string > allowed_apis;
};
struct api_access {
std::map<std::string, api_access_info> permission_map;
struct api_access
{
std::map< std::string, api_access_info > permission_map;
};
}} // namespace graphene::app
// clang-format off
} } // graphene::app
FC_REFLECT( graphene::app::api_access_info,
(password_hash_b64)
(password_salt_b64)
(allowed_apis))
(password_hash_b64)
(password_salt_b64)
(allowed_apis)
)
FC_REFLECT( graphene::app::api_access,
(permission_map))
// clang-format on
(permission_map)
)
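For illustration, filling these structures in by hand (the user name, hashes, and allowed API names are placeholders; serialising the result with fc::json follows the reflected field layout above):

graphene::app::api_access_info info;
info.password_hash_b64 = "<base64 of hash(password + salt)>";  // placeholder
info.password_salt_b64 = "<base64 salt>";                      // placeholder
info.allowed_apis      = {"database_api", "network_broadcast_api", "history_api"};

graphene::app::api_access access;
access.permission_map["example-user"] = info;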

View file

@ -24,86 +24,89 @@
#pragma once
#include <graphene/app/api_access.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/net/node.hpp>
#include <graphene/chain/database.hpp>
#include <boost/program_options.hpp>
namespace graphene { namespace app {
namespace detail {
class application_impl;
}
using std::string;
namespace detail { class application_impl; }
using std::string;
class abstract_plugin;
class abstract_plugin;
class application {
public:
application();
~application();
class application
{
public:
application();
~application();
void set_program_options(boost::program_options::options_description &cli,
boost::program_options::options_description &cfg) const;
void initialize(const fc::path &data_dir, const boost::program_options::variables_map &options);
void initialize_plugins(const boost::program_options::variables_map &options);
void startup();
void shutdown();
void startup_plugins();
void shutdown_plugins();
void set_program_options( boost::program_options::options_description& command_line_options,
boost::program_options::options_description& configuration_file_options )const;
void initialize(const fc::path& data_dir, const boost::program_options::variables_map&options);
void initialize_plugins( const boost::program_options::variables_map& options );
void startup();
void shutdown();
void startup_plugins();
void shutdown_plugins();
template <typename PluginType>
std::shared_ptr<PluginType> register_plugin() {
auto plug = std::make_shared<PluginType>();
plug->plugin_set_app(this);
template<typename PluginType>
std::shared_ptr<PluginType> register_plugin()
{
auto plug = std::make_shared<PluginType>();
plug->plugin_set_app(this);
boost::program_options::options_description plugin_cli_options(plug->plugin_name() + " plugin. " + plug->plugin_description() + "\nOptions"), plugin_cfg_options;
plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options);
if (!plugin_cli_options.options().empty())
_cli_options.add(plugin_cli_options);
boost::program_options::options_description plugin_cli_options(plug->plugin_name() + " plugin. " + plug->plugin_description() + "\nOptions"), plugin_cfg_options;
//boost::program_options::options_description plugin_cli_options("Options for plugin " + plug->plugin_name()), plugin_cfg_options;
plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options);
if( !plugin_cli_options.options().empty() )
_cli_options.add(plugin_cli_options);
if (!plugin_cfg_options.options().empty()) {
std::string header_name = "plugin-cfg-header-" + plug->plugin_name();
std::string header_desc = plug->plugin_name() + " plugin options";
_cfg_options.add_options()(header_name.c_str(), header_desc.c_str());
_cfg_options.add(plugin_cfg_options);
}
if( !plugin_cfg_options.options().empty() )
{
std::string header_name = "plugin-cfg-header-" + plug->plugin_name();
std::string header_desc = plug->plugin_name() + " plugin options";
_cfg_options.add_options()(header_name.c_str(), header_desc.c_str());
_cfg_options.add(plugin_cfg_options);
}
add_available_plugin(plug);
return plug;
}
std::shared_ptr<abstract_plugin> get_plugin(const string &name) const;
add_available_plugin( plug );
return plug;
}
std::shared_ptr<abstract_plugin> get_plugin( const string& name )const;
template <typename PluginType>
std::shared_ptr<PluginType> get_plugin(const string &name) const {
std::shared_ptr<abstract_plugin> abs_plugin = get_plugin(name);
std::shared_ptr<PluginType> result = std::dynamic_pointer_cast<PluginType>(abs_plugin);
FC_ASSERT(result != std::shared_ptr<PluginType>());
return result;
}
template<typename PluginType>
std::shared_ptr<PluginType> get_plugin( const string& name ) const
{
std::shared_ptr<abstract_plugin> abs_plugin = get_plugin( name );
std::shared_ptr<PluginType> result = std::dynamic_pointer_cast<PluginType>( abs_plugin );
FC_ASSERT( result != std::shared_ptr<PluginType>() );
return result;
}
net::node_ptr p2p_node();
std::shared_ptr<chain::database> chain_database() const;
net::node_ptr p2p_node();
std::shared_ptr<chain::database> chain_database()const;
void set_block_production(bool producing_blocks);
fc::optional<api_access_info> get_api_access_info(const string &username) const;
void set_api_access_info(const string &username, api_access_info &&permissions);
void set_block_production(bool producing_blocks);
fc::optional< api_access_info > get_api_access_info( const string& username )const;
void set_api_access_info(const string& username, api_access_info&& permissions);
bool is_finished_syncing() const;
/// Emitted when syncing finishes (is_finished_syncing will return true)
boost::signals2::signal<void()> syncing_finished;
bool is_finished_syncing()const;
/// Emitted when syncing finishes (is_finished_syncing will return true)
boost::signals2::signal<void()> syncing_finished;
void enable_plugin(const string &name);
void enable_plugin( const string& name );
bool is_plugin_enabled(const string &name) const;
bool is_plugin_enabled(const string& name) const;
std::shared_ptr<fc::thread> elasticsearch_thread;
std::shared_ptr<fc::thread> elasticsearch_thread;
private:
void add_available_plugin(std::shared_ptr<abstract_plugin> p);
std::shared_ptr<detail::application_impl> my;
private:
void add_available_plugin( std::shared_ptr<abstract_plugin> p );
std::shared_ptr<detail::application_impl> my;
boost::program_options::options_description _cli_options;
boost::program_options::options_description _cfg_options;
};
boost::program_options::options_description _cli_options;
boost::program_options::options_description _cfg_options;
};
}} // namespace graphene::app
} }

View file

@ -23,12 +23,12 @@
*/
#pragma once
#include <boost/program_options.hpp>
#include <fc/filesystem.hpp>
#include <boost/program_options.hpp>
namespace graphene { namespace app {
void load_configuration_options(const fc::path &data_dir, const boost::program_options::options_description &cfg_options,
boost::program_options::variables_map &options);
void load_configuration_options(const fc::path &data_dir, const boost::program_options::options_description &cfg_options,
boost::program_options::variables_map &options);
}} // namespace graphene::app
} } // graphene::app

File diff suppressed because it is too large

View file

@ -24,53 +24,52 @@
#pragma once
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/market_evaluator.hpp>
#include <graphene/chain/vesting_balance_object.hpp>
#include <graphene/chain/market_evaluator.hpp>
#include <graphene/chain/withdraw_permission_object.hpp>
namespace graphene { namespace app {
using namespace graphene::chain;
using namespace graphene::chain;
struct full_account {
account_object account;
account_statistics_object statistics;
string registrar_name;
string referrer_name;
string lifetime_referrer_name;
vector<variant> votes;
optional<vesting_balance_object> cashback_balance;
vector<account_balance_object> balances;
vector<vesting_balance_object> vesting_balances;
vector<limit_order_object> limit_orders;
vector<call_order_object> call_orders;
vector<force_settlement_object> settle_orders;
vector<proposal_object> proposals;
vector<asset_id_type> assets;
vector<withdraw_permission_object> withdraws;
// vector<pending_dividend_payout_balance_object> pending_dividend_payments;
vector<pending_dividend_payout_balance_for_holder_object> pending_dividend_payments;
};
struct full_account
{
account_object account;
account_statistics_object statistics;
string registrar_name;
string referrer_name;
string lifetime_referrer_name;
vector<variant> votes;
optional<vesting_balance_object> cashback_balance;
vector<account_balance_object> balances;
vector<vesting_balance_object> vesting_balances;
vector<limit_order_object> limit_orders;
vector<call_order_object> call_orders;
vector<force_settlement_object> settle_orders;
vector<proposal_object> proposals;
vector<asset_id_type> assets;
vector<withdraw_permission_object> withdraws;
// vector<pending_dividend_payout_balance_object> pending_dividend_payments;
vector<pending_dividend_payout_balance_for_holder_object> pending_dividend_payments;
};
}} // namespace graphene::app
} }
// clang-format off
FC_REFLECT(graphene::app::full_account,
(account)
(statistics)
(registrar_name)
(referrer_name)
(lifetime_referrer_name)
(votes)
(cashback_balance)
(balances)
(vesting_balances)
(limit_orders)
(call_orders)
(settle_orders)
(proposals)
(assets)
(withdraws)
(pending_dividend_payments))
// clang-format on
FC_REFLECT( graphene::app::full_account,
(account)
(statistics)
(registrar_name)
(referrer_name)
(lifetime_referrer_name)
(votes)
(cashback_balance)
(balances)
(vesting_balances)
(limit_orders)
(call_orders)
(settle_orders)
(proposals)
(assets)
(withdraws)
(proposals)
(pending_dividend_payments)
)

View file

@ -30,119 +30,118 @@
namespace graphene { namespace app {
class abstract_plugin {
public:
virtual ~abstract_plugin() {
}
virtual std::string plugin_name() const = 0;
virtual std::string plugin_description() const = 0;
class abstract_plugin
{
public:
virtual ~abstract_plugin(){}
virtual std::string plugin_name()const = 0;
virtual std::string plugin_description()const = 0;
/**
* @brief Perform early startup routines and register plugin indexes, callbacks, etc.
*
* Plugins MUST supply a method initialize() which will be called early in the application startup. This method
* should contain early setup code such as initializing variables, adding indexes to the database, registering
* callback methods from the database, adding APIs, etc., as well as applying any options in the @ref options map
*
* This method is called BEFORE the database is open, therefore any routines which require any chain state MUST
* NOT be called by this method. These routines should be performed in startup() instead.
*
* @param options The options passed to the application, via configuration files or command line
*/
virtual void plugin_initialize(const boost::program_options::variables_map &options) = 0;
/**
* @brief Perform early startup routines and register plugin indexes, callbacks, etc.
*
* Plugins MUST supply a method initialize() which will be called early in the application startup. This method
* should contain early setup code such as initializing variables, adding indexes to the database, registering
* callback methods from the database, adding APIs, etc., as well as applying any options in the @ref options map
*
* This method is called BEFORE the database is open, therefore any routines which require any chain state MUST
* NOT be called by this method. These routines should be performed in startup() instead.
*
* @param options The options passed to the application, via configuration files or command line
*/
virtual void plugin_initialize( const boost::program_options::variables_map& options ) = 0;
/**
* @brief Begin normal runtime operations
*
* Plugins MUST supply a method startup() which will be called at the end of application startup. This method
* should contain code which schedules any tasks, or requires chain state.
*/
virtual void plugin_startup() = 0;
/**
* @brief Begin normal runtime operations
*
* Plugins MUST supply a method startup() which will be called at the end of application startup. This method
* should contain code which schedules any tasks, or requires chain state.
*/
virtual void plugin_startup() = 0;
/**
* @brief Cleanly shut down the plugin.
*
* This is called to request a clean shutdown (e.g. due to SIGINT or SIGTERM).
*/
virtual void plugin_shutdown() = 0;
/**
* @brief Cleanly shut down the plugin.
*
* This is called to request a clean shutdown (e.g. due to SIGINT or SIGTERM).
*/
virtual void plugin_shutdown() = 0;
/**
* @brief Register the application instance with the plugin.
*
* This is called by the framework to set the application.
*/
virtual void plugin_set_app(application *a) = 0;
/**
* @brief Register the application instance with the plugin.
*
* This is called by the framework to set the application.
*/
virtual void plugin_set_app( application* a ) = 0;
/**
* @brief Fill in command line parameters used by the plugin.
*
* @param command_line_options All options this plugin supports taking on the command-line
* @param config_file_options All options this plugin supports storing in a configuration file
*
* This method populates its arguments with any
* command-line and configuration file options the plugin supports.
* If a plugin does not need these options, it
* may simply provide an empty implementation of this method.
*/
virtual void plugin_set_program_options(boost::program_options::options_description &command_line_options,
boost::program_options::options_description &config_file_options) = 0;
/**
* @brief Fill in command line parameters used by the plugin.
*
* @param command_line_options All options this plugin supports taking on the command-line
* @param config_file_options All options this plugin supports storing in a configuration file
*
* This method populates its arguments with any
* command-line and configuration file options the plugin supports.
* If a plugin does not need these options, it
* may simply provide an empty implementation of this method.
*/
virtual void plugin_set_program_options(
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
) = 0;
};
/**
* Provides basic default implementations of abstract_plugin functions.
*/
class plugin : public abstract_plugin {
public:
plugin();
virtual ~plugin() override;
class plugin : public abstract_plugin
{
public:
plugin();
virtual ~plugin() override;
virtual std::string plugin_name() const override;
virtual std::string plugin_description() const override;
virtual void plugin_initialize(const boost::program_options::variables_map &options) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
virtual void plugin_set_app(application *app) override;
virtual void plugin_set_program_options(boost::program_options::options_description &command_line_options,
boost::program_options::options_description &config_file_options) override;
virtual std::string plugin_name()const override;
virtual std::string plugin_description()const override;
virtual void plugin_initialize( const boost::program_options::variables_map& options ) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
virtual void plugin_set_app( application* app ) override;
virtual void plugin_set_program_options(
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
) override;
chain::database &database() {
return *app().chain_database();
}
application &app() const {
assert(_app);
return *_app;
}
chain::database& database() { return *app().chain_database(); }
application& app()const { assert(_app); return *_app; }
protected:
net::node& p2p_node() { return *app().p2p_node(); }
protected:
net::node &p2p_node() {
return *app().p2p_node();
}
private:
application *_app = nullptr;
private:
application* _app = nullptr;
};
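A minimal sketch of a concrete plugin built on these defaults; the name and option are invented for illustration, but real plugins in this tree follow the same lifecycle (options handled in plugin_initialize before the database opens, chain access from plugin_startup onwards):

class hello_plugin : public graphene::app::plugin
{
public:
   std::string plugin_name() const override        { return "hello"; }
   std::string plugin_description() const override { return "example plugin skeleton"; }

   void plugin_set_program_options(boost::program_options::options_description &cli,
                                   boost::program_options::options_description &cfg) override
   {
      cli.add_options()("hello-greeting", boost::program_options::value<std::string>(), "Greeting to log at startup");
      cfg.add(cli);
   }

   void plugin_initialize(const boost::program_options::variables_map &options) override
   {  // called before the database is open: read options only, no chain state
      if (options.count("hello-greeting"))
         _greeting = options["hello-greeting"].as<std::string>();
   }

   void plugin_startup() override
   {  // chain state is available from here on
      ilog("${g}, head block is ${n}", ("g", _greeting)("n", database().head_block_num()));
   }

   void plugin_shutdown() override {}

private:
   std::string _greeting = "hello";
};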
/// @group Some useful tools for boost::program_options arguments using vectors of JSON strings
/// @{
template <typename T>
T dejsonify(const string &s, uint32_t max_depth) {
template<typename T>
T dejsonify(const string& s, uint32_t max_depth)
{
return fc::json::from_string(s).as<T>(max_depth);
}
namespace impl {
template <typename T>
T dejsonify(const string &s) {
return graphene::app::dejsonify<T>(s, GRAPHENE_MAX_NESTED_OBJECTS);
template<typename T>
T dejsonify( const string& s )
{
return graphene::app::dejsonify<T>( s, GRAPHENE_MAX_NESTED_OBJECTS );
}
}
} // namespace impl
#define DEFAULT_VALUE_VECTOR(value) default_value({fc::json::to_string(value)}, fc::json::to_string(value))
#define LOAD_VALUE_SET(options, name, container, type) \
if (options.count(name)) { \
const std::vector<std::string> &ops = options[name].as<std::vector<std::string>>(); \
#define LOAD_VALUE_SET(options, name, container, type) \
if( options.count(name) ) { \
const std::vector<std::string>& ops = options[name].as<std::vector<std::string>>(); \
std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::impl::dejsonify<type>); \
}
}
/// @}
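A hedged sketch of using these helpers inside a plugin's plugin_initialize; the option name and element type are illustrative (each value on the command line is a JSON string, hence the dejsonify step):

// Inside some plugin_initialize(const boost::program_options::variables_map &options):
std::set<graphene::chain::account_id_type> tracked_accounts;

// e.g. passed as:  --tracked-account "\"1.2.0\""  --tracked-account "\"1.2.17\""
LOAD_VALUE_SET(options, "tracked-account", tracked_accounts, graphene::chain::account_id_type);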
}} // namespace graphene::app
} } //graphene::app

View file

@ -27,44 +27,54 @@
namespace graphene { namespace app {
plugin::plugin() {
plugin::plugin()
{
_app = nullptr;
return;
}
plugin::~plugin() {
plugin::~plugin()
{
return;
}
std::string plugin::plugin_name() const {
std::string plugin::plugin_name()const
{
return "<unknown plugin>";
}
std::string plugin::plugin_description() const {
std::string plugin::plugin_description()const
{
return "<no description>";
}
void plugin::plugin_initialize(const boost::program_options::variables_map &options) {
void plugin::plugin_initialize( const boost::program_options::variables_map& options )
{
return;
}
void plugin::plugin_startup() {
void plugin::plugin_startup()
{
return;
}
void plugin::plugin_shutdown() {
void plugin::plugin_shutdown()
{
return;
}
void plugin::plugin_set_app(application *app) {
void plugin::plugin_set_app( application* app )
{
_app = app;
return;
}
void plugin::plugin_set_program_options(
boost::program_options::options_description &cli,
boost::program_options::options_description &cfg) {
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
)
{
return;
}
}} // namespace graphene::app
} } // graphene::app

View file

@ -8,29 +8,142 @@ add_dependencies( build_hardfork_hpp cat-parts )
file(GLOB HEADERS "include/graphene/chain/*.hpp")
file(GLOB PROTOCOL_HEADERS "include/graphene/chain/protocol/*.hpp")
file(GLOB CPP_FILES "*.cpp")
file(GLOB PROTOCOL_CPP_FILES "protocol/*.cpp")
#if( GRAPHENE_DISABLE_UNITY_BUILD )
list(FILTER CPP_FILES EXCLUDE REGEX "[/]database[.]cpp$")
#message ("--- ${CPP_FILES}")
if( GRAPHENE_DISABLE_UNITY_BUILD )
set( GRAPHENE_DB_FILES
db_balance.cpp
db_bet.cpp
db_block.cpp
db_debug.cpp
db_getter.cpp
db_init.cpp
db_maint.cpp
db_management.cpp
db_market.cpp
db_update.cpp
db_witness_schedule.cpp
)
message( STATUS "Graphene database unity build disabled" )
#else( GRAPHENE_DISABLE_UNITY_BUILD )
# list(FILTER CPP_FILES EXCLUDE REGEX ".*db_.*[.]cpp$")
# #message ("--- ${CPP_FILES}")
# message( STATUS "Graphene database unity build enabled" )
#endif( GRAPHENE_DISABLE_UNITY_BUILD )
else( GRAPHENE_DISABLE_UNITY_BUILD )
set( GRAPHENE_DB_FILES
database.cpp )
message( STATUS "Graphene database unity build enabled" )
endif( GRAPHENE_DISABLE_UNITY_BUILD )
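In practice the toggle is passed at configure time, e.g. (illustrative invocation, following the build steps used elsewhere in this repository):

mkdir build && cd build
cmake -DGRAPHENE_DISABLE_UNITY_BUILD=ON ..
make -j$(nproc) graphene_chain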
## SORT .cpp by most likely to change / break compile
add_library( graphene_chain
${CPP_FILES}
${PROTOCOL_CPP_FILES}
# As database takes the longest to compile, start it first
${GRAPHENE_DB_FILES}
fork_database.cpp
protocol/types.cpp
protocol/address.cpp
protocol/authority.cpp
protocol/asset.cpp
protocol/assert.cpp
protocol/account.cpp
protocol/transfer.cpp
protocol/committee_member.cpp
protocol/witness.cpp
protocol/market.cpp
protocol/proposal.cpp
protocol/withdraw_permission.cpp
protocol/asset_ops.cpp
protocol/lottery_ops.cpp
protocol/memo.cpp
protocol/worker.cpp
protocol/custom.cpp
protocol/operations.cpp
protocol/transaction.cpp
protocol/block.cpp
protocol/fee_schedule.cpp
protocol/confidential.cpp
protocol/vote.cpp
protocol/tournament.cpp
protocol/small_ops.cpp
protocol/custom_permission.cpp
protocol/custom_account_authority.cpp
protocol/offer.cpp
genesis_state.cpp
get_config.cpp
pts_address.cpp
evaluator.cpp
balance_evaluator.cpp
account_evaluator.cpp
assert_evaluator.cpp
witness_evaluator.cpp
committee_member_evaluator.cpp
asset_evaluator.cpp
lottery_evaluator.cpp
transfer_evaluator.cpp
proposal_evaluator.cpp
market_evaluator.cpp
vesting_balance_evaluator.cpp
tournament_evaluator.cpp
tournament_object.cpp
match_object.cpp
game_object.cpp
withdraw_permission_evaluator.cpp
worker_evaluator.cpp
confidential_evaluator.cpp
special_authority.cpp
buyback.cpp
account_object.cpp
asset_object.cpp
fba_object.cpp
proposal_object.cpp
vesting_balance_object.cpp
small_objects.cpp
block_database.cpp
is_authorized_asset.cpp
protocol/sport.cpp
sport_evaluator.cpp
protocol/event_group.cpp
event_group_evaluator.cpp
event_group_object.cpp
protocol/event.cpp
event_evaluator.cpp
event_object.cpp
protocol/betting_market.cpp
betting_market_evaluator.cpp
betting_market_object.cpp
betting_market_group_object.cpp
custom_permission_evaluator.cpp
custom_account_authority_evaluator.cpp
affiliate_payout.cpp
offer_object.cpp
offer_evaluator.cpp
nft_evaluator.cpp
protocol/nft.cpp
protocol/account_role.cpp
account_role_evaluator.cpp
son_evaluator.cpp
son_object.cpp
son_wallet_evaluator.cpp
son_wallet_deposit_evaluator.cpp
son_wallet_withdraw_evaluator.cpp
sidechain_address_evaluator.cpp
sidechain_transaction_evaluator.cpp
${HEADERS}
${PROTOCOL_HEADERS}
"${CMAKE_CURRENT_BINARY_DIR}/include/graphene/chain/hardfork.hpp"
)
add_dependencies( graphene_chain build_hardfork_hpp )
target_link_libraries( graphene_chain graphene_db )
target_link_libraries( graphene_chain fc graphene_db )
target_include_directories( graphene_chain
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" )

View file

@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include <fc/smart_ref_impl.hpp>
#include <graphene/chain/account_evaluator.hpp>
#include <graphene/chain/buyback.hpp>
#include <graphene/chain/buyback_object.hpp>
@ -53,54 +55,7 @@ void verify_authority_accounts( const database& db, const authority& a )
}
}
// Overwrites the num_son values from the origin to the destination for those sidechains which are found in the origin.
// Keeps the values of num_son for the sidechains which are found in the destination, but not in the origin.
// Returns false if an error is detected.
bool merge_num_sons( flat_map<sidechain_type, uint16_t>& destination,
const flat_map<sidechain_type, uint16_t>& origin,
fc::optional<time_point_sec> head_block_time = {})
{
const auto active_sidechains = head_block_time.valid() ? active_sidechain_types(*head_block_time) : all_sidechain_types;
bool success = true;
for (const auto &ns : origin)
{
destination[ns.first] = ns.second;
if (active_sidechains.find(ns.first) == active_sidechains.end())
{
success = false;
}
}
return success;
}
flat_map<sidechain_type, uint16_t> count_SON_votes_per_sidechain( const flat_set<vote_id_type>& votes )
{
flat_map<sidechain_type, uint16_t> SON_votes_per_sidechain = account_options::ext::empty_num_son();
for (const auto &vote : votes)
{
switch (vote.type())
{
case vote_id_type::son_bitcoin:
SON_votes_per_sidechain[sidechain_type::bitcoin]++;
break;
case vote_id_type::son_hive:
SON_votes_per_sidechain[sidechain_type::hive]++;
break;
case vote_id_type::son_ethereum:
SON_votes_per_sidechain[sidechain_type::ethereum]++;
break;
default:
break;
}
}
return SON_votes_per_sidechain;
}
void verify_account_votes( const database& db, const account_options& options, fc::optional<account_object> account = {} )
void verify_account_votes( const database& db, const account_options& options )
{
// ensure account's votes satisfy requirements
// NB only the part of vote checking that requires chain state is here,
@ -109,47 +64,10 @@ void verify_account_votes( const database& db, const account_options& options, f
const auto& gpo = db.get_global_properties();
const auto& chain_params = gpo.parameters;
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
FC_ASSERT( options.num_witness <= chain_params.maximum_witness_count,
"Voted for more witnesses than currently allowed (${c})", ("c", chain_params.maximum_witness_count) );
FC_ASSERT( options.num_committee <= chain_params.maximum_committee_count,
"Voted for more committee members than currently allowed (${c})", ("c", chain_params.maximum_committee_count) );
FC_ASSERT( chain_params.extensions.value.maximum_son_count.valid() , "Invalid maximum son count" );
flat_map<sidechain_type, uint16_t> merged_num_sons = account_options::ext::empty_num_son();
// Merge with existing account if exists
if ( account.valid() && account->options.extensions.value.num_son.valid())
{
merge_num_sons( merged_num_sons, *account->options.extensions.value.num_son, db.head_block_time() );
}
// Apply update operation on top
if ( options.extensions.value.num_son.valid() )
{
merge_num_sons( merged_num_sons, *options.extensions.value.num_son, db.head_block_time() );
}
for(const auto& num_sons : merged_num_sons)
{
FC_ASSERT( num_sons.second <= *chain_params.extensions.value.maximum_son_count,
"Voted for more sons than currently allowed (${c})", ("c", *chain_params.extensions.value.maximum_son_count) );
}
// Count the votes for SONs and confirm that the account did not vote for less SONs than num_son
flat_map<sidechain_type, uint16_t> SON_votes_per_sidechain = count_SON_votes_per_sidechain(options.votes);
for (const auto& number_of_votes : SON_votes_per_sidechain)
{
// Number of votes of account_options are also checked in account_options::do_evaluate,
// but there we are checking the value before merging num_sons, so the values should be checked again
const auto sidechain = number_of_votes.first;
FC_ASSERT( number_of_votes.second >= merged_num_sons[sidechain],
"Voted for less sons than specified in num_son (votes ${v} < num_son ${ns}) for sidechain ${s}",
("v", number_of_votes.second) ("ns", merged_num_sons[sidechain]) ("s", sidechain) );
}
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
uint32_t max_vote_id = gpo.next_available_vote_id;
@ -263,13 +181,6 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio
obj.owner = o.owner;
obj.active = o.active;
obj.options = o.options;
obj.options.extensions.value.num_son = account_options::ext::empty_num_son();
if ( o.options.extensions.value.num_son.valid() )
{
merge_num_sons( *obj.options.extensions.value.num_son, *o.options.extensions.value.num_son );
}
obj.statistics = d.create<account_statistics_object>([&obj](account_statistics_object& s){
s.owner = obj.id;
s.name = obj.name;
@ -369,7 +280,7 @@ void_result account_update_evaluator::do_evaluate( const account_update_operatio
acnt = &o.account(d);
if( o.new_options.valid() )
verify_account_votes( d, *o.new_options, *acnt );
verify_account_votes( d, *o.new_options );
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
@ -408,31 +319,7 @@ void_result account_update_evaluator::do_apply( const account_update_operation&
a.active = *o.active;
a.top_n_control_flags = 0;
}
// New num_son structure initialized to 0
flat_map<sidechain_type, uint16_t> new_num_son = account_options::ext::empty_num_son();
// If num_son of existing object is valid, we should merge the existing data
if ( a.options.extensions.value.num_son.valid() )
{
merge_num_sons( new_num_son, *a.options.extensions.value.num_son );
}
// If num_son of the operation are valid, they should merge the existing data
if ( o.new_options )
{
const auto new_options = *o.new_options;
if ( new_options.extensions.value.num_son.valid() )
{
merge_num_sons( new_num_son, *new_options.extensions.value.num_son );
}
a.options = *o.new_options;
}
a.options.extensions.value.num_son = new_num_son;
if( o.new_options ) a.options = *o.new_options;
if( o.extensions.value.owner_special_authority.valid() )
{
a.owner_special_authority = *(o.extensions.value.owner_special_authority);
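
The helpers removed above do two pieces of bookkeeping: merge_num_sons copies per-sidechain SON counts from an origin map over a destination map (keeping destination entries the origin does not mention, and reporting false when the origin names a sidechain that is not currently active), and count_SON_votes_per_sidechain tallies the account's votes by vote type. A minimal standalone sketch of the merge semantics, using std::map, std::set and a stand-in enum instead of the chain's flat_map and sidechain_type, is:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

enum class sidechain { bitcoin, hive, ethereum }; // stand-in for sidechain_type

// Overwrite counts from `origin` into `destination`; report false if any
// sidechain named in `origin` is not currently active (mirrors the removed helper).
bool merge_num_sons(std::map<sidechain, uint16_t>& destination,
                    const std::map<sidechain, uint16_t>& origin,
                    const std::set<sidechain>& active)
{
   bool success = true;
   for (const auto& ns : origin) {
      destination[ns.first] = ns.second;          // origin always wins
      if (active.find(ns.first) == active.end())  // keep merging, but flag it
         success = false;
   }
   return success;
}

int main()
{
   std::map<sidechain, uint16_t> existing{{sidechain::bitcoin, 5}, {sidechain::hive, 3}};
   std::map<sidechain, uint16_t> update{{sidechain::bitcoin, 7}, {sidechain::ethereum, 2}};
   std::set<sidechain> active{sidechain::bitcoin, sidechain::hive};

   const bool ok = merge_num_sons(existing, update, active);
   // existing now holds bitcoin=7 (overwritten), hive=3 (kept), ethereum=2 (added);
   // ok is false because ethereum is not in the active set.
   std::cout << std::boolalpha << ok << ' ' << existing[sidechain::bitcoin] << '\n';
}

In the removed account_update path this merge runs twice — first with the stored account options, then with the operation's new options — so the operation's counts win while sidechains it does not mention keep their previous values.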

View file

@ -42,7 +42,8 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
database& d = db();
FC_ASSERT(d.is_asset_creation_allowed(op.symbol), "Asset creation not allowed at current time");
if (d.head_block_time() < HARDFORK_SON_TIME)
FC_ASSERT(op.symbol != "BTC", "BTC asset creation before SON hardfork");
const auto& chain_parameters = d.get_global_properties().parameters;
FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
@ -78,7 +79,7 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
{
auto dotpos = op.symbol.rfind( '.' );
if( dotpos != std::string::npos )
{
auto prefix = op.symbol.substr( 0, dotpos );
auto asset_symbol_itr = asset_indx.find( prefix );
@ -121,7 +122,7 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
FC_ASSERT( op.bitasset_opts );
FC_ASSERT( op.precision == op.bitasset_opts->short_backing_asset(d).precision );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (op) ) }
@ -176,7 +177,7 @@ object_id_type asset_create_evaluator::do_apply( const asset_create_operation& o
a.options.core_exchange_rate.base.asset_id = next_asset_id;
a.dynamic_asset_data_id = dyn_asset.id;
if( op.bitasset_opts.valid() )
a.bitasset_data_id = bit_asset_id;
});
@ -190,8 +191,6 @@ void_result lottery_asset_create_evaluator::do_evaluate( const lottery_asset_cre
database& d = db();
FC_ASSERT(d.is_asset_creation_allowed(op.symbol), "Lottery asset creation not allowed at current time");
const auto& chain_parameters = d.get_global_properties().parameters;
FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
FC_ASSERT( op.common_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
@ -226,7 +225,7 @@ void_result lottery_asset_create_evaluator::do_evaluate( const lottery_asset_cre
{
auto dotpos = op.symbol.rfind( '.' );
if( dotpos != std::string::npos )
{
auto prefix = op.symbol.substr( 0, dotpos );
auto asset_symbol_itr = asset_indx.find( prefix );
@ -579,7 +578,7 @@ void_result asset_update_dividend_evaluator::do_evaluate(const asset_update_divi
auto& params = db().get_global_properties().parameters;
if (o.new_options.payout_interval &&
*o.new_options.payout_interval < params.maintenance_interval)
FC_THROW("New payout interval may not be less than the maintenance interval",
FC_THROW("New payout interval may not be less than the maintenance interval",
("new_payout_interval", o.new_options.payout_interval)("maintenance_interval", params.maintenance_interval));
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
@ -597,6 +596,7 @@ void_result asset_update_dividend_evaluator::do_apply( const asset_update_divide
obj.referrer = op.issuer;
obj.lifetime_referrer = op.issuer(db()).lifetime_referrer;
auto& params = db().get_global_properties().parameters;
obj.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
obj.lifetime_referrer_fee_percentage = GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE;
obj.referrer_rewards_percentage = GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE;

View file

@ -266,7 +266,7 @@ map< account_id_type, vector< uint16_t > > asset_object::distribute_winners_part
*t += percents_to_distribute / holders.size();
}
auto sweeps_distribution_percentage = db.get_global_properties().parameters.sweeps_distribution_percentage();
for( size_t c = 0; c < winner_numbers.size(); ++c ) {
for( int c = 0; c < winner_numbers.size(); ++c ) {
auto winner_num = winner_numbers[c];
lottery_reward_operation reward_op;
reward_op.lottery = get_id();

View file

@ -22,6 +22,7 @@
* THE SOFTWARE.
*/
#define DEFAULT_LOGGER "betting"
#include <fc/smart_ref_impl.hpp>
#include <graphene/chain/betting_market_evaluator.hpp>
#include <graphene/chain/betting_market_object.hpp>

View file

@ -541,7 +541,7 @@ void betting_market_group_object::dispatch_new_status(database& db, betting_mark
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect betting_market_group_object to variant to properly reflect "state"
void to_variant(const graphene::chain::betting_market_group_object& betting_market_group_obj, fc::variant& v, uint32_t max_depth)
{

View file

@ -466,7 +466,7 @@ void betting_market_object::on_canceled_event(database& db)
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect betting_market_object to variant to properly reflect "state"
void to_variant(const graphene::chain::betting_market_object& event_obj, fc::variant& v, uint32_t max_depth)
{
@ -493,3 +493,4 @@ namespace fc {
const_cast<int*>(event_obj.my->state_machine.current_state())[0] = (int)status;
}
} //end namespace fc

View file

@ -24,6 +24,7 @@
#include <graphene/chain/block_database.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/io/raw.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
@ -76,10 +77,6 @@ void block_database::flush()
void block_database::store( const block_id_type& _id, const signed_block& b )
{
if (true == replay_mode){
return;
}
block_id_type id = _id;
if( id == block_id_type() )
{
@ -103,15 +100,8 @@ void block_database::remove( const block_id_type& id )
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos ){
if ( _block_num_to_pos.tellg() <= index_pos )
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database", ("id", id));
}
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
@ -125,27 +115,20 @@ void block_database::remove( const block_id_type& id )
} FC_CAPTURE_AND_RETHROW( (id) ) }
bool block_database::contains( const block_id_type& id )const
{ try {
{
if( id == block_id_type() )
return false;
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) < index_pos + sizeof(e) )
if ( _block_num_to_pos.tellg() < index_pos + sizeof(e) )
return false;
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
return e.block_id == id && e.block_size > 0;
} FC_CAPTURE_AND_RETHROW( (id) ) }
}
block_id_type block_database::fetch_block_id( uint32_t block_num )const
{
@ -170,13 +153,7 @@ optional<signed_block> block_database::fetch_optional( const block_id_type& id )
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos )
if ( _block_num_to_pos.tellg() <= index_pos )
return {};
_block_num_to_pos.seekg( index_pos );
@ -208,12 +185,7 @@ optional<signed_block> block_database::fetch_by_number( uint32_t block_num )cons
index_entry e;
auto index_pos = sizeof(e)*block_num;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${block_num} not contained in block database, _block_num_to_pos.tellg failed", ("block_num", block_num));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos )
if ( _block_num_to_pos.tellg() <= index_pos )
return {};
_block_num_to_pos.seekg( index_pos, _block_num_to_pos.beg );
@ -242,11 +214,7 @@ optional<index_entry> block_database::last_index_entry()const {
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos pos = _block_num_to_pos.tellg();
if (-1 == pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "last_index_entry tellg failed");
}
if( static_cast<size_t>(pos) < sizeof(index_entry) )
if( pos < sizeof(index_entry) )
return optional<index_entry>();
pos -= pos % sizeof(index_entry);
@ -259,7 +227,7 @@ optional<index_entry> block_database::last_index_entry()const {
_block_num_to_pos.seekg( pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0
&& e.block_pos + static_cast<uint64_t>(e.block_size) <= static_cast<uint64_t>(blocks_size) )
&& e.block_pos + e.block_size <= blocks_size )
try
{
vector<char> data( e.block_size );
@ -304,9 +272,4 @@ optional<block_id_type> block_database::last_id()const
return optional<block_id_type>();
}
void block_database::set_replay_mode(bool mode)
{
replay_mode = mode;
}
} }
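
The checks removed above all guard the same failure mode: std::fstream::tellg() returns std::streampos(-1) when the index stream is in a failed state, and comparing that value directly against an unsigned position (as the older one-line checks do) lets the -1 slip past the size test after an implicit signed-to-unsigned conversion, so the subsequent seekg/read runs on a bad stream. A small sketch of the guarded pattern, with entry_size standing in for sizeof(index_entry), is:

#include <cstddef>
#include <cstdint>
#include <fstream>
#include <stdexcept>

// Return true if the index file is long enough to contain entry `n`,
// throwing instead of silently mis-comparing when tellg() fails.
bool index_contains(std::ifstream& index, uint64_t n, std::size_t entry_size /* e.g. sizeof(index_entry) */)
{
   const uint64_t index_pos = entry_size * n;
   index.seekg(0, index.end);
   const std::streampos s_pos = index.tellg();
   if (s_pos == std::streampos(-1))
      throw std::runtime_error("tellg failed; index stream is in a bad state");
   // Cast only after the failure case is ruled out, so -1 never wraps around.
   return static_cast<uint64_t>(s_pos) >= index_pos + entry_size;
}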

View file

@ -30,6 +30,8 @@
#include <graphene/chain/protocol/vote.hpp>
#include <graphene/chain/transaction_evaluation_state.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
void_result committee_member_create_evaluator::do_evaluate( const committee_member_create_operation& op )

View file

@ -29,167 +29,155 @@
#include <graphene/chain/fba_accumulator_id.hpp>
#include <graphene/chain/hardfork.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
void_result transfer_to_blind_evaluator::do_evaluate( const transfer_to_blind_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME )
{
const auto& atype = o.amount.asset_id(d);
FC_ASSERT( atype.allow_confidential() );
FC_ASSERT( !atype.is_transfer_restricted() );
FC_ASSERT( !(atype.options.flags & white_list) );
const auto& d = db();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
}
const auto& atype = o.amount.asset_id(db());
FC_ASSERT( atype.allow_confidential() );
FC_ASSERT( !atype.is_transfer_restricted() );
FC_ASSERT( !(atype.options.flags & white_list) );
return void_result();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_to_blind_evaluator::do_apply( const transfer_to_blind_operation& o )
void_result transfer_to_blind_evaluator::do_apply( const transfer_to_blind_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.from, -o.amount);
db().adjust_balance( o.from, -o.amount );
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply += o.amount.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.amount.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
}
return void_result();
const auto& add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply += o.amount.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
for( const auto& out : o.outputs )
{
db().create<blinded_balance_object>( [&]( blinded_balance_object& obj ){
obj.asset_id = o.amount.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_to_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_to_blind);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_transfer_to_blind );
else
generic_evaluator::pay_fee();
}
void_result transfer_from_blind_evaluator::do_evaluate( const transfer_from_blind_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
const auto& d = db();
o.fee.asset_id(d); // verify fee is a legit asset
const auto& bbi = d.get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
FC_ASSERT( itr != cidx.end() );
FC_ASSERT( itr->asset_id == o.fee.asset_id );
FC_ASSERT( itr->owner == in.owner );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_from_blind_evaluator::do_apply( const transfer_from_blind_operation& o )
void_result transfer_from_blind_evaluator::do_apply( const transfer_from_blind_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee);
db().adjust_balance(o.to, o.amount);
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
db().remove(*itr);
}
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.amount.amount + o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
db().adjust_balance( o.fee_payer(), o.fee );
db().adjust_balance( o.to, o.amount );
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
FC_ASSERT( itr != cidx.end() );
db().remove( *itr );
}
const auto& add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply -= o.amount.amount + o.fee.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_from_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_from_blind);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_transfer_from_blind );
else
generic_evaluator::pay_fee();
}
void_result blind_transfer_evaluator::do_evaluate( const blind_transfer_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &out : o.outputs) {
for (const auto &a : out.owner.account_auths)
a.first(d); // verify all accounts exist and are valid
}
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
const auto& d = db();
o.fee.asset_id(db()); // verify fee is a legit asset
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
GRAPHENE_ASSERT( itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment",in.commitment) );
FC_ASSERT( itr->asset_id == o.fee.asset_id );
FC_ASSERT( itr->owner == in.owner );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result blind_transfer_evaluator::do_apply( const blind_transfer_operation& o )
void_result blind_transfer_evaluator::do_apply( const blind_transfer_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee); // deposit the fee to the temp account
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
db().remove(*itr);
}
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.fee.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
const auto &add = o.fee.asset_id(db()).dynamic_asset_data_id(db());
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
db().adjust_balance( o.fee_payer(), o.fee ); // deposit the fee to the temp account
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
GRAPHENE_ASSERT( itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment",in.commitment) );
db().remove( *itr );
}
for( const auto& out : o.outputs )
{
db().create<blinded_balance_object>( [&]( blinded_balance_object& obj ){
obj.asset_id = o.fee.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
const auto& add = o.fee.asset_id(db()).dynamic_asset_data_id(db());
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply -= o.fee.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void blind_transfer_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_blind_transfer);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_blind_transfer );
else
generic_evaluator::pay_fee();
}
} } // graphene::chain
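
The confidential-transfer evaluators above are rewritten so that their whole body only executes while head_block_time() is still earlier than HARDFORK_SON_FOR_ETHEREUM_TIME; after that fork, evaluate, apply and pay_fee all degenerate to no-ops. A toy sketch of that gating shape, with a hypothetical fork timestamp and a placeholder body, is:

#include <cstdint>
#include <iostream>

using time_point_sec = uint32_t;                              // stand-in for fc::time_point_sec
constexpr time_point_sec HARDFORK_EXAMPLE_TIME = 1700000000;  // hypothetical fork time

// Run the legacy evaluator body only while the chain is still before the fork;
// afterwards the operation is accepted as a no-op (mirrors returning void_result()).
bool evaluate_confidential_op(time_point_sec head_block_time)
{
   if (head_block_time < HARDFORK_EXAMPLE_TIME) {
      // ... full pre-fork validation and state changes would go here ...
      std::cout << "pre-fork path executed\n";
   }
   return true;
}

int main()
{
   evaluate_confidential_op(HARDFORK_EXAMPLE_TIME - 1); // runs the legacy body
   evaluate_confidential_op(HARDFORK_EXAMPLE_TIME + 1); // skips it after the fork
}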

View file

@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <fc/smart_ref_impl.hpp>
#include "db_balance.cpp"
#include "db_bet.cpp"
#include "db_block.cpp"
@ -30,6 +31,6 @@
#include "db_maint.cpp"
#include "db_management.cpp"
#include "db_market.cpp"
#include "db_notify.cpp"
#include "db_update.cpp"
#include "db_witness_schedule.cpp"
#include "db_notify.cpp"

View file

@ -140,10 +140,8 @@ void database::adjust_sweeps_vesting_balance(account_id_type account, int64_t de
b.balance = delta;
});
} else {
if( delta < 0 ) {
uint64_t delta_uint64 = -delta;
FC_ASSERT( itr->get_balance() >= delta_uint64, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account)("b",itr->get_balance())("r",-delta));
}
if( delta < 0 )
FC_ASSERT( itr->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account)("b",itr->get_balance())("r",-delta));
modify(*itr, [&delta,&asset_id,this](sweeps_vesting_balance_object& b) {
b.adjust_balance( asset( delta, asset_id ) );
b.last_claim_date = head_block_time();
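
The replacement above changes how a negative delta is compared against the unsigned vesting balance: instead of writing itr->get_balance() >= -delta and relying on the implicit signed-to-unsigned conversion, it stores the magnitude in a uint64_t first so the intent is explicit and the comparison is between two unsigned values. A small sketch of that check, assuming the balance is a uint64_t and delta stays above INT64_MIN, is:

#include <cassert>
#include <cstdint>

// Check that `balance` can absorb a negative adjustment `delta` (delta < 0),
// converting the magnitude to unsigned explicitly before comparing.
bool can_withdraw(uint64_t balance, int64_t delta)
{
   assert(delta < 0);                                      // caller only asks for withdrawals
   const uint64_t needed = static_cast<uint64_t>(-delta);  // explicit magnitude (assumes delta > INT64_MIN)
   return balance >= needed;
}

int main()
{
   assert(can_withdraw(100, -40));
   assert(!can_withdraw(100, -140));
}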

View file

@ -303,6 +303,8 @@ void database::settle_betting_market_group(const betting_market_group_object& be
remove(betting_market);
}
const event_object& event = betting_market_group.event_id(*this);
fc_dlog(fc::logger::get("betting"), "removing betting market group ${id}", ("id", betting_market_group.id));
remove(betting_market_group);
@ -535,9 +537,11 @@ int match_bet(database& db, const bet_object& taker_bet, const bet_object& maker
// because we matched at the maker's odds and not the taker's odds, the remaining amount to match
// may not be an even multiple of the taker's odds; round it down.
share_type taker_remaining_factor = unrounded_taker_remaining_amount_to_match / takers_odds_maker_odds_ratio;
share_type taker_remaining_maker_amount_to_match = taker_remaining_factor * takers_odds_maker_odds_ratio;
share_type taker_remaining_bet_amount = taker_remaining_factor * takers_odds_taker_odds_ratio;
taker_refund_amount = taker_bet.amount_to_bet.amount - taker_amount_to_match - taker_remaining_bet_amount;
//idump((taker_remaining_factor)(taker_remaining_maker_amount_to_match)(taker_remaining_bet_amount)(taker_refund_amount));
}
if (taker_refund_amount > share_type())

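The comment above explains why a refund can appear when a taker bet matches at the maker's odds: the taker's leftover stake is snapped down to the largest whole multiple of the odds ratio, and anything that cannot be matched at those odds is returned. A simplified arithmetic sketch of the round-down step, with made-up ratio values (the chain derives the real refund from the taker's full stake minus everything matched), is:

#include <cstdint>
#include <iostream>

int main()
{
   // Hypothetical ratios; in the chain these come from the two bets' odds.
   const int64_t takers_odds_maker_odds_ratio = 7;
   const int64_t takers_odds_taker_odds_ratio = 5;

   const int64_t unrounded_remaining = 45;   // leftover amount to match at the maker's odds

   // Integer division rounds down to the largest factor that divides evenly.
   const int64_t factor                = unrounded_remaining / takers_odds_maker_odds_ratio; // 6
   const int64_t remaining_maker_match = factor * takers_odds_maker_odds_ratio;              // 42
   const int64_t remaining_bet_amount  = factor * takers_odds_taker_odds_ratio;              // 30

   // The 3 that cannot be expressed as a whole multiple is what ends up refunded.
   const int64_t leftover = unrounded_remaining - remaining_maker_match;                     // 3
   std::cout << factor << ' ' << remaining_maker_match << ' '
             << remaining_bet_amount << ' ' << leftover << '\n';
}
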
View file

@ -40,17 +40,16 @@
#include <graphene/chain/exceptions.hpp>
#include <graphene/chain/evaluator.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/db/object_database.hpp>
#include <fc/crypto/digest.hpp>
#include <boost/filesystem.hpp>
#include <fc/smart_ref_impl.hpp>
namespace {
struct proposed_operations_digest_accumulator
{
typedef void result_type;
void operator()(const graphene::chain::proposal_create_operation& proposal)
{
for (auto& operation: proposal.proposed_ops)
@ -58,20 +57,20 @@ namespace {
proposed_operations_digests.push_back(fc::digest(operation.op));
}
}
//empty template method is needed for all other operation types
//we can ignore them, we are interested in only proposal_create_operation
template<class T>
void operator()(const T&)
void operator()(const T&)
{}
std::vector<fc::sha256> proposed_operations_digests;
};
std::vector<fc::sha256> gather_proposed_operations_digests(const graphene::chain::transaction& trx)
{
proposed_operations_digest_accumulator digest_accumulator;
for (auto& operation: trx.operations)
{
if( operation.which() != graphene::chain::operation::tag<graphene::chain::betting_market_group_create_operation>::value
@ -80,7 +79,7 @@ namespace {
else
edump( ("Found dup"));
}
return digest_accumulator.proposed_operations_digests;
}
}
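
The accumulator above is a visitor over graphene's operation variant: proposal_create_operation contributes a digest per nested operation, while the templated catch-all ignores every other operation type, and gather_proposed_operations_digests simply visits each operation in a transaction. A standalone sketch of the same visitor shape, using std::variant and std::hash in place of the static_variant and fc::digest, is:

#include <cstddef>
#include <functional>
#include <string>
#include <variant>
#include <vector>

struct transfer_op        { std::string memo; };
struct proposal_create_op { std::vector<std::string> proposed_ops; };
using operation = std::variant<transfer_op, proposal_create_op>;

struct digest_accumulator
{
   // Only proposals contribute digests for the duplicate check...
   void operator()(const proposal_create_op& proposal)
   {
      for (const auto& op : proposal.proposed_ops)
         digests.push_back(std::hash<std::string>{}(op));
   }
   // ...every other operation type is ignored through this catch-all overload.
   template <class T>
   void operator()(const T&) {}

   std::vector<std::size_t> digests;
};

std::vector<std::size_t> gather_digests(const std::vector<operation>& ops)
{
   digest_accumulator acc;
   for (const auto& op : ops)
      std::visit(acc, op);   // graphene calls operation.visit(acc) on its static_variant
   return acc.digests;
}

int main()
{
   std::vector<operation> trx{transfer_op{"hello"},
                              proposal_create_op{{"op-a", "op-b"}}};
   return gather_digests(trx).size() == 2 ? 0 : 1;
}
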
@ -150,27 +149,24 @@ std::vector<block_id_type> database::get_block_ids_on_fork(block_id_type head_of
result.emplace_back(branches.first.back()->previous_id());
return result;
}
void database::check_transaction_for_duplicated_operations(const signed_transaction& trx)
void database::check_tansaction_for_duplicated_operations(const signed_transaction& trx)
{
const auto& proposal_index = get_index<proposal_object>();
std::set<fc::sha256> existed_operations_digests;
proposal_index.inspect_all_objects( [&](const object& obj){
const proposal_object& proposal = static_cast<const proposal_object&>(obj);
auto proposed_operations_digests = gather_proposed_operations_digests( proposal.proposed_transaction );
existed_operations_digests.insert( proposed_operations_digests.begin(), proposed_operations_digests.end() );
});
for (auto& pending_transaction: _pending_tx)
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
for (auto &pending_transaction : _pending_tx)
{
auto proposed_operations_digests = gather_proposed_operations_digests(pending_transaction);
existed_operations_digests.insert(proposed_operations_digests.begin(), proposed_operations_digests.end());
}
auto proposed_operations_digests = gather_proposed_operations_digests(pending_transaction);
existed_operations_digests.insert(proposed_operations_digests.begin(), proposed_operations_digests.end());
}
auto proposed_operations_digests = gather_proposed_operations_digests(trx);
for (auto& digest: proposed_operations_digests)
{
@ -190,12 +186,7 @@ bool database::push_block(const signed_block& new_block, uint32_t skip)
bool result;
detail::with_skip_flags( *this, skip, [&]()
{
std::vector<processed_transaction> pending_tx = [this] {
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
return std::move(_pending_tx);
}();
detail::without_pending_transactions( *this, std::move(pending_tx),
detail::without_pending_transactions( *this, std::move(_pending_tx),
[&]()
{
result = _push_block(new_block);
@ -206,9 +197,6 @@ bool database::push_block(const signed_block& new_block, uint32_t skip)
bool database::_push_block(const signed_block& new_block)
{ try {
boost::filesystem::space_info si = boost::filesystem::space(get_data_dir());
FC_ASSERT((si.available) > 104857600, "Rejecting block due to low disk space"); // 104857600 bytes = 100 MB
uint32_t skip = get_node_properties().skip_flags;
const auto now = fc::time_point::now().sec_since_epoch();
@ -350,7 +338,7 @@ void database::verify_signing_witness( const signed_block& new_block, const fork
FC_ASSERT( new_block.witness == wid, "Witness produced block at wrong time",
("block witness",new_block.witness)("scheduled",wid)("slot_num",slot_num) );
FC_ASSERT( new_block.validate_signee( wid(*this).signing_key ) );
}
}
}
void database::update_witnesses( fork_item& fork_entry )const
@ -364,7 +352,7 @@ void database::update_witnesses( fork_item& fork_entry )const
const witness_schedule_object& wso = get_witness_schedule_object();
fork_entry.scheduled_witnesses = std::make_shared< vector< pair< witness_id_type, public_key_type > > >();
fork_entry.scheduled_witnesses->reserve( wso.current_shuffled_witnesses.size() );
for( size_t i = 0; i < wso.current_shuffled_witnesses.size(); ++i )
{
const auto& witness = wso.current_shuffled_witnesses[i](*this);
@ -395,26 +383,17 @@ processed_transaction database::_push_transaction( const signed_transaction& trx
{
// If this is the first transaction pushed after applying a block, start a new undo session.
// This allows us to quickly rewind to the clean state of the head block, in case a new block arrives.
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
if (!_pending_tx_session.valid()) {
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_pending_tx_session = _undo_db.start_undo_session();
}
}
if( !_pending_tx_session.valid() )
_pending_tx_session = _undo_db.start_undo_session();
// Create a temporary undo session as a child of _pending_tx_session.
// The temporary session will be discarded by the destructor if
// _apply_transaction fails. If we make it to merge(), we
// apply the changes.
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
auto temp_session = _undo_db.start_undo_session();
auto processed_trx = _apply_transaction(trx);
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
_pending_tx.push_back(processed_trx);
}
auto processed_trx = _apply_transaction( trx );
_pending_tx.push_back(processed_trx);
// notify_changed_objects();
// The transaction applied successfully. Merge its changes into the pending block session.
@ -427,7 +406,6 @@ processed_transaction database::_push_transaction( const signed_transaction& trx
processed_transaction database::validate_transaction( const signed_transaction& trx )
{
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
auto session = _undo_db.start_undo_session();
return _apply_transaction( trx );
}
@ -456,12 +434,7 @@ processed_transaction database::push_proposal(const proposal_object& proposal)
{
for( size_t i=old_applied_ops_size,n=_applied_ops.size(); i<n; i++ )
{
if(_applied_ops[i].valid()) {
ilog("removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])));
}
else{
ilog("Can't remove failed operation from applied_ops (operation is not valid), op_id : ${op_id}", ("op_id", i));
}
ilog( "removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])) );
_applied_ops[i].reset();
}
}
@ -527,52 +500,47 @@ signed_block database::_generate_block(
// the value of the "when" variable is known, which means we need to
// re-apply pending transactions in this method.
//
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
_pending_tx_session = _undo_db.start_undo_session();
}
_pending_tx_session.reset();
_pending_tx_session = _undo_db.start_undo_session();
uint64_t postponed_tx_count = 0;
// pop pending state (reset to head block state)
for( const processed_transaction& tx : _pending_tx )
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
for (const processed_transaction &tx : _pending_tx) {
size_t new_total_size = total_block_size + fc::raw::pack_size(tx);
size_t new_total_size = total_block_size + fc::raw::pack_size( tx );
// postpone transaction if it would make block too big
if (new_total_size >= maximum_block_size) {
postponed_tx_count++;
continue;
}
// postpone transaction if it would make block too big
if( new_total_size >= maximum_block_size )
{
postponed_tx_count++;
continue;
}
try {
auto temp_session = _undo_db.start_undo_session();
processed_transaction ptx = _apply_transaction(tx);
temp_session.merge();
try
{
auto temp_session = _undo_db.start_undo_session();
processed_transaction ptx = _apply_transaction( tx );
temp_session.merge();
// We have to recompute pack_size(ptx) because it may be different
// than pack_size(tx) (i.e. if one or more results increased
// their size)
total_block_size += fc::raw::pack_size(ptx);
pending_block.transactions.push_back(ptx);
} catch (const fc::exception &e) {
// Do nothing, transaction will not be re-applied
wlog("Transaction was not processed while generating block due to ${e}", ("e", e));
wlog("The transaction was ${t}", ("t", tx));
}
// We have to recompute pack_size(ptx) because it may be different
// than pack_size(tx) (i.e. if one or more results increased
// their size)
total_block_size += fc::raw::pack_size( ptx );
pending_block.transactions.push_back( ptx );
}
catch ( const fc::exception& e )
{
// Do nothing, transaction will not be re-applied
wlog( "Transaction was not processed while generating block due to ${e}", ("e", e) );
wlog( "The transaction was ${t}", ("t", tx) );
}
}
if( postponed_tx_count > 0 )
{
wlog( "Postponed ${n} transactions due to block size limit", ("n", postponed_tx_count) );
}
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
}
_pending_tx_session.reset();
// We have temporarily broken the invariant that
// _pending_tx_session is the result of applying _pending_tx, as
@ -584,7 +552,7 @@ signed_block database::_generate_block(
pending_block.timestamp = when;
pending_block.transaction_merkle_root = pending_block.calculate_merkle_root();
pending_block.witness = witness_id;
// Genesis witnesses start with a default initial secret
if( witness_obj.next_secret_hash == secret_hash_type::hash( secret_hash_type() ) ) {
pending_block.previous_secret = secret_hash_type();
@ -594,7 +562,7 @@ signed_block database::_generate_block(
fc::raw::pack( last_enc, witness_obj.previous_secret );
pending_block.previous_secret = last_enc.result();
}
secret_hash_type::encoder next_enc;
fc::raw::pack( next_enc, block_signing_private_key );
fc::raw::pack( next_enc, pending_block.previous_secret );
@ -620,11 +588,7 @@ signed_block database::_generate_block(
*/
void database::pop_block()
{ try {
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
}
_pending_tx_session.reset();
auto head_id = head_block_id();
optional<signed_block> head_block = fetch_block_by_id( head_id );
GRAPHENE_ASSERT( head_block.valid(), pop_empty_chain, "there are no blocks to pop" );
@ -638,8 +602,6 @@ void database::pop_block()
void database::clear_pending()
{ try {
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
assert( (_pending_tx.size() == 0) || _pending_tx_session.valid() );
_pending_tx.clear();
_pending_tx_session.reset();
@ -658,7 +620,7 @@ uint32_t database::push_applied_operation( const operation& op )
void database::set_applied_operation_result( uint32_t op_id, const operation_result& result )
{
assert( op_id < _applied_ops.size() );
if( _applied_ops[op_id].valid() )
if( _applied_ops[op_id] )
_applied_ops[op_id]->result = result;
else
{
@ -731,19 +693,16 @@ void database::_apply_block( const signed_block& next_block )
// For VOPs derived directly from a real op,
// use the real op's (block_num,trx_in_block,op_in_trx), virtual_op starts from 1.
// For VOPs created after processed all transactions,
// trx_in_block = the_block.trsanctions.size(), virtual_op starts from 0.
// trx_in_block = the_block.trsanctions.size(), virtual_op starts from 0.
++_current_trx_in_block;
_current_op_in_trx = 0;
_current_virtual_op = 0;
_current_virtual_op = 0;
}
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM) {
update_witness_schedule(next_block);
for(const auto& active_sons : global_props.active_sons) {
if(!active_sons.second.empty()) {
update_son_schedule(active_sons.first, next_block);
}
if(global_props.active_sons.size() > 0) {
update_son_schedule(next_block);
}
}
@ -755,10 +714,9 @@ void database::_apply_block( const signed_block& next_block )
// Are we at the maintenance interval?
if( maint_needed )
perform_chain_maintenance(next_block, global_props);
check_ending_lotteries();
check_ending_nft_lotteries();
create_block_summary(next_block);
place_delayed_bets(); // must happen after update_global_dynamic_data() updates the time
clear_expired_transactions();
@ -776,15 +734,11 @@ void database::_apply_block( const signed_block& next_block )
// TODO: figure out if we could collapse this function into
// update_global_dynamic_data() as perhaps these methods only need
// to be called for header validation?
update_maintenance_flag( maint_needed );
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM) {
update_witness_schedule();
for(const auto& active_sidechain_type : active_sidechain_types(dynamic_global_props.time)) {
if(global_props.active_sons.at(active_sidechain_type).size() > 0) {
update_son_schedule(active_sidechain_type);
}
if(global_props.active_sons.size() > 0) {
update_son_schedule();
}
}
@ -833,7 +787,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
auto& trx_idx = get_mutable_index_type<transaction_index>();
const chain_id_type& chain_id = get_chain_id();
transaction_id_type trx_id;
if( !(skip & skip_transaction_dupe_check) )
{
trx_id = trx.id();
@ -852,7 +806,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
return get_account_custom_authorities(id, op);
};
trx.verify_authority( chain_id, get_active, get_owner, get_custom,
true,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(head_block_time()),
get_global_properties().parameters.max_authority_depth );
}
@ -931,7 +885,7 @@ const witness_object& database::validate_block_header( uint32_t skip, const sign
FC_ASSERT( secret_hash_type::hash( next_block.previous_secret ) == witness.next_secret_hash, "",
( "previous_secret", next_block.previous_secret )( "next_secret_hash", witness.next_secret_hash ) );
if( !(skip&skip_witness_signature) )
if( !(skip&skip_witness_signature) )
FC_ASSERT( next_block.validate_signee( witness.signing_key ) );
if( !(skip&skip_witness_schedule_check) )

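The comments in _push_transaction above describe a two-level undo scheme: a long-lived pending session that can rewind the state back to the head block, plus a short-lived per-transaction session that is discarded if _apply_transaction throws and merged into the pending session on success (the revised code additionally serialises access to _pending_tx and the undo database with mutexes). A toy illustration of that discard-on-failure / merge-on-success shape, with made-up session and database types, is:

#include <iostream>
#include <optional>

// Toy undo session: rolls back on destruction unless merge() was called.
struct undo_session
{
   bool merged = false;
   void merge() { merged = true; }
   ~undo_session() { if (!merged) std::cout << "rolled back\n"; }
};

struct toy_database
{
   std::optional<undo_session> pending_session; // rewinds everything to the head block state

   bool apply(bool should_fail)
   {
      if (!pending_session)          // first tx after a block: open the outer session
         pending_session.emplace();

      undo_session temp;             // per-transaction child session
      if (should_fail)
         return false;               // temp rolls back; earlier pending work is untouched
      temp.merge();                  // success: fold the changes into the pending state
      return true;
   }
};

int main()
{
   toy_database db;
   db.apply(false);   // applied and merged
   db.apply(true);    // prints "rolled back", earlier work preserved
   // the outer pending session also rolls back when db goes out of scope here
}
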
View file

@ -34,6 +34,8 @@
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/son_proposal_object.hpp>
#include <fc/smart_ref_impl.hpp>
#include <ctime>
#include <algorithm>
@ -66,7 +68,7 @@ const dynamic_global_property_object& database::get_dynamic_global_properties()
const fee_schedule& database::current_fee_schedule()const
{
return std::ref( *get_global_properties().parameters.current_fees );
return get_global_properties().parameters.current_fees;
}
time_point_sec database::head_block_time()const
@ -109,7 +111,7 @@ uint32_t database::last_non_undoable_block_num() const
return head_block_num() - _undo_db.size();
}
std::vector<uint32_t> database::get_seeds( asset_id_type for_asset, uint8_t count_winners ) const
std::vector<uint32_t> database::get_seeds(asset_id_type for_asset, uint8_t count_winners) const
{
FC_ASSERT( count_winners <= 64 );
std::string salted_string = std::string(_random_number_generator._seed) + std::to_string(for_asset.instance.value);
@ -222,32 +224,17 @@ std::set<son_id_type> database::get_sons_to_be_deregistered()
for( auto& son : son_idx )
{
bool need_to_be_deregistered = true;
for(const auto& status : son.statuses)
if(son.status == son_status::in_maintenance)
{
const auto& sidechain = status.first;
if(status.second != son_status::in_maintenance)
need_to_be_deregistered = false;
if(need_to_be_deregistered)
auto stats = son.statistics(*this);
// TODO : We need to add a function that returns if we can deregister SON
// i.e. with introduction of PW code, we have to make a decision if the SON
// is needed for release of funds from the PW
if(head_block_time() - stats.last_down_timestamp >= fc::seconds(get_global_properties().parameters.son_deregister_time()))
{
auto stats = son.statistics(*this);
// TODO : We need to add a function that returns if we can deregister SON
// i.e. with introduction of PW code, we have to make a decision if the SON
// is needed for release of funds from the PW
if(stats.last_active_timestamp.contains(sidechain)) {
if (head_block_time() - stats.last_active_timestamp.at(sidechain) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
need_to_be_deregistered = false;
}
}
ret.insert(son.id);
}
}
if(need_to_be_deregistered)
{
ret.insert(son.id);
}
}
return ret;
}
@ -304,96 +291,30 @@ bool database::is_son_dereg_valid( son_id_type son_id )
return false;
}
bool status_son_dereg_valid = true;
for (const auto &active_sidechain_type : active_sidechain_types(head_block_time())) {
if(son->statuses.at(active_sidechain_type) != son_status::in_maintenance)
status_son_dereg_valid = false;
if(status_son_dereg_valid)
{
if(son->statistics(*this).last_active_timestamp.contains(active_sidechain_type)) {
if (head_block_time() - son->statistics(*this).last_active_timestamp.at(active_sidechain_type) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
status_son_dereg_valid = false;
}
}
}
}
return status_son_dereg_valid;
return (son->status == son_status::in_maintenance &&
(head_block_time() - son->statistics(*this).last_down_timestamp >= fc::seconds(get_global_properties().parameters.son_deregister_time())));
}
bool database::is_son_active( sidechain_type type, son_id_type son_id )
bool database::is_son_active( son_id_type son_id )
{
const auto& son_idx = get_index_type<son_index>().indices().get< by_id >();
auto son = son_idx.find( son_id );
if(son == son_idx.end()) {
if(son == son_idx.end())
{
return false;
}
const global_property_object& gpo = get_global_properties();
if(!gpo.active_sons.contains(type)) {
return false;
}
const auto& gpo_as = gpo.active_sons.at(type);
vector<son_id_type> active_son_ids;
active_son_ids.reserve(gpo_as.size());
std::transform(gpo_as.cbegin(), gpo_as.cend(),
active_son_ids.reserve(gpo.active_sons.size());
std::transform(gpo.active_sons.begin(), gpo.active_sons.end(),
std::inserter(active_son_ids, active_son_ids.end()),
[](const son_sidechain_info& swi) {
[](const son_info& swi) {
return swi.son_id;
});
if(active_son_ids.empty()) {
return false;
}
auto it_son = std::find(active_son_ids.begin(), active_son_ids.end(), son_id);
return (it_son != active_son_ids.end());
}
vector<uint64_t> database::get_random_numbers(uint64_t minimum, uint64_t maximum, uint64_t selections, bool duplicates)
{
FC_ASSERT( selections <= 100000 );
if (duplicates == false) {
FC_ASSERT( maximum - minimum >= selections );
}
vector<uint64_t> v;
v.reserve(selections);
if (duplicates) {
for (uint64_t i = 0; i < selections; i++) {
int64_t rnd = get_random_bits(maximum - minimum) + minimum;
v.push_back(rnd);
}
} else {
vector<uint64_t> tmpv;
tmpv.reserve(selections);
for (uint64_t i = minimum; i < maximum; i++) {
tmpv.push_back(i);
}
for (uint64_t i = 0; (i < selections) && (tmpv.size() > 0); i++) {
uint64_t idx = get_random_bits(tmpv.size());
v.push_back(tmpv.at(idx));
tmpv.erase(tmpv.begin() + idx);
}
}
return v;
}
bool database::is_asset_creation_allowed(const string &symbol)
{
if (symbol == "BTC")
{
if (head_block_time() < HARDFORK_SON_TIME)
return false;
}
return true;
}
}
}
} }
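
get_random_numbers above has two modes: with duplicates it simply draws `selections` values from [minimum, maximum), and without duplicates it materialises the whole range and removes one randomly chosen element per draw, which is why it requires maximum - minimum >= selections. A standalone sketch of the no-duplicates path, using std::mt19937_64 in place of the chain's deterministic get_random_bits source, is:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

// Draw `selections` distinct values from [minimum, maximum), mirroring the
// erase-by-random-index approach of the chain code.
std::vector<uint64_t> sample_without_duplicates(uint64_t minimum, uint64_t maximum,
                                                uint64_t selections, std::mt19937_64& rng)
{
   assert(maximum - minimum >= selections);
   std::vector<uint64_t> pool;
   pool.reserve(maximum - minimum);
   for (uint64_t i = minimum; i < maximum; ++i)
      pool.push_back(i);

   std::vector<uint64_t> out;
   out.reserve(selections);
   for (uint64_t i = 0; i < selections && !pool.empty(); ++i) {
      const uint64_t idx = rng() % pool.size();                   // chain code: get_random_bits(pool.size())
      out.push_back(pool[idx]);
      pool.erase(pool.begin() + static_cast<std::ptrdiff_t>(idx)); // O(n) erase, fine for small pools
   }
   return out;
}

int main()
{
   std::mt19937_64 rng{42};
   for (uint64_t v : sample_without_duplicates(10, 20, 5, rng))
      std::cout << v << ' ';
   std::cout << '\n';
}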

View file

@ -53,7 +53,6 @@
#include <graphene/chain/custom_account_authority_object.hpp>
#include <graphene/chain/offer_object.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/random_number_object.hpp>
#include <graphene/chain/nft_object.hpp>
@ -95,17 +94,16 @@
#include <graphene/chain/offer_evaluator.hpp>
#include <graphene/chain/nft_evaluator.hpp>
#include <graphene/chain/account_role_evaluator.hpp>
#include <graphene/chain/nft_lottery_evaluator.hpp>
#include <graphene/chain/son_evaluator.hpp>
#include <graphene/chain/son_wallet_evaluator.hpp>
#include <graphene/chain/son_wallet_deposit_evaluator.hpp>
#include <graphene/chain/son_wallet_withdraw_evaluator.hpp>
#include <graphene/chain/sidechain_address_evaluator.hpp>
#include <graphene/chain/sidechain_transaction_evaluator.hpp>
#include <graphene/chain/random_number_evaluator.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/smart_ref_impl.hpp>
#include <fc/uint128.hpp>
#include <fc/crypto/digest.hpp>
@ -206,12 +204,6 @@ const uint8_t offer_history_object::type_id;
const uint8_t account_role_object::space_id;
const uint8_t account_role_object::type_id;
const uint8_t nft_lottery_balance_object::space_id;
const uint8_t nft_lottery_balance_object::type_id;
const uint8_t random_number_object::space_id;
const uint8_t random_number_object::type_id;
void database::initialize_evaluators()
{
_operation_evaluators.resize(255);
@ -304,9 +296,6 @@ void database::initialize_evaluators()
register_evaluator<account_role_create_evaluator>();
register_evaluator<account_role_update_evaluator>();
register_evaluator<account_role_delete_evaluator>();
register_evaluator<nft_lottery_token_purchase_evaluator>();
register_evaluator<nft_lottery_reward_evaluator>();
register_evaluator<nft_lottery_end_evaluator>();
register_evaluator<create_son_evaluator>();
register_evaluator<update_son_evaluator>();
register_evaluator<deregister_son_evaluator>();
@ -326,57 +315,12 @@ void database::initialize_evaluators()
register_evaluator<sidechain_transaction_sign_evaluator>();
register_evaluator<sidechain_transaction_send_evaluator>();
register_evaluator<sidechain_transaction_settle_evaluator>();
register_evaluator<random_number_store_evaluator>();
}
void database::initialize_hardforks()
{
_hardfork_times.emplace_back(HARDFORK_357_TIME);
_hardfork_times.emplace_back(HARDFORK_359_TIME);
_hardfork_times.emplace_back(HARDFORK_385_TIME);
_hardfork_times.emplace_back(HARDFORK_409_TIME);
_hardfork_times.emplace_back(HARDFORK_413_TIME);
_hardfork_times.emplace_back(HARDFORK_415_TIME);
_hardfork_times.emplace_back(HARDFORK_416_TIME);
_hardfork_times.emplace_back(HARDFORK_419_TIME);
_hardfork_times.emplace_back(HARDFORK_436_TIME);
_hardfork_times.emplace_back(HARDFORK_445_TIME);
_hardfork_times.emplace_back(HARDFORK_453_TIME);
_hardfork_times.emplace_back(HARDFORK_480_TIME);
_hardfork_times.emplace_back(HARDFORK_483_TIME);
_hardfork_times.emplace_back(HARDFORK_516_TIME);
_hardfork_times.emplace_back(HARDFORK_533_TIME);
_hardfork_times.emplace_back(HARDFORK_538_TIME);
_hardfork_times.emplace_back(HARDFORK_555_TIME);
_hardfork_times.emplace_back(HARDFORK_563_TIME);
_hardfork_times.emplace_back(HARDFORK_572_TIME);
_hardfork_times.emplace_back(HARDFORK_599_TIME);
_hardfork_times.emplace_back(HARDFORK_607_TIME);
_hardfork_times.emplace_back(HARDFORK_613_TIME);
_hardfork_times.emplace_back(HARDFORK_615_TIME);
_hardfork_times.emplace_back(HARDFORK_999_TIME);
_hardfork_times.emplace_back(HARDFORK_1000_TIME);
_hardfork_times.emplace_back(HARDFORK_1001_TIME);
_hardfork_times.emplace_back(HARDFORK_5050_1_TIME);
_hardfork_times.emplace_back(HARDFORK_CORE_429_TIME);
_hardfork_times.emplace_back(HARDFORK_GPOS_TIME);
_hardfork_times.emplace_back(HARDFORK_NFT_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_FOR_HIVE_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_TIME);
_hardfork_times.emplace_back(HARDFORK_SON2_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_FOR_ETHEREUM_TIME);
_hardfork_times.emplace_back(HARDFORK_SWEEPS_TIME);
std::sort(_hardfork_times.begin(), _hardfork_times.end());
}
void database::initialize_indexes()
{
reset_indexes();
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_undo_db.set_max_size(GRAPHENE_MIN_UNDO_HISTORY);
_undo_db.set_max_size( GRAPHENE_MIN_UNDO_HISTORY );
//Protocol object indexes
add_index< primary_index<asset_index, 13> >(); // 8192 assets per chunk
@ -460,9 +404,7 @@ void database::initialize_indexes()
add_index< primary_index<lottery_balance_index > >();
add_index< primary_index<sweeps_vesting_balance_index > >();
add_index< primary_index<offer_history_index > >();
add_index< primary_index<nft_lottery_balance_index > >();
add_index< primary_index<son_stats_index > >();
add_index< primary_index<random_number_index > >();
}
@ -476,9 +418,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT(genesis_state.initial_active_witnesses <= genesis_state.initial_witness_candidates.size(),
"initial_active_witnesses is larger than the number of candidate witnesses.");
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_undo_db.disable();
struct auth_inhibitor {
auth_inhibitor(database& db) : db(db), old_flags(db.node_properties().skip_flags)
{ db.node_properties().skip_flags |= skip_authority_check; }
@ -994,6 +934,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
const auto& idx = get_index_type<asset_index>().indices().get<by_symbol>();
auto it = idx.begin();
bool has_imbalanced_assets = false;
while( it != idx.end() )
{
@ -1005,6 +946,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT( debt_itr != total_debts.end() );
if( supply_itr->second != debt_itr->second )
{
has_imbalanced_assets = true;
elog( "Genesis for asset ${aname} is not balanced\n"
" Debt is ${debt}\n"
" Supply is ${supply}\n",
@ -1016,6 +958,10 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}
++it;
}
// @romek
#if 0
FC_ASSERT( !has_imbalanced_assets );
#endif
// Save tallied supplies
for( const auto& item : total_supplies )
@ -1104,9 +1050,8 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT( _p_witness_schedule_obj->id == witness_schedule_id_type() );
// Initialize witness schedule
#ifndef NDEBUG
const son_schedule_object& ssobitcoin =
const son_schedule_object& sso =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
@ -1115,64 +1060,24 @@ void database::init_genesis(const genesis_state_type& genesis_state)
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_bitcoin_sons = get_global_properties().active_sons.at(sidechain_type::bitcoin);
auto init_witnesses = get_global_properties().active_witnesses;
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_bitcoin_sons.size()) / 2, 1);
_sso.scheduler._min_token_count = std::max(int(init_witnesses.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssobitcoin.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::bitcoin)) );
#ifndef NDEBUG
const son_schedule_object& ssoethereum =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
// for scheduled
memset(_sso.rng_seed.begin(), 0, _sso.rng_seed.size());
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_ethereum_sons = get_global_properties().active_sons.at(sidechain_type::ethereum);
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_ethereum_sons.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssoethereum.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::ethereum)) );
#ifndef NDEBUG
const son_schedule_object& ssohive =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
// for scheduled
memset(_sso.rng_seed.begin(), 0, _sso.rng_seed.size());
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_hive_sons = get_global_properties().active_sons.at(sidechain_type::hive);
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_hive_sons.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssohive.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::hive)) );
assert( sso.id == son_schedule_id_type() );
// Enable fees
modify(get_global_properties(), [&genesis_state](global_property_object& p) {
p.parameters.current_fees = genesis_state.initial_parameters.current_fees;
});
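The three create<son_schedule_object> blocks above differ only in which sidechain's active SON count seeds _min_token_count (half the active set, but at least one). A sketch of the same per-sidechain setup written as a loop; all_sidechain_types is the name used later in this diff, while the surrounding types here are simplified stand-ins rather than the chain's actual API:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <map>
#include <vector>

// Illustrative stand-ins for the chain's types.
enum class sidechain_type { bitcoin, ethereum, hive };
static const std::vector<sidechain_type> all_sidechain_types =
   { sidechain_type::bitcoin, sidechain_type::ethereum, sidechain_type::hive };

struct son_schedule { std::size_t min_token_count = 0; };

int main()
{
   // Hypothetical active SON counts per sidechain.
   std::map<sidechain_type, std::size_t> active_sons =
      { {sidechain_type::bitcoin, 5}, {sidechain_type::ethereum, 3}, {sidechain_type::hive, 0} };

   std::map<sidechain_type, son_schedule> schedules;
   for (auto type : all_sidechain_types)
   {
      son_schedule sso;
      // Same rule as the genesis code: at least one token, otherwise half the active set.
      sso.min_token_count = std::max<std::size_t>(active_sons[type] / 2, 1);
      schedules[type] = sso;
   }
   for (const auto& kv : schedules)
      std::cout << "min_token_count=" << kv.second.min_token_count << "\n";
}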
// Create FBA counters
create<fba_accumulator_object>([&]( fba_accumulator_object& acc )
{

File diff suppressed because it is too large

View file

@ -28,7 +28,6 @@
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/chain/special_authority_object.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/nft_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/io/fstream.hpp>
@ -44,7 +43,6 @@ database::database() :
{
initialize_indexes();
initialize_evaluators();
initialize_hardforks();
}
database::~database()
@ -109,6 +107,7 @@ void database::reindex( fc::path data_dir )
ilog( "reindexing blockchain" );
auto start = fc::time_point::now();
const auto last_block_num = last_block->block_num();
uint32_t flush_point = last_block_num < 10000 ? 0 : last_block_num - 10000;
uint32_t undo_point = last_block_num < 50 ? 0 : last_block_num - 50;
ilog( "Replaying blocks, starting at ${next}...", ("next",head_block_num() + 1) );
@ -124,7 +123,8 @@ void database::reindex( fc::path data_dir )
}
for( uint32_t i = head_block_num() + 1; i <= last_block_num; ++i )
{
if( i % 1000000 == 0 )
if( i % 10000 == 0 ) std::cerr << " " << double(i*100)/last_block_num << "% "<<i << " of " <<last_block_num<<" \n";
if( i == flush_point )
{
ilog( "Writing database to disk at block ${i}", ("i",i) );
flush();
@ -233,12 +233,7 @@ void database::open(
FC_ASSERT( *last_block >= head_block_id(),
"last block ID does not match current chain state",
("last_block->id", last_block)("head_block_id",head_block_num()) );
_block_id_to_block.set_replay_mode(true);
reindex( data_dir );
_block_id_to_block.set_replay_mode(false);
}
_opened = true;
}
@ -249,7 +244,7 @@ void database::close(bool rewind)
{
if (!_opened)
return;
// TODO: Save pending tx's on close()
clear_pending();
@ -299,7 +294,7 @@ void database::force_slow_replays()
void database::check_ending_lotteries()
{
try {
const auto& lotteries_idx = get_index_type<asset_index>().indices().get<active_lotteries>();
const auto& lotteries_idx = get_index_type<asset_index>().indices().get<active_lotteries>();
for( auto checking_asset: lotteries_idx )
{
FC_ASSERT( checking_asset.is_lottery() );
@ -311,24 +306,6 @@ void database::check_ending_lotteries()
} catch( ... ) {}
}
void database::check_ending_nft_lotteries()
{
try {
const auto &nft_lotteries_idx = get_index_type<nft_metadata_index>().indices().get<active_nft_lotteries>();
for (auto checking_token : nft_lotteries_idx)
{
FC_ASSERT(checking_token.is_lottery());
const auto &lottery_options = checking_token.lottery_data->lottery_options;
FC_ASSERT(lottery_options.is_active);
// Check the current supply of lottery tokens
auto current_supply = checking_token.get_token_current_supply(*this);
if ((lottery_options.ending_on_soldout && (current_supply == checking_token.max_supply)) ||
(lottery_options.end_date != time_point_sec() && (lottery_options.end_date <= head_block_time())))
checking_token.end_lottery(*this);
}
} catch( ... ) {}
}
void database::check_lottery_end_by_participants( asset_id_type asset_id )
{
try {

View file

@ -24,7 +24,6 @@
#include <fc/container/flat.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/protocol/authority.hpp>
#include <graphene/chain/protocol/operations.hpp>
#include <graphene/chain/protocol/transaction.hpp>
@ -42,10 +41,6 @@
#include <graphene/chain/transaction_object.hpp>
#include <graphene/chain/impacted.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/sidechain_address_object.hpp>
using namespace fc;
@ -203,10 +198,27 @@ struct get_impacted_account_visitor
_impacted.insert( op.issuer );
}
//! We don't use this operations
void operator()( const transfer_to_blind_operation& op ){}
void operator()( const blind_transfer_operation& op ){}
void operator()( const transfer_from_blind_operation& op ){}
void operator()( const transfer_to_blind_operation& op )
{
_impacted.insert( op.from );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const blind_transfer_operation& op )
{
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const transfer_from_blind_operation& op )
{
_impacted.insert( op.to );
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
}
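Each operator() above folds the operation's directly named account plus every account inside the input/output authorities into the impacted set. A toy version of that collection pattern, with plain integer ids and a simplified authority type in place of graphene's:

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

using account_id = uint64_t;

// Simplified stand-in: a graphene authority names a set of accounts.
struct authority { std::vector<account_id> account_auths; };

static void add_authority_accounts(std::set<account_id>& impacted, const authority& a)
{
   for (account_id id : a.account_auths)
      impacted.insert(id);
}

struct blind_output { authority owner; };

struct transfer_to_blind_op
{
   account_id from;
   std::vector<blind_output> outputs;
};

int main()
{
   std::set<account_id> impacted;
   transfer_to_blind_op op{ 17, { blind_output{ authority{ {21, 22} } } } };

   // Mirrors the visitor above: the sender plus every output owner's accounts.
   impacted.insert(op.from);
   for (const auto& out : op.outputs)
      add_authority_accounts(impacted, out.owner);

   for (account_id id : impacted)
      std::cout << id << "\n";   // prints 17, 21, 22
}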
void operator()( const asset_settle_cancel_operation& op )
{
@ -347,13 +359,6 @@ struct get_impacted_account_visitor
void operator()( const account_role_delete_operation& op ){
_impacted.insert( op.owner );
}
void operator()( const nft_lottery_token_purchase_operation& op ){
_impacted.insert( op.buyer );
}
void operator()( const nft_lottery_reward_operation& op ) {
_impacted.insert( op.winner );
}
void operator()( const nft_lottery_end_operation& op ) {}
void operator()( const son_create_operation& op ) {
_impacted.insert( op.owner_account );
}
@ -411,9 +416,6 @@ struct get_impacted_account_visitor
void operator()( const sidechain_transaction_settle_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const random_number_store_operation& op ) {
_impacted.insert( op.account );
}
};
void graphene::chain::operation_get_impacted_accounts( const operation& op, flat_set<account_id_type>& result, bool ignore_custom_operation_required_auths ) {
@ -526,9 +528,6 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
} case sidechain_transaction_object_type:{
break;
}
default: {
break;
}
}
}
else if( obj->id.space() == implementation_ids )
@ -583,10 +582,6 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
break;
case impl_fba_accumulator_object_type:
break;
case impl_nft_lottery_balance_object_type:
break;
default:
break;
}
}
} // end get_relevant_accounts( const object* obj, flat_set<account_id_type>& accounts )
@ -608,6 +603,7 @@ void database::notify_changed_objects()
if( _undo_db.enabled() )
{
const auto& head_undo = _undo_db.head();
auto chain_time = head_block_time();
// New
if( !new_objects.empty() )
@ -619,7 +615,8 @@ void database::notify_changed_objects()
new_ids.push_back(item);
auto obj = find_object(item);
if(obj != nullptr)
get_relevant_accounts(obj, new_accounts_impacted, true);
get_relevant_accounts(obj, new_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
}
GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted)
@ -633,7 +630,8 @@ void database::notify_changed_objects()
for( const auto& item : head_undo.old_values )
{
changed_ids.push_back(item.first);
get_relevant_accounts(item.second.get(), changed_accounts_impacted, true);
get_relevant_accounts(item.second.get(), changed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
}
GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted)
@ -650,7 +648,8 @@ void database::notify_changed_objects()
removed_ids.emplace_back( item.first );
auto obj = item.second.get();
removed.emplace_back( obj );
get_relevant_accounts(obj, removed_accounts_impacted, true);
get_relevant_accounts(obj, removed_accounts_impacted,
MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time));
}
GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted)

View file

@ -26,18 +26,16 @@
#include <graphene/chain/db_with.hpp>
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/betting_market_object.hpp>
#include <graphene/chain/game_object.hpp>
#include <graphene/chain/global_property_object.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/market_object.hpp>
#include <graphene/chain/offer_object.hpp>
#include <graphene/chain/proposal_object.hpp>
#include <graphene/chain/son_proposal_object.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <graphene/chain/transaction_object.hpp>
#include <graphene/chain/withdraw_permission_object.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <graphene/chain/game_object.hpp>
#include <graphene/chain/betting_market_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
@ -48,6 +46,7 @@ namespace graphene { namespace chain {
void database::update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks )
{
const dynamic_global_property_object& _dgp = get_dynamic_global_properties();
const global_property_object& gpo = get_global_properties();
// dynamic global properties updating
modify( _dgp, [&b,this,missed_blocks]( dynamic_global_property_object& dgp ){

View file

@ -74,32 +74,21 @@ witness_id_type database::get_scheduled_witness( uint32_t slot_num )const
return wid;
}
unsigned_int database::get_son_schedule_id( sidechain_type type )const
{
static const map<sidechain_type, unsigned_int> schedule_map = {
{ sidechain_type::bitcoin, 0 },
{ sidechain_type::ethereum, 1 },
{ sidechain_type::hive, 2 }
};
return schedule_map.at(type);
}
son_id_type database::get_scheduled_son( sidechain_type type, uint32_t slot_num )const
son_id_type database::get_scheduled_son( uint32_t slot_num )const
{
son_id_type sid;
const global_property_object& gpo = get_global_properties();
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
{
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
const son_schedule_object& sso = son_schedule_id_type(get_son_schedule_id(type))(*this);
const son_schedule_object& sso = son_schedule_id_type()(*this);
uint64_t current_aslot = dpo.current_aslot + slot_num;
return sso.current_shuffled_sons[ current_aslot % sso.current_shuffled_sons.size() ];
}
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM &&
slot_num != 0 )
{
const son_schedule_object& sso = son_schedule_id_type(get_son_schedule_id(type))(*this);
const son_schedule_object& sso = son_schedule_id_type()(*this);
// ask the near scheduler who goes in the given slot
bool slot_is_near = sso.scheduler.get_slot(slot_num-1, sid);
if(! slot_is_near)
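With the shuffled algorithm, the scheduled SON is simply the entry of current_shuffled_sons at (current_aslot + slot_num) modulo the list length. A minimal sketch of that lookup with illustrative values:

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
   // Stand-in for son_schedule_object::current_shuffled_sons.
   std::vector<uint64_t> current_shuffled_sons = {7, 3, 9, 5};
   uint64_t current_aslot = 12345;   // taken from the dynamic global properties in the chain

   for (uint32_t slot_num = 1; slot_num <= 4; ++slot_num)
   {
      uint64_t aslot = current_aslot + slot_num;
      uint64_t sid = current_shuffled_sons[aslot % current_shuffled_sons.size()];
      std::cout << "slot " << slot_num << " -> son " << sid << "\n";
   }
}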
@ -200,39 +189,36 @@ void database::update_witness_schedule()
}
}
void database::update_son_schedule(sidechain_type type)
void database::update_son_schedule()
{
const son_schedule_object& sso = son_schedule_id_type()(*this);
const global_property_object& gpo = get_global_properties();
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
if( gpo.active_sons.at(type).size() != 0 &&
head_block_num() % gpo.active_sons.at(type).size() == 0)
if( head_block_num() % gpo.active_sons.size() == 0 )
{
modify( sidechain_sso, [&]( son_schedule_object& _sso )
modify( sso, [&]( son_schedule_object& _sso )
{
_sso.current_shuffled_sons.clear();
_sso.current_shuffled_sons.reserve( gpo.active_sons.at(type).size() );
_sso.current_shuffled_sons.reserve( gpo.active_sons.size() );
for ( const auto &w : gpo.active_sons.at(type) ) {
_sso.current_shuffled_sons.push_back(w.son_id);
}
for( const son_info& w : gpo.active_sons )
_sso.current_shuffled_sons.push_back( w.son_id );
auto now_hi = uint64_t(head_block_time().sec_since_epoch()) << 32;
for (uint32_t i = 0; i < _sso.current_shuffled_sons.size(); ++i)
for( uint32_t i = 0; i < _sso.current_shuffled_sons.size(); ++i )
{
/// High performance random generator
/// http://xorshift.di.unimi.it/
uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
uint64_t k = now_hi + uint64_t(i)*2685821657736338717ULL;
k ^= (k >> 12);
k ^= (k << 25);
k ^= (k >> 27);
k *= 2685821657736338717ULL;
uint32_t jmax = _sso.current_shuffled_sons.size() - i;
uint32_t j = i + k % jmax;
std::swap(_sso.current_shuffled_sons[i],
_sso.current_shuffled_sons[j]);
uint32_t j = i + k%jmax;
std::swap( _sso.current_shuffled_sons[i],
_sso.current_shuffled_sons[j] );
}
});
}
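The modify block above reshuffles the SON list in place with a Fisher-Yates pass whose per-index random value is an xorshift64*-style mix (the generator linked in the comment) of the head block time. A self-contained sketch of exactly that mixing and swapping, with an arbitrary seed:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
   std::vector<uint64_t> current_shuffled_sons = {0, 1, 2, 3, 4, 5};

   // In the chain, now_hi is the head block time shifted into the high 32 bits.
   uint64_t now_hi = uint64_t(1600695791u) << 32;

   for (uint32_t i = 0; i < current_shuffled_sons.size(); ++i)
   {
      /// High performance random generator
      /// http://xorshift.di.unimi.it/
      uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
      k ^= (k >> 12);
      k ^= (k << 25);
      k ^= (k >> 27);
      k *= 2685821657736338717ULL;

      uint32_t jmax = current_shuffled_sons.size() - i;
      uint32_t j = i + k % jmax;
      std::swap(current_shuffled_sons[i], current_shuffled_sons[j]);
   }

   for (uint64_t id : current_shuffled_sons)
      std::cout << id << ' ';
   std::cout << '\n';
}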
@ -318,15 +304,13 @@ void database::update_witness_schedule(const signed_block& next_block)
idump( ( double(total_time/1000000.0)/calls) );
}
void database::update_son_schedule(sidechain_type type, const signed_block& next_block)
void database::update_son_schedule(const signed_block& next_block)
{
auto start = fc::time_point::now();
#ifndef NDEBUG
const son_schedule_object& sso = get(son_schedule_id_type());
#endif
const global_property_object& gpo = get_global_properties();
const uint32_t schedule_needs_filled = gpo.active_sons.at(type).size();
const uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
const son_schedule_object& sso = get(son_schedule_id_type());
uint32_t schedule_needs_filled = gpo.active_sons.size();
uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
// We shouldn't be able to generate _pending_block with timestamp
// in the past, and incoming blocks from the network with timestamp
@ -335,49 +319,48 @@ void database::update_son_schedule(sidechain_type type, const signed_block& next
assert( schedule_slot > 0 );
son_id_type first_son;
bool slot_is_near = sso.scheduler.get_slot( schedule_slot-1, first_son );
son_id_type son;
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
assert( dpo.random.data_size() == witness_scheduler_rng::seed_length );
assert( witness_scheduler_rng::seed_length == sso.rng_seed.size() );
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
son_id_type first_son;
bool slot_is_near = sidechain_sso.scheduler.get_slot( schedule_slot-1, first_son );
son_id_type son_id;
modify(sidechain_sso, [&](son_schedule_object& _sso)
modify(sso, [&](son_schedule_object& _sso)
{
_sso.slots_since_genesis += schedule_slot;
witness_scheduler_rng rng(_sso.rng_seed.data, _sso.slots_since_genesis);
_sso.slots_since_genesis += schedule_slot;
witness_scheduler_rng rng(sso.rng_seed.data, _sso.slots_since_genesis);
_sso.scheduler._min_token_count = std::max(int(gpo.active_sons.at(type).size()) / 2, 1);
_sso.scheduler._min_token_count = std::max(int(gpo.active_sons.size()) / 2, 1);
if( slot_is_near )
{
uint32_t drain = schedule_slot;
while( drain > 0 )
{
if( _sso.scheduler.size() == 0 )
break;
_sso.scheduler.consume_schedule();
--drain;
}
}
else
{
_sso.scheduler.reset_schedule( first_son );
}
while( !_sso.scheduler.get_slot(schedule_needs_filled, son_id) )
{
if( _sso.scheduler.produce_schedule(rng) & emit_turn )
memcpy(_sso.rng_seed.begin(), dpo.random.data(), dpo.random.data_size());
}
_sso.last_scheduling_block = next_block.block_num();
_sso.recent_slots_filled = (
(_sso.recent_slots_filled << 1)
+ 1) << (schedule_slot - 1);
if( slot_is_near )
{
uint32_t drain = schedule_slot;
while( drain > 0 )
{
if( _sso.scheduler.size() == 0 )
break;
_sso.scheduler.consume_schedule();
--drain;
}
}
else
{
_sso.scheduler.reset_schedule( first_son );
}
while( !_sso.scheduler.get_slot(schedule_needs_filled, son) )
{
if( _sso.scheduler.produce_schedule(rng) & emit_turn )
memcpy(_sso.rng_seed.begin(), dpo.random.data(), dpo.random.data_size());
}
_sso.last_scheduling_block = next_block.block_num();
_sso.recent_slots_filled = (
(_sso.recent_slots_filled << 1)
+ 1) << (schedule_slot - 1);
});
auto end = fc::time_point::now();
static uint64_t total_time = 0;
static uint64_t calls = 0;

View file

@ -47,7 +47,7 @@ namespace graphene { namespace chain {
};
} }
FC_REFLECT_ENUM(graphene::chain::event_state,
FC_REFLECT_ENUM(graphene::chain::event_state,
(upcoming)
(frozen_upcoming)
(in_progress)
@ -61,12 +61,12 @@ namespace graphene { namespace chain {
namespace msm = boost::msm;
namespace mpl = boost::mpl;
namespace
namespace
{
// Events -- most events happen when the witnesses publish an event_update operation with a new
// status, so if they publish an event with the status set to `frozen`, we'll generate a `frozen_event`
struct upcoming_event
struct upcoming_event
{
database& db;
upcoming_event(database& db) : db(db) {}
@ -76,12 +76,12 @@ namespace graphene { namespace chain {
database& db;
in_progress_event(database& db) : db(db) {}
};
struct frozen_event
struct frozen_event
{
database& db;
frozen_event(database& db) : db(db) {}
};
struct finished_event
struct finished_event
{
database& db;
finished_event(database& db) : db(db) {}
@ -104,7 +104,7 @@ namespace graphene { namespace chain {
betting_market_group_resolved_event(database& db, betting_market_group_id_type resolved_group, bool was_canceled) : db(db), resolved_group(resolved_group), was_canceled(was_canceled) {}
};
// event triggered when a betting market group is closed. When we get this,
// event triggered when a betting market group is closed. When we get this,
// if all child betting market groups are closed, transition to finished
struct betting_market_group_closed_event
{
@ -127,7 +127,7 @@ namespace graphene { namespace chain {
void on_entry(const upcoming_event& event, event_state_machine_& fsm) {
dlog("event ${id} -> upcoming", ("id", fsm.event_obj->id));
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(fsm.event_obj->id)))
try
{
@ -147,7 +147,7 @@ namespace graphene { namespace chain {
void on_entry(const in_progress_event& event, event_state_machine_& fsm) {
dlog("event ${id} -> in_progress", ("id", fsm.event_obj->id));
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(fsm.event_obj->id)))
try
{
@ -203,7 +203,7 @@ namespace graphene { namespace chain {
void freeze_betting_market_groups(const frozen_event& event) {
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
{
try
@ -222,7 +222,7 @@ namespace graphene { namespace chain {
void close_all_betting_market_groups(const finished_event& event) {
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
{
try
@ -241,7 +241,7 @@ namespace graphene { namespace chain {
void cancel_all_betting_market_groups(const canceled_event& event) {
auto& betting_market_group_index = event.db.template get_index_type<betting_market_group_object_index>().indices().template get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
event.db.modify(betting_market_group, [&event](betting_market_group_object& betting_market_group_obj) {
betting_market_group_obj.on_canceled_event(event.db, true);
@ -252,15 +252,15 @@ namespace graphene { namespace chain {
bool all_betting_market_groups_are_closed(const betting_market_group_closed_event& event)
{
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
if (betting_market_group.id != event.closed_group)
{
betting_market_group_status status = betting_market_group.get_status();
if (status != betting_market_group_status::closed &&
status != betting_market_group_status::graded &&
status != betting_market_group_status::re_grading &&
status != betting_market_group_status::settled &&
if (status != betting_market_group_status::closed &&
status != betting_market_group_status::graded &&
status != betting_market_group_status::re_grading &&
status != betting_market_group_status::settled &&
status != betting_market_group_status::canceled)
return false;
}
@ -276,7 +276,7 @@ namespace graphene { namespace chain {
if (event_obj->at_least_one_betting_market_group_settled)
return false;
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
if (betting_market_group.id != event.resolved_group)
if (betting_market_group.get_status() != betting_market_group_status::canceled)
@ -290,7 +290,7 @@ namespace graphene { namespace chain {
event_obj->at_least_one_betting_market_group_settled = true;
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id))) {
if (betting_market_group.id != event.resolved_group) {
betting_market_group_status status = betting_market_group.get_status();
@ -344,6 +344,7 @@ namespace graphene { namespace chain {
{
FC_THROW_EXCEPTION(graphene::chain::no_transition, "No transition");
}
template <class Fsm>
void no_transition(canceled_event const& e, Fsm&, int state)
{
@ -371,7 +372,7 @@ namespace graphene { namespace chain {
{
}
event_object::event_object(const event_object& rhs) :
event_object::event_object(const event_object& rhs) :
graphene::db::abstract_object<event_object>(rhs),
name(rhs.name),
season(rhs.season),
@ -407,7 +408,7 @@ namespace graphene { namespace chain {
}
namespace {
bool verify_event_status_constants()
{
unsigned error_count = 0;
@ -442,19 +443,19 @@ namespace graphene { namespace chain {
dlog("Event status constants are correct");
else
wlog("There were ${count} errors in the event status constants", ("count", error_count));
return error_count == 0;
}
} // end anonymous namespace
event_status event_object::get_status() const
{
static bool state_constants_are_correct = verify_event_status_constants();
(void)&state_constants_are_correct;
event_state state = (event_state)my->state_machine.current_state()[0];
ddump((state));
switch (state)
{
case event_state::upcoming:
@ -522,8 +523,8 @@ namespace graphene { namespace chain {
my->state_machine.process_event(betting_market_group_closed_event(db, closed_group));
}
// These are the only statuses that can be explicitly set by witness operations. The missing
// status, 'settled', is automatically set when all of the betting market groups have
// These are the only statuses that can be explicitly set by witness operations. The missing
// status, 'settled', is automatically set when all of the betting market groups have
// settled/canceled
void event_object::dispatch_new_status(database& db, event_status new_status)
{
@ -532,16 +533,16 @@ namespace graphene { namespace chain {
on_upcoming_event(db);
break;
case event_status::in_progress: // by witnesses when the event starts
on_in_progress_event(db);
on_in_progress_event(db);
break;
case event_status::frozen: // by witnesses when the event needs to be frozen
on_frozen_event(db);
on_frozen_event(db);
break;
case event_status::finished: // by witnesses when the event is complete
on_finished_event(db);
on_finished_event(db);
break;
case event_status::canceled: // by witnesses to cancel the event
on_canceled_event(db);
on_canceled_event(db);
break;
default:
FC_THROW("Status ${new_status} cannot be explicitly set", ("new_status", new_status));
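The switch above is the witness-facing entry point into the event state machine: upcoming, in_progress, frozen, finished and canceled may be set explicitly, while settled is only reached once all betting market groups resolve. A stripped-down sketch of just that rule, using a plain enum instead of the boost::msm machine:

#include <iostream>
#include <stdexcept>

// Mirrors the graphene::chain::event_status values used by dispatch_new_status.
enum class event_status { upcoming, in_progress, frozen, finished, canceled, settled };

// Same rule as the switch above: 'settled' (or anything else) cannot be set explicitly.
void dispatch_new_status(event_status new_status)
{
   switch (new_status)
   {
   case event_status::upcoming:
   case event_status::in_progress:
   case event_status::frozen:
   case event_status::finished:
   case event_status::canceled:
      std::cout << "dispatching witness-set status\n";
      break;
   default:
      throw std::runtime_error("Status cannot be explicitly set");
   }
}

int main()
{
   dispatch_new_status(event_status::frozen);              // accepted
   try { dispatch_new_status(event_status::settled); }     // rejected
   catch (const std::exception& e) { std::cout << e.what() << "\n"; }
}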
@ -550,7 +551,7 @@ namespace graphene { namespace chain {
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect event_object to variant to properly reflect "state"
void to_variant(const graphene::chain::event_object& event_obj, fc::variant& v, uint32_t max_depth)
{

View file

@ -24,6 +24,7 @@
#include <graphene/chain/fork_database.hpp>
#include <graphene/chain/exceptions.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
fork_database::fork_database()

View file

@ -547,7 +547,7 @@ namespace graphene { namespace chain {
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect game_object to variant to properly reflect "state"
void to_variant(const graphene::chain::game_object& game_obj, fc::variant& v, uint32_t max_depth)
{

View file

@ -25,6 +25,7 @@
#include <graphene/chain/genesis_state.hpp>
// these are required to serialize a genesis_state
#include <fc/smart_ref_impl.hpp> // required for gcc in release mode
#include <graphene/chain/protocol/fee_schedule.hpp>
namespace graphene { namespace chain {

View file

@ -1,7 +1,3 @@
#ifndef HARDFORK_1000_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_1000_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_1000_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_1000_TIME (fc::time_point_sec( 1540000000 ))
#endif
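The replacement above swaps the raw epoch literal for per-network ISO strings; 1540000000 is exactly 2018-10-20T01:46:40 UTC, i.e. the testnet value. A quick standalone check of that correspondence using only the standard library (no fc types):

#include <ctime>
#include <iostream>

int main()
{
   std::time_t hf_1000 = 1540000000;          // value used by the old HARDFORK_1000_TIME
   std::tm* utc = std::gmtime(&hf_1000);

   char buf[32];
   std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", utc);
   std::cout << buf << "\n";                  // prints 2018-10-20T01:46:40
}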

View file

@ -1,8 +1,4 @@
// added delete sport and delete event group operations
#ifndef HARDFORK_1001_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_1001_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_1001_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_1001_TIME (fc::time_point_sec( 1540000000 ))
#endif

View file

@ -1,8 +1,4 @@
// #357 Disallow publishing certain malformed price feeds
#ifndef HARDFORK_357_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_357_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#else
#define HARDFORK_357_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#endif
#define HARDFORK_357_TIME (fc::time_point_sec( 1444416300 ))
#endif

View file

@ -1,8 +1,4 @@
// #359 Allow digits in asset name
#ifndef HARDFORK_359_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_359_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#else
#define HARDFORK_359_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#endif
#define HARDFORK_359_TIME (fc::time_point_sec( 1444416300 ))
#endif

View file

@ -1,8 +1,4 @@
// #385 October 23 enforce PARENT.CHILD and allow short names
#ifndef HARDFORK_385_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_385_TIME (fc::time_point_sec::from_iso_string("2015-10-23T00:00:00"))
#else
#define HARDFORK_385_TIME (fc::time_point_sec::from_iso_string("2015-10-23T00:00:00"))
#endif
#define HARDFORK_385_TIME (fc::time_point_sec( 1445558400 ))
#endif

View file

@ -1,8 +1,4 @@
// #409 Allow creation of sub-assets
#ifndef HARDFORK_409_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_409_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_409_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_409_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #413 Add operation to claim asset fees
#ifndef HARDFORK_413_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_413_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_413_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_413_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #415 Default accept policy for asset with no whitelist authorities
#ifndef HARDFORK_415_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_415_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_415_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_415_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #416 enforce_white_list is inconsistently applied
#ifndef HARDFORK_416_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_416_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_416_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_416_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #419 Account can pay fees in blacklisted asset
#ifndef HARDFORK_419_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_419_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_419_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_419_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #436 Prevent margin call from being triggered unless feed < call price
#ifndef HARDFORK_436_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_436_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_436_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_436_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #445 Refund create order fees on cancel
#ifndef HARDFORK_445_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_445_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_445_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_445_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #453 Hardfork to retroactively correct referral percentages
#ifndef HARDFORK_453_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_453_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_453_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_453_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #480 Fix non-BTS MIA core_exchange_rate check
#ifndef HARDFORK_480_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_480_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#else
#define HARDFORK_480_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#endif
#define HARDFORK_480_TIME (fc::time_point_sec( 1450378800 ))
#endif

View file

@ -1,8 +1,4 @@
// #483 Operation history numbering change
#ifndef HARDFORK_483_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_483_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#else
#define HARDFORK_483_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#endif
#define HARDFORK_483_TIME (fc::time_point_sec( 1450378800 ))
#endif

View file

@ -1,7 +1,4 @@
// 5050_1 HARDFORK Wednesday, 15 April 2020 20:00:00 GMT
#ifndef HARDFORK_5050_1_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_5050_1_TIME (fc::time_point_sec::from_iso_string("2020-04-15T20:00:00"))
#else
#define HARDFORK_5050_1_TIME (fc::time_point_sec::from_iso_string("2020-04-22T20:00:00"))
#endif
#define HARDFORK_5050_1_TIME (fc::time_point_sec( 1586980800 ))
#endif

View file

@ -1,8 +1,4 @@
// #516 Special authorities
#ifndef HARDFORK_516_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_516_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_516_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_516_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #533 Improve vote counting implementation
#ifndef HARDFORK_533_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_533_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_533_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_533_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #538 Buyback accounts
#ifndef HARDFORK_538_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_538_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_538_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_538_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #555 Buyback accounts
#ifndef HARDFORK_555_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_555_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_555_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_555_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #563 Stealth fee routing
#ifndef HARDFORK_563_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_563_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_563_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_563_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #572 Allow asset to update permission flags when no supply exists
#ifndef HARDFORK_572_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_572_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_572_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_572_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #599 Unpacking of extension is incorrect
#ifndef HARDFORK_599_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_599_TIME (fc::time_point_sec::from_iso_string("2016-04-04T17:00:00"))
#else
#define HARDFORK_599_TIME (fc::time_point_sec::from_iso_string("2016-04-04T17:00:00"))
#endif
#define HARDFORK_599_TIME (fc::time_point_sec( 1459789200 ))
#endif

View file

@ -1,8 +1,4 @@
// #607 Disable negative voting on workers
#ifndef HARDFORK_607_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_607_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_607_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_607_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// #613 Deprecate annual membership
#ifndef HARDFORK_613_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_613_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_613_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_613_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// #615 Fix price feed expiration check, so websocket server will never spam too much data
#ifndef HARDFORK_615_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_615_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_615_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_615_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// Placeholder HF for affiliate reward system
#ifndef HARDFORK_999_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_999_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_999_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_999_TIME (fc::time_point_sec( 1540000000 ))
#endif

View file

@ -1,8 +1,4 @@
// bitshares-core #429 rounding issue when creating assets
#ifndef HARDFORK_CORE_429_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_CORE_429_TIME (fc::time_point_sec::from_iso_string("2019-08-26T02:00:00"))
#else
#define HARDFORK_CORE_429_TIME (fc::time_point_sec::from_iso_string("2019-09-13T02:00:00"))
#endif
#define HARDFORK_CORE_429_TIME (fc::time_point_sec( 1566784800 ))
#endif

View file

@ -0,0 +1,6 @@
// #210 Check authorities on custom_operation
#ifndef HARDFORK_CORE_210_TIME
#define HARDFORK_CORE_210_TIME (fc::time_point_sec(1893456000)) // Jan 1 00:00:00 2030 (Not yet scheduled)
// Bugfix: pre-HF 210, custom_operation's required_auths field was ignored.
#define MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) (chain_time <= HARDFORK_CORE_210_TIME)
#endif
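MUST_IGNORE_CUSTOM_OP_REQD_AUTHS follows the usual hardfork-gate pattern: a predicate over the chain time that callers consult before enforcing the post-fork behaviour (notify_changed_objects in this diff passes it into get_relevant_accounts). A stripped-down sketch of that pattern, with plain integers standing in for fc::time_point_sec and a simplified custom_operation:

#include <cstdint>
#include <iostream>
#include <set>

// Seconds-since-epoch stand-ins for fc::time_point_sec values.
constexpr uint32_t HARDFORK_CORE_210_TIME = 1893456000u;   // Jan 1 00:00:00 2030 (Not yet scheduled)
#define MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time) ((chain_time) <= HARDFORK_CORE_210_TIME)

struct custom_operation { std::set<uint64_t> required_auths; };

// Pre-HF210 the required_auths field is ignored; afterwards it contributes accounts.
std::set<uint64_t> impacted_accounts(const custom_operation& op, uint32_t chain_time)
{
   if (MUST_IGNORE_CUSTOM_OP_REQD_AUTHS(chain_time))
      return {};
   return op.required_auths;
}

int main()
{
   custom_operation op{ {5, 6} };
   std::cout << impacted_accounts(op, 1600000000u).size() << "\n";   // 0: before HF210
   std::cout << impacted_accounts(op, 1900000000u).size() << "\n";   // 2: after HF210
}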

View file

@ -1,7 +1,4 @@
// GPOS HARDFORK Monday, 6 January 2020 01:00:00 GMT
#ifndef HARDFORK_GPOS_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_GPOS_TIME (fc::time_point_sec::from_iso_string("2020-01-06T01:00:00"))
#else
#define HARDFORK_GPOS_TIME (fc::time_point_sec::from_iso_string("2020-02-17T22:00:00"))
#endif
#define HARDFORK_GPOS_TIME (fc::time_point_sec( 1578272400 ))
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_HOTFIX_2024_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#else
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#endif
#endif

View file

@ -1,7 +1,4 @@
// NFT HARDFORK Sat, 15-Aug-20 00:00:00 UTC
#ifndef HARDFORK_NFT_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_NFT_TIME (fc::time_point_sec::from_iso_string("2020-08-15T00:00:00"))
#else
#define HARDFORK_NFT_TIME (fc::time_point_sec::from_iso_string("2020-12-21T00:00:00"))
#endif
#define HARDFORK_NFT_TIME (fc::time_point_sec( 1597449600 ))
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SIDECHAIN_DELETE_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SIDECHAIN_DELETE_TIME (fc::time_point_sec::from_iso_string("2022-11-16T02:00:00"))
#else
#define HARDFORK_SIDECHAIN_DELETE_TIME (fc::time_point_sec::from_iso_string("2022-11-16T02:00:00"))
#endif
#endif

View file

@ -1,7 +1,7 @@
// SON HARDFORK Wednesday, January 1, 2020 12:00:00 AM - 1577836800
// SON HARDFORK Monday, March 30, 2020 3:00:00 PM - 1585573200
// SON HARDFORK Monday, September 21, 2020 1:43:11 PM - 1600695791
#ifndef HARDFORK_SON_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON_TIME (fc::time_point_sec::from_iso_string("2020-10-28T00:00:00"))
#else
#define HARDFORK_SON_TIME (fc::time_point_sec::from_iso_string("2020-12-21T00:00:00"))
#endif
#include <ctime>
#define HARDFORK_SON_TIME (fc::time_point_sec( 1585573200 ))
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SON2_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON2_TIME (fc::time_point_sec::from_iso_string("2021-07-31T00:00:00"))
#else
#define HARDFORK_SON2_TIME (fc::time_point_sec::from_iso_string("2021-07-31T00:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SON_FOR_ETHEREUM_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-07-17T12:00:00"))
#else
#define HARDFORK_SON_FOR_ETHEREUM_TIME (fc::time_point_sec::from_iso_string("2023-10-24T12:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SON_FOR_HIVE_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON_FOR_HIVE_TIME (fc::time_point_sec::from_iso_string("2021-03-31T00:00:00"))
#else
#define HARDFORK_SON_FOR_HIVE_TIME (fc::time_point_sec::from_iso_string("2021-12-21T00:00:00"))
#endif
#endif

View file

@ -1,7 +1,3 @@
#ifndef HARDFORK_SWEEPS_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SWEEPS_TIME (fc::time_point_sec::from_iso_string("2019-08-26T02:00:00"))
#else
#define HARDFORK_SWEEPS_TIME (fc::time_point_sec::from_iso_string("2019-09-13T02:00:00"))
#endif
#define HARDFORK_SWEEPS_TIME (fc::time_point_sec( 1566784800 ))
#endif

View file

@ -28,10 +28,10 @@ namespace graphene
using account_role_multi_index_type = multi_index_container<
account_role_object,
indexed_by<
ordered_unique< tag<by_id>,
ordered_unique< tag<by_id>,
member<object, object_id_type, &object::id>
>,
ordered_non_unique< tag<by_owner>,
ordered_non_unique< tag<by_owner>,
member<account_role_object, account_id_type, &account_role_object::owner>
>,
ordered_unique< tag<by_expiration>,
@ -46,4 +46,4 @@ namespace graphene
} // namespace graphene
FC_REFLECT_DERIVED(graphene::chain::account_role_object, (graphene::db::object),
(owner)(name)(metadata)(allowed_operations)(whitelisted_accounts)(valid_to))
(owner)(name)(metadata)(allowed_operations)(whitelisted_accounts)(valid_to))

View file

@ -164,12 +164,7 @@ namespace graphene { namespace chain {
template<class DB>
const asset_bitasset_data_object& bitasset_data(const DB& db)const
{
FC_ASSERT( bitasset_data_id.valid(),
"Asset ${a} (${id}) is not a market issued asset.",
("a",this->symbol)("id",this->id) );
return db.get(*bitasset_data_id);
}
{ assert(bitasset_data_id); return db.get(*bitasset_data_id); }
template<class DB>
const asset_dividend_data_object& dividend_data(const DB& db)const

View file

@ -24,18 +24,19 @@
#pragma once
#include <graphene/chain/protocol/types.hpp>
#include <graphene/chain/protocol/betting_market.hpp>
#include <graphene/db/object.hpp>
#include <graphene/db/generic_index.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <graphene/chain/protocol/betting_market.hpp>
#include <sstream>
#include <boost/multi_index/composite_key.hpp>
namespace graphene { namespace chain {
class betting_market_object;
class betting_market_group_object;
} }
namespace fc {
namespace fc {
void to_variant(const graphene::chain::betting_market_object& betting_market_obj, fc::variant& v, uint32_t max_depth = 1);
void from_variant(const fc::variant& v, graphene::chain::betting_market_object& betting_market_obj, uint32_t max_depth = 1);
void to_variant(const graphene::chain::betting_market_group_object& betting_market_group_obj, fc::variant& v, uint32_t max_depth = 1);
@ -625,9 +626,10 @@ typedef multi_index_container<
typedef generic_index<betting_market_position_object, betting_market_position_multi_index_type> betting_market_position_index;
template<typename Stream>
inline Stream& operator<<( Stream& s, const betting_market_object& betting_market_obj )
{
{
// pack all fields exposed in the header in the usual way
// instead of calling the derived pack, just serialize the one field in the base class
// fc::raw::pack<Stream, const graphene::db::abstract_object<betting_market_object> >(s, betting_market_obj);
@ -647,7 +649,7 @@ inline Stream& operator<<( Stream& s, const betting_market_object& betting_marke
}
template<typename Stream>
inline Stream& operator>>( Stream& s, betting_market_object& betting_market_obj )
{
{
// unpack all fields exposed in the header in the usual way
//fc::raw::unpack<Stream, graphene::db::abstract_object<betting_market_object> >(s, betting_market_obj);
fc::raw::unpack(s, betting_market_obj.id);
@ -661,14 +663,14 @@ inline Stream& operator>>( Stream& s, betting_market_object& betting_market_obj
fc::raw::unpack(s, stringified_stream);
std::istringstream stream(stringified_stream);
betting_market_obj.unpack_impl(stream);
return s;
}
template<typename Stream>
inline Stream& operator<<( Stream& s, const betting_market_group_object& betting_market_group_obj )
{
{
// pack all fields exposed in the header in the usual way
// instead of calling the derived pack, just serialize the one field in the base class
// fc::raw::pack<Stream, const graphene::db::abstract_object<betting_market_group_object> >(s, betting_market_group_obj);
@ -691,7 +693,7 @@ inline Stream& operator<<( Stream& s, const betting_market_group_object& betting
}
template<typename Stream>
inline Stream& operator>>( Stream& s, betting_market_group_object& betting_market_group_obj )
{
{
// unpack all fields exposed in the header in the usual way
//fc::raw::unpack<Stream, graphene::db::abstract_object<betting_market_group_object> >(s, betting_market_group_obj);
fc::raw::unpack(s, betting_market_group_obj.id);
@ -709,113 +711,15 @@ inline Stream& operator>>( Stream& s, betting_market_group_object& betting_marke
fc::raw::unpack(s, stringified_stream);
std::istringstream stream(stringified_stream);
betting_market_group_obj.unpack_impl(stream);
return s;
}
} } // graphene::chain
FC_REFLECT_DERIVED( graphene::chain::betting_market_rules_object, (graphene::db::object), (name)(description) )
FC_REFLECT_DERIVED( graphene::chain::betting_market_group_object, (graphene::db::object), (description) )
FC_REFLECT_DERIVED( graphene::chain::betting_market_object, (graphene::db::object), (group_id) )
FC_REFLECT_DERIVED( graphene::chain::bet_object, (graphene::db::object), (bettor_id)(betting_market_id)(amount_to_bet)(backer_multiplier)(back_or_lay)(end_of_delay) )
FC_REFLECT_DERIVED( graphene::chain::betting_market_position_object, (graphene::db::object), (bettor_id)(betting_market_id)(pay_if_payout_condition)(pay_if_not_payout_condition)(pay_if_canceled)(pay_if_not_canceled)(fees_collected) )
namespace fc {
template<>
template<>
inline void if_enum<fc::false_type>::from_variant(const variant &vo, graphene::chain::betting_market_object &v, uint32_t max_depth) {
from_variant(vo, v, max_depth);
}
template<>
template<>
inline void if_enum<fc::false_type>::to_variant(const graphene::chain::betting_market_object &v, variant &vo, uint32_t max_depth) {
to_variant(v, vo, max_depth);
}
namespace raw { namespace detail {
template<>
template<>
inline void if_enum<fc::false_type>::pack(fc::datastream<size_t> &s, const graphene::chain::betting_market_object &v, uint32_t) {
s << v;
}
template<>
template<>
inline void if_enum<fc::false_type>::pack(fc::datastream<char*> &s, const graphene::chain::betting_market_object &v, uint32_t) {
s << v;
}
template<>
template<>
inline void if_enum<fc::false_type>::unpack(fc::datastream<const char*> &s, graphene::chain::betting_market_object &v, uint32_t) {
s >> v;
}
} } // namespace fc::raw::detail
template <>
struct get_typename<graphene::chain::betting_market_object> {
static const char *name() {
return "graphene::chain::betting_market_object";
}
};
template <>
struct reflector<graphene::chain::betting_market_object> {
typedef graphene::chain::betting_market_object type;
typedef fc::true_type is_defined;
typedef fc::false_type is_enum;
};
} // namespace fc
namespace fc {
template<>
template<>
inline void if_enum<fc::false_type>::from_variant(const variant &vo, graphene::chain::betting_market_group_object &v, uint32_t max_depth) {
from_variant(vo, v, max_depth);
}
template<>
template<>
inline void if_enum<fc::false_type>::to_variant(const graphene::chain::betting_market_group_object &v, variant &vo, uint32_t max_depth) {
to_variant(v, vo, max_depth);
}
namespace raw { namespace detail {
template<>
template<>
inline void if_enum<fc::false_type>::pack(fc::datastream<size_t> &s, const graphene::chain::betting_market_group_object &v, uint32_t) {
s << v;
}
template<>
template<>
inline void if_enum<fc::false_type>::pack(fc::datastream<char*> &s, const graphene::chain::betting_market_group_object &v, uint32_t) {
s << v;
}
template<>
template<>
inline void if_enum<fc::false_type>::unpack(fc::datastream<const char*> &s, graphene::chain::betting_market_group_object &v, uint32_t) {
s >> v;
}
} } // namespace fc::raw::detail
template <>
struct get_typename<graphene::chain::betting_market_group_object> {
static const char *name() {
return "graphene::chain::betting_market_group_object";
}
};
template <>
struct reflector<graphene::chain::betting_market_group_object> {
typedef graphene::chain::betting_market_group_object type;
typedef fc::true_type is_defined;
typedef fc::false_type is_enum;
};
} // namespace fc
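The stream operators earlier in this file serialize the non-reflected, state-machine part of the object by packing it into a std::stringstream and embedding the resulting string in the outer stream; the fc template specializations above then route fc's reflection machinery through those operators. A self-contained sketch of the wrap/unwrap idea using only the standard library, with an invented payload type:

#include <iostream>
#include <sstream>
#include <string>

// Invented payload standing in for the object's internal state machine.
struct opaque_state
{
   int current_state = 0;
   void pack_impl(std::ostream& s) const { s << current_state; }
   void unpack_impl(std::istream& s)     { s >> current_state; }
};

int main()
{
   opaque_state original;
   original.current_state = 3;

   // operator<< : stringify the opaque part, then store that string alongside the other fields.
   std::ostringstream inner;
   original.pack_impl(inner);
   std::string stringified_stream = inner.str();

   // operator>> : read the string back and let the object re-hydrate from it.
   opaque_state restored;
   std::istringstream stream(stringified_stream);
   restored.unpack_impl(stream);

   std::cout << restored.current_state << "\n";   // 3
}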

View file

@ -47,11 +47,7 @@ namespace graphene { namespace chain {
optional<signed_block> fetch_by_number( uint32_t block_num )const;
optional<signed_block> last()const;
optional<block_id_type> last_id()const;
void set_replay_mode(bool mode);
private:
bool replay_mode = false;
optional<index_entry> last_index_entry()const;
fc::path _index_filename;
mutable std::fstream _blocks;

View file

@ -23,13 +23,8 @@
*/
#pragma once
#ifdef BUILD_PEERPLAYS_TESTNET
#define GRAPHENE_SYMBOL "TEST"
#define GRAPHENE_ADDRESS_PREFIX "TEST"
#else
#define GRAPHENE_SYMBOL "PPY"
#define GRAPHENE_ADDRESS_PREFIX "PPY"
#endif
#define GRAPHENE_MIN_ACCOUNT_NAME_LENGTH 1
#define GRAPHENE_MAX_ACCOUNT_NAME_LENGTH 63
@ -158,7 +153,7 @@
#define GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT 4
#define GRAPHENE_RECENTLY_MISSED_COUNT_DECREMENT 3
#define GRAPHENE_CURRENT_DB_VERSION "PPY2.5"
#define GRAPHENE_CURRENT_DB_VERSION "PPY2.3"
#define GRAPHENE_IRREVERSIBLE_THRESHOLD (70 * GRAPHENE_1_PERCENT)

View file

@ -66,8 +66,6 @@ namespace graphene { namespace chain {
database();
~database();
std::vector<fc::time_point_sec> _hardfork_times;
enum validation_steps
{
skip_nothing = 0,
@ -142,8 +140,8 @@ namespace graphene { namespace chain {
void add_checkpoints( const flat_map<uint32_t,block_id_type>& checkpts );
const flat_map<uint32_t,block_id_type> get_checkpoints()const { return _checkpoints; }
bool before_last_checkpoint()const;
void check_transaction_for_duplicated_operations(const signed_transaction& trx);
void check_tansaction_for_duplicated_operations(const signed_transaction& trx);
bool push_block( const signed_block& b, uint32_t skip = skip_nothing );
processed_transaction push_transaction( const signed_transaction& trx, uint32_t skip = skip_nothing );
@ -245,16 +243,7 @@ namespace graphene { namespace chain {
witness_id_type get_scheduled_witness(uint32_t slot_num)const;
/**
* @brief Get son schedule id for the given sidechain_type.
*
* type - the sidechain_type for which the schedule id is requested.
*
* returns Id of the schedule object.
*/
unsigned_int get_son_schedule_id(sidechain_type type)const;
/**
* @brief Get the bitcoin or hive son scheduled for block production in a slot.
* @brief Get the son scheduled for block production in a slot.
*
* slot_num always corresponds to a time in the future.
*
@ -267,7 +256,7 @@ namespace graphene { namespace chain {
*
* Passing slot_num == 0 returns GRAPHENE_NULL_WITNESS
*/
son_id_type get_scheduled_son(sidechain_type type, uint32_t slot_num)const;
son_id_type get_scheduled_son(uint32_t slot_num)const;
/**
* Get the time at which the given slot occurs.
@ -292,12 +281,11 @@ namespace graphene { namespace chain {
vector<witness_id_type> get_near_witness_schedule()const;
void update_witness_schedule();
void update_witness_schedule(const signed_block& next_block);
void update_son_schedule(sidechain_type type);
void update_son_schedule(sidechain_type type, const signed_block& next_block);
void update_son_schedule();
void update_son_schedule(const signed_block& next_block);
void check_lottery_end_by_participants( asset_id_type asset_id );
void check_ending_lotteries();
void check_ending_nft_lotteries();
//////////////////// db_getter.cpp ////////////////////
@ -322,8 +310,7 @@ namespace graphene { namespace chain {
fc::optional<operation> create_son_deregister_proposal( son_id_type son_id, account_id_type paying_son );
signed_transaction create_signed_transaction( const fc::ecc::private_key& signing_private_key, const operation& op );
bool is_son_dereg_valid( son_id_type son_id );
bool is_son_active( sidechain_type type, son_id_type son_id );
bool is_asset_creation_allowed(const string& symbol);
bool is_son_active( son_id_type son_id );
time_point_sec head_block_time()const;
uint32_t head_block_num()const;
@ -337,14 +324,11 @@ namespace graphene { namespace chain {
uint32_t last_non_undoable_block_num() const;
vector<authority> get_account_custom_authorities(account_id_type account, const operation& op)const;
vector<uint64_t> get_random_numbers(uint64_t minimum, uint64_t maximum, uint64_t selections, bool duplicates);
//////////////////// db_init.cpp ////////////////////
void initialize_evaluators();
/// Reset the object graph in-memory
void initialize_indexes();
void initialize_hardforks();
void init_genesis(const genesis_state_type& genesis_state = genesis_state_type());
template<typename EvaluatorType>
@ -520,16 +504,12 @@ namespace graphene { namespace chain {
void notify_changed_objects();
private:
std::mutex _pending_tx_session_mutex;
optional<undo_database::session> _pending_tx_session;
vector< unique_ptr<op_evaluator> > _operation_evaluators;
template<class Index>
vector<std::reference_wrapper<const typename Index::object_type>> sort_votable_objects(size_t count)const;
template<class Index>
vector<std::reference_wrapper<const typename Index::object_type>> sort_votable_objects(sidechain_type sidechain, size_t count)const;
//////////////////// db_block.cpp ////////////////////
public:
@ -579,22 +559,19 @@ namespace graphene { namespace chain {
void initialize_budget_record( fc::time_point_sec now, budget_record& rec )const;
void process_budget();
void pay_workers( share_type& budget );
void pay_sons_before_hf_ethereum();
void pay_sons_after_hf_ethereum();
void pay_sons();
void perform_son_tasks();
void perform_chain_maintenance(const signed_block& next_block, const global_property_object& global_props);
void update_active_witnesses();
void update_active_committee_members();
void update_son_metrics( const flat_map<sidechain_type, vector<son_sidechain_info> >& curr_active_sons );
void update_son_metrics( const vector<son_info>& curr_active_sons );
void update_active_sons();
void remove_son_proposal( const proposal_object& proposal );
void remove_inactive_son_down_proposals( const vector<son_id_type>& son_ids_to_remove );
void remove_inactive_son_proposals( const vector<son_id_type>& son_ids_to_remove );
void update_son_statuses( const flat_map<sidechain_type, vector<son_sidechain_info> >& curr_active_sons,
const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_son_wallet( const flat_map<sidechain_type, vector<son_sidechain_info> >& new_active_sons );
void update_son_statuses( const vector<son_info>& cur_active_sons, const vector<son_info>& new_active_sons );
void update_son_wallet( const vector<son_info>& new_active_sons );
void update_worker_votes();
void hotfix_2024();
public:
double calculate_vesting_factor(const account_object& stake_account);
@ -605,7 +582,6 @@ namespace graphene { namespace chain {
///@}
///@}
std::mutex _pending_tx_mutex;
vector< processed_transaction > _pending_tx;
fork_database _fork_db;
@ -633,17 +609,11 @@ namespace graphene { namespace chain {
uint16_t _current_op_in_trx = 0;
uint32_t _current_virtual_op = 0;
vector<uint64_t> _vote_tally_buffer;
vector<uint64_t> _witness_count_histogram_buffer;
vector<uint64_t> _committee_count_histogram_buffer;
flat_map<sidechain_type, vector<uint64_t> > _son_count_histogram_buffer = []{
flat_map<sidechain_type, vector<uint64_t> > son_count_histogram_buffer;
for(const auto& active_sidechain_type : all_sidechain_types){
son_count_histogram_buffer[active_sidechain_type] = vector<uint64_t>{};
}
return son_count_histogram_buffer;
}();
uint64_t _total_voting_stake;
vector<uint64_t> _vote_tally_buffer;
vector<uint64_t> _witness_count_histogram_buffer;
vector<uint64_t> _committee_count_histogram_buffer;
vector<uint64_t> _son_count_histogram_buffer;
uint64_t _total_voting_stake;
flat_map<uint32_t,block_id_type> _checkpoints;
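_son_count_histogram_buffer above is initialized with an immediately-invoked lambda so the map starts with an (empty) bucket for every entry of all_sidechain_types. The same member-initializer pattern, sketched with std::map and a plain enum; the surrounding class is illustrative only:

#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

enum class sidechain_type { bitcoin, ethereum, hive };
static const std::vector<sidechain_type> all_sidechain_types =
   { sidechain_type::bitcoin, sidechain_type::ethereum, sidechain_type::hive };

struct database_stub
{
   // Immediately-invoked lambda: build the map once, with one bucket per sidechain.
   std::map<sidechain_type, std::vector<uint64_t>> _son_count_histogram_buffer = []{
      std::map<sidechain_type, std::vector<uint64_t>> buffer;
      for (const auto& active_sidechain_type : all_sidechain_types)
         buffer[active_sidechain_type] = std::vector<uint64_t>{};
      return buffer;
   }();
};

int main()
{
   database_stub db;
   std::cout << db._son_count_histogram_buffer.size() << "\n";   // 3
}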

Some files were not shown because too many files have changed in this diff