Compare commits

1 commit

Author:  satyakoneru
SHA1:    cf4ee1f96e
Message: GRPH-60-Recursion_nesting_guard
Date:    2019-08-28 11:13:01 +00:00
488 changed files with 13572 additions and 272495 deletions

.clang-format (deleted)

@ -1,136 +0,0 @@
---
Language: Cpp
AccessModifierOffset: -3
AlignAfterOpenBracket: Align
AlignConsecutiveMacros: false
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: Never
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortLambdasOnASingleLine: None
AllowShortIfStatementsOnASingleLine: Never
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterCaseLabel: false
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: AfterColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 0
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: true
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 6
ContinuationIndentWidth: 6
Cpp11BracedListStyle: true
DeriveLineEnding: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
SortPriority: 0
- Regex: '^(<|"(gtest|gmock|isl|json)/)'
Priority: 3
SortPriority: 0
- Regex: '.*'
Priority: 1
SortPriority: 0
IncludeIsMainRegex: '(Test)?$'
IncludeIsMainSourceRegex: ''
IndentCaseLabels: false
IndentGotoLabels: false
IndentPPDirectives: None
IndentWidth: 3
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceBeforeSquareBrackets: false
Standard: Latest
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
TabWidth: 3
UseCRLF: false
UseTab: Never
...
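
For reference, this deleted file is a clang-format configuration for C++ (IndentWidth 3, attached braces, right-aligned pointers, no short ifs or functions collapsed onto one line). A hypothetical snippet, formatted by hand to match those settings and not taken from the repository:

```cpp
// Hypothetical example; hand-formatted to follow the .clang-format settings above.
#include <cstdint>
#include <stdexcept>

namespace graphene {

class nesting_guard {
public:
   explicit nesting_guard(uint32_t max_depth) : m_max_depth(max_depth) {
      if (m_max_depth == 0) {
         throw std::invalid_argument("max_depth must be positive");
      }
   }

   // PointerAlignment: Right keeps the '*' next to the name.
   uint32_t *current_depth() {
      return &m_depth;
   }

private:
   uint32_t m_max_depth;
   uint32_t m_depth = 0;
};

} // namespace graphene
```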

.gitignore (6 lines changed)

@ -11,10 +11,6 @@ moc_*
hardfork.hpp
build_xc
data
CMakeDoxyfile.in
build
build__*
libraries/utilities/git_revision.cpp
@ -47,4 +43,4 @@ object_database/*
*.pyo
.vscode
.DS_Store
.idea
.idea

.gitlab-ci.yml

@ -1,171 +1,36 @@
include:
- template: Code-Quality.gitlab-ci.yml
- template: Dependency-Scanning.gitlab-ci.yml
- template: License-Scanning.gitlab-ci.yml
- template: SAST.gitlab-ci.yml
- template: Secret-Detection.gitlab-ci.yml
stages:
- pull
- build
- test
- dockerize
- python-test
- deploy
build-mainnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
- git submodule sync
- git submodule update --init --recursive
- rm -rf build
- mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release ..
- make -j$(nproc)
artifacts:
untracked: true
paths:
- build/libraries/
- build/programs/
- build/tests/
tags:
- builder
before_script:
- cd /var/www/Projects/595.peerplays/blockchain
test-mainnet:
stage: test
dependencies:
- build-mainnet
pulljob:
stage: pull
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
- ./build/tests/chain_test --log_level=message
- ./build/tests/cli_test --log_level=message
tags:
- builder
dockerize-mainnet:
stage: dockerize
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker builder prune -a -f
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
timeout:
3h
build-testnet:
stage: build
script:
- rm -rf .git/modules/docs .git/modules/libraries/fc ./docs ./libraries/fc
- git submodule sync
- git submodule update --init --recursive
- rm -rf build
- mkdir build
- cd build
- cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 ..
- make -j$(nproc)
artifacts:
untracked: true
paths:
- build/libraries/
- build/programs/
- build/tests/
when: manual
tags:
- builder
deploy-testnet:
stage: deploy
dependencies:
- build-testnet
script:
- sudo systemctl stop witness
- rm $WORK_DIR/peerplays/witness_node || true
- cp build/programs/witness_node/witness_node $WORK_DIR/peerplays/
- sudo systemctl restart witness
rules:
- if: $CI_COMMIT_BRANCH == "master"
when: always
environment:
name: devnet
url: $DEVNET_URL
- git pull origin master
only:
- master
tags:
- devnet
- pp-dev
test-testnet:
buildjob:
stage: build
script:
- cmake .
- make
only:
- master
tags:
- pp-dev
testjob:
stage: test
dependencies:
- build-testnet
script:
- ./build/libraries/fc/tests/all_tests
- ./build/tests/betting_test --log_level=message
- ./build/tests/chain_test --log_level=message
- ./build/tests/cli_test --log_level=message
tags:
- builder
when:
manual
timeout:
1h
dockerize-testnet:
stage: dockerize
variables:
IMAGE: $CI_REGISTRY_IMAGE/testnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- docker build --no-cache -t $IMAGE .
- docker push $IMAGE
after_script:
- docker rmi $IMAGE
tags:
- builder
when:
manual
timeout:
3h
test-e2e:
stage: python-test
variables:
IMAGE: $CI_REGISTRY_IMAGE/mainnet/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA
before_script:
- docker info
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
script:
- git clone https://gitlab.com/PBSA/tools-libs/peerplays-utils.git
- cd peerplays-utils/peerplays-qa-environment
- git checkout origin/feature/python-e2e-tests-for-CI
- cd e2e-tests/
- python3 -m venv venv
- source venv/bin/activate
- pip3 install -r requirements.txt
- docker-compose down --remove-orphans
- docker ps -a
- docker pull $IMAGE
- docker tag $IMAGE peerplays-base:latest
- docker image ls -a
- docker-compose build
- python3 main.py --start all
- docker ps -a
- python3 -m pytest test_btc_init_state.py test_hive_inital_state.py test_pp_inital_state.py
- python3 main.py --stop
- deactivate
- docker ps -a
after_script:
- docker rmi $(docker images -a | grep -v 'hive-for-peerplays\|ethereum-for-peerplays\|bitcoin-for-peerplays\|ubuntu-for-peerplays' | awk '{print $3}')
tags:
- python-tests
when:
manual
- ./tests/chain_test
- ./tests/tournament_test
only:
- master
tags:
- pp-dev
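
The test jobs above run Boost.Test binaries (all_tests, betting_test, chain_test, cli_test) with --log_level=message. A minimal sketch of the kind of test case such a suite contains; the fixture and checks are hypothetical, not taken from the repository:

```cpp
// Hypothetical Boost.Test case; the real suites live under tests/ and link the chain library.
#define BOOST_TEST_MODULE example_tests
#include <boost/test/included/unit_test.hpp>

// Stand-in for logic a chain test might exercise.
static int nesting_depth(int calls) {
   return calls > 0 ? 1 + nesting_depth(calls - 1) : 0;
}

BOOST_AUTO_TEST_SUITE(example_suite)

BOOST_AUTO_TEST_CASE(depth_is_counted) {
   BOOST_TEST_MESSAGE("checking recursion depth helper");  // printed with --log_level=message
   BOOST_CHECK_EQUAL(nesting_depth(0), 0);
   BOOST_CHECK_EQUAL(nesting_depth(3), 3);
}

BOOST_AUTO_TEST_SUITE_END()
```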

.gitmodules (11 lines changed)

@ -1,9 +1,8 @@
[submodule "docs"]
path = docs
url = https://github.com/bitshares/bitshares-core.wiki.git
ignore = dirty
path = docs
url = https://github.com/bitshares/bitshares-core.wiki.git
ignore = dirty
[submodule "libraries/fc"]
path = libraries/fc
url = https://gitlab.com/PBSA/tools-libs/peerplays-fc.git
branch = develop
ignore = dirty
url = https://github.com/PBSA/peerplays-fc.git
ignore = dirty

sonar-project.properties (deleted)

@ -1,10 +0,0 @@
sonar.projectKey=peerplays-network_peerplays
sonar.organization=peerplays-network
# This is the name and version displayed in the SonarCloud UI.
sonar.projectName=peerplays
# Path is relative to the sonar-project.properties file. Replace "\" by "/" on Windows.
sonar.sources=.
sonar.host.url=https://sonarcloud.io

CMakeDoxyfile.in (deleted)

@ -1,279 +0,0 @@
#
# DO NOT EDIT! THIS FILE WAS GENERATED BY CMAKE!
#
DOXYFILE_ENCODING = @DOXYGEN_DOXYFILE_ENCODING@
PROJECT_NAME = @DOXYGEN_PROJECT_NAME@
PROJECT_NUMBER = @DOXYGEN_PROJECT_NUMBER@
PROJECT_BRIEF = @DOXYGEN_PROJECT_BRIEF@
PROJECT_LOGO = @DOXYGEN_PROJECT_LOGO@
OUTPUT_DIRECTORY = @DOXYGEN_OUTPUT_DIRECTORY@
CREATE_SUBDIRS = @DOXYGEN_CREATE_SUBDIRS@
ALLOW_UNICODE_NAMES = @DOXYGEN_ALLOW_UNICODE_NAMES@
OUTPUT_LANGUAGE = @DOXYGEN_OUTPUT_LANGUAGE@
OUTPUT_TEXT_DIRECTION = @DOXYGEN_OUTPUT_TEXT_DIRECTION@
BRIEF_MEMBER_DESC = @DOXYGEN_BRIEF_MEMBER_DESC@
REPEAT_BRIEF = @DOXYGEN_REPEAT_BRIEF@
ABBREVIATE_BRIEF = @DOXYGEN_ABBREVIATE_BRIEF@
ALWAYS_DETAILED_SEC = @DOXYGEN_ALWAYS_DETAILED_SEC@
INLINE_INHERITED_MEMB = @DOXYGEN_INLINE_INHERITED_MEMB@
FULL_PATH_NAMES = @DOXYGEN_FULL_PATH_NAMES@
STRIP_FROM_PATH = @DOXYGEN_STRIP_FROM_PATH@
STRIP_FROM_INC_PATH = @DOXYGEN_STRIP_FROM_INC_PATH@
SHORT_NAMES = @DOXYGEN_SHORT_NAMES@
JAVADOC_AUTOBRIEF = @DOXYGEN_JAVADOC_AUTOBRIEF@
JAVADOC_BANNER = @DOXYGEN_JAVADOC_BANNER@
QT_AUTOBRIEF = @DOXYGEN_QT_AUTOBRIEF@
MULTILINE_CPP_IS_BRIEF = @DOXYGEN_MULTILINE_CPP_IS_BRIEF@
INHERIT_DOCS = @DOXYGEN_INHERIT_DOCS@
SEPARATE_MEMBER_PAGES = @DOXYGEN_SEPARATE_MEMBER_PAGES@
TAB_SIZE = @DOXYGEN_TAB_SIZE@
ALIASES = @DOXYGEN_ALIASES@
TCL_SUBST = @DOXYGEN_TCL_SUBST@
OPTIMIZE_OUTPUT_FOR_C = @DOXYGEN_OPTIMIZE_OUTPUT_FOR_C@
OPTIMIZE_OUTPUT_JAVA = @DOXYGEN_OPTIMIZE_OUTPUT_JAVA@
OPTIMIZE_FOR_FORTRAN = @DOXYGEN_OPTIMIZE_FOR_FORTRAN@
OPTIMIZE_OUTPUT_VHDL = @DOXYGEN_OPTIMIZE_OUTPUT_VHDL@
OPTIMIZE_OUTPUT_SLICE = @DOXYGEN_OPTIMIZE_OUTPUT_SLICE@
EXTENSION_MAPPING = @DOXYGEN_EXTENSION_MAPPING@
MARKDOWN_SUPPORT = @DOXYGEN_MARKDOWN_SUPPORT@
TOC_INCLUDE_HEADINGS = @DOXYGEN_TOC_INCLUDE_HEADINGS@
AUTOLINK_SUPPORT = @DOXYGEN_AUTOLINK_SUPPORT@
BUILTIN_STL_SUPPORT = @DOXYGEN_BUILTIN_STL_SUPPORT@
CPP_CLI_SUPPORT = @DOXYGEN_CPP_CLI_SUPPORT@
SIP_SUPPORT = @DOXYGEN_SIP_SUPPORT@
IDL_PROPERTY_SUPPORT = @DOXYGEN_IDL_PROPERTY_SUPPORT@
DISTRIBUTE_GROUP_DOC = @DOXYGEN_DISTRIBUTE_GROUP_DOC@
GROUP_NESTED_COMPOUNDS = @DOXYGEN_GROUP_NESTED_COMPOUNDS@
SUBGROUPING = @DOXYGEN_SUBGROUPING@
INLINE_GROUPED_CLASSES = @DOXYGEN_INLINE_GROUPED_CLASSES@
INLINE_SIMPLE_STRUCTS = @DOXYGEN_INLINE_SIMPLE_STRUCTS@
TYPEDEF_HIDES_STRUCT = @DOXYGEN_TYPEDEF_HIDES_STRUCT@
LOOKUP_CACHE_SIZE = @DOXYGEN_LOOKUP_CACHE_SIZE@
EXTRACT_ALL = @DOXYGEN_EXTRACT_ALL@
EXTRACT_PRIVATE = @DOXYGEN_EXTRACT_PRIVATE@
EXTRACT_PRIV_VIRTUAL = @DOXYGEN_EXTRACT_PRIV_VIRTUAL@
EXTRACT_PACKAGE = @DOXYGEN_EXTRACT_PACKAGE@
EXTRACT_STATIC = @DOXYGEN_EXTRACT_STATIC@
EXTRACT_LOCAL_CLASSES = @DOXYGEN_EXTRACT_LOCAL_CLASSES@
EXTRACT_LOCAL_METHODS = @DOXYGEN_EXTRACT_LOCAL_METHODS@
EXTRACT_ANON_NSPACES = @DOXYGEN_EXTRACT_ANON_NSPACES@
HIDE_UNDOC_MEMBERS = @DOXYGEN_HIDE_UNDOC_MEMBERS@
HIDE_UNDOC_CLASSES = @DOXYGEN_HIDE_UNDOC_CLASSES@
HIDE_FRIEND_COMPOUNDS = @DOXYGEN_HIDE_FRIEND_COMPOUNDS@
HIDE_IN_BODY_DOCS = @DOXYGEN_HIDE_IN_BODY_DOCS@
INTERNAL_DOCS = @DOXYGEN_INTERNAL_DOCS@
CASE_SENSE_NAMES = @DOXYGEN_CASE_SENSE_NAMES@
HIDE_SCOPE_NAMES = @DOXYGEN_HIDE_SCOPE_NAMES@
HIDE_COMPOUND_REFERENCE= @DOXYGEN_HIDE_COMPOUND_REFERENCE@
SHOW_INCLUDE_FILES = @DOXYGEN_SHOW_INCLUDE_FILES@
SHOW_GROUPED_MEMB_INC = @DOXYGEN_SHOW_GROUPED_MEMB_INC@
FORCE_LOCAL_INCLUDES = @DOXYGEN_FORCE_LOCAL_INCLUDES@
INLINE_INFO = @DOXYGEN_INLINE_INFO@
SORT_MEMBER_DOCS = @DOXYGEN_SORT_MEMBER_DOCS@
SORT_BRIEF_DOCS = @DOXYGEN_SORT_BRIEF_DOCS@
SORT_MEMBERS_CTORS_1ST = @DOXYGEN_SORT_MEMBERS_CTORS_1ST@
SORT_GROUP_NAMES = @DOXYGEN_SORT_GROUP_NAMES@
SORT_BY_SCOPE_NAME = @DOXYGEN_SORT_BY_SCOPE_NAME@
STRICT_PROTO_MATCHING = @DOXYGEN_STRICT_PROTO_MATCHING@
GENERATE_TODOLIST = @DOXYGEN_GENERATE_TODOLIST@
GENERATE_TESTLIST = @DOXYGEN_GENERATE_TESTLIST@
GENERATE_BUGLIST = @DOXYGEN_GENERATE_BUGLIST@
GENERATE_DEPRECATEDLIST= @DOXYGEN_GENERATE_DEPRECATEDLIST@
ENABLED_SECTIONS = @DOXYGEN_ENABLED_SECTIONS@
MAX_INITIALIZER_LINES = @DOXYGEN_MAX_INITIALIZER_LINES@
SHOW_USED_FILES = @DOXYGEN_SHOW_USED_FILES@
SHOW_FILES = @DOXYGEN_SHOW_FILES@
SHOW_NAMESPACES = @DOXYGEN_SHOW_NAMESPACES@
FILE_VERSION_FILTER = @DOXYGEN_FILE_VERSION_FILTER@
LAYOUT_FILE = @DOXYGEN_LAYOUT_FILE@
CITE_BIB_FILES = @DOXYGEN_CITE_BIB_FILES@
QUIET = @DOXYGEN_QUIET@
WARNINGS = @DOXYGEN_WARNINGS@
WARN_IF_UNDOCUMENTED = @DOXYGEN_WARN_IF_UNDOCUMENTED@
WARN_IF_DOC_ERROR = @DOXYGEN_WARN_IF_DOC_ERROR@
WARN_NO_PARAMDOC = @DOXYGEN_WARN_NO_PARAMDOC@
WARN_AS_ERROR = @DOXYGEN_WARN_AS_ERROR@
WARN_FORMAT = @DOXYGEN_WARN_FORMAT@
WARN_LOGFILE = @DOXYGEN_WARN_LOGFILE@
INPUT = @DOXYGEN_INPUT@
INPUT_ENCODING = @DOXYGEN_INPUT_ENCODING@
FILE_PATTERNS = @DOXYGEN_FILE_PATTERNS@
RECURSIVE = @DOXYGEN_RECURSIVE@
EXCLUDE = @DOXYGEN_EXCLUDE@
EXCLUDE_SYMLINKS = @DOXYGEN_EXCLUDE_SYMLINKS@
EXCLUDE_PATTERNS = @DOXYGEN_EXCLUDE_PATTERNS@
EXCLUDE_SYMBOLS = @DOXYGEN_EXCLUDE_SYMBOLS@
EXAMPLE_PATH = @DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = @DOXYGEN_EXAMPLE_PATTERNS@
EXAMPLE_RECURSIVE = @DOXYGEN_EXAMPLE_RECURSIVE@
IMAGE_PATH = @DOXYGEN_IMAGE_PATH@
INPUT_FILTER = @DOXYGEN_INPUT_FILTER@
FILTER_PATTERNS = @DOXYGEN_FILTER_PATTERNS@
FILTER_SOURCE_FILES = @DOXYGEN_FILTER_SOURCE_FILES@
FILTER_SOURCE_PATTERNS = @DOXYGEN_FILTER_SOURCE_PATTERNS@
USE_MDFILE_AS_MAINPAGE = @DOXYGEN_USE_MDFILE_AS_MAINPAGE@
SOURCE_BROWSER = @DOXYGEN_SOURCE_BROWSER@
INLINE_SOURCES = @DOXYGEN_INLINE_SOURCES@
STRIP_CODE_COMMENTS = @DOXYGEN_STRIP_CODE_COMMENTS@
REFERENCED_BY_RELATION = @DOXYGEN_REFERENCED_BY_RELATION@
REFERENCES_RELATION = @DOXYGEN_REFERENCES_RELATION@
REFERENCES_LINK_SOURCE = @DOXYGEN_REFERENCES_LINK_SOURCE@
SOURCE_TOOLTIPS = @DOXYGEN_SOURCE_TOOLTIPS@
USE_HTAGS = @DOXYGEN_USE_HTAGS@
VERBATIM_HEADERS = @DOXYGEN_VERBATIM_HEADERS@
CLANG_ASSISTED_PARSING = @DOXYGEN_CLANG_ASSISTED_PARSING@
CLANG_OPTIONS = @DOXYGEN_CLANG_OPTIONS@
CLANG_DATABASE_PATH = @DOXYGEN_CLANG_DATABASE_PATH@
ALPHABETICAL_INDEX = @DOXYGEN_ALPHABETICAL_INDEX@
COLS_IN_ALPHA_INDEX = @DOXYGEN_COLS_IN_ALPHA_INDEX@
IGNORE_PREFIX = @DOXYGEN_IGNORE_PREFIX@
GENERATE_HTML = @DOXYGEN_GENERATE_HTML@
HTML_OUTPUT = @DOXYGEN_HTML_OUTPUT@
HTML_FILE_EXTENSION = @DOXYGEN_HTML_FILE_EXTENSION@
HTML_HEADER = @DOXYGEN_HTML_HEADER@
HTML_FOOTER = @DOXYGEN_HTML_FOOTER@
HTML_STYLESHEET = @DOXYGEN_HTML_STYLESHEET@
HTML_EXTRA_STYLESHEET = @DOXYGEN_HTML_EXTRA_STYLESHEET@
HTML_EXTRA_FILES = @DOXYGEN_HTML_EXTRA_FILES@
HTML_COLORSTYLE_HUE = @DOXYGEN_HTML_COLORSTYLE_HUE@
HTML_COLORSTYLE_SAT = @DOXYGEN_HTML_COLORSTYLE_SAT@
HTML_COLORSTYLE_GAMMA = @DOXYGEN_HTML_COLORSTYLE_GAMMA@
HTML_TIMESTAMP = @DOXYGEN_HTML_TIMESTAMP@
HTML_DYNAMIC_MENUS = @DOXYGEN_HTML_DYNAMIC_MENUS@
HTML_DYNAMIC_SECTIONS = @DOXYGEN_HTML_DYNAMIC_SECTIONS@
HTML_INDEX_NUM_ENTRIES = @DOXYGEN_HTML_INDEX_NUM_ENTRIES@
GENERATE_DOCSET = @DOXYGEN_GENERATE_DOCSET@
DOCSET_FEEDNAME = @DOXYGEN_DOCSET_FEEDNAME@
DOCSET_BUNDLE_ID = @DOXYGEN_DOCSET_BUNDLE_ID@
DOCSET_PUBLISHER_ID = @DOXYGEN_DOCSET_PUBLISHER_ID@
DOCSET_PUBLISHER_NAME = @DOXYGEN_DOCSET_PUBLISHER_NAME@
GENERATE_HTMLHELP = @DOXYGEN_GENERATE_HTMLHELP@
CHM_FILE = @DOXYGEN_CHM_FILE@
HHC_LOCATION = @DOXYGEN_HHC_LOCATION@
GENERATE_CHI = @DOXYGEN_GENERATE_CHI@
CHM_INDEX_ENCODING = @DOXYGEN_CHM_INDEX_ENCODING@
BINARY_TOC = @DOXYGEN_BINARY_TOC@
TOC_EXPAND = @DOXYGEN_TOC_EXPAND@
GENERATE_QHP = @DOXYGEN_GENERATE_QHP@
QCH_FILE = @DOXYGEN_QCH_FILE@
QHP_NAMESPACE = @DOXYGEN_QHP_NAMESPACE@
QHP_VIRTUAL_FOLDER = @DOXYGEN_QHP_VIRTUAL_FOLDER@
QHP_CUST_FILTER_NAME = @DOXYGEN_QHP_CUST_FILTER_NAME@
QHP_CUST_FILTER_ATTRS = @DOXYGEN_QHP_CUST_FILTER_ATTRS@
QHP_SECT_FILTER_ATTRS = @DOXYGEN_QHP_SECT_FILTER_ATTRS@
QHG_LOCATION = @DOXYGEN_QHG_LOCATION@
GENERATE_ECLIPSEHELP = @DOXYGEN_GENERATE_ECLIPSEHELP@
ECLIPSE_DOC_ID = @DOXYGEN_ECLIPSE_DOC_ID@
DISABLE_INDEX = @DOXYGEN_DISABLE_INDEX@
GENERATE_TREEVIEW = @DOXYGEN_GENERATE_TREEVIEW@
ENUM_VALUES_PER_LINE = @DOXYGEN_ENUM_VALUES_PER_LINE@
TREEVIEW_WIDTH = @DOXYGEN_TREEVIEW_WIDTH@
EXT_LINKS_IN_WINDOW = @DOXYGEN_EXT_LINKS_IN_WINDOW@
FORMULA_FONTSIZE = @DOXYGEN_FORMULA_FONTSIZE@
FORMULA_TRANSPARENT = @DOXYGEN_FORMULA_TRANSPARENT@
USE_MATHJAX = @DOXYGEN_USE_MATHJAX@
MATHJAX_FORMAT = @DOXYGEN_MATHJAX_FORMAT@
MATHJAX_RELPATH = @DOXYGEN_MATHJAX_RELPATH@
MATHJAX_EXTENSIONS = @DOXYGEN_MATHJAX_EXTENSIONS@
MATHJAX_CODEFILE = @DOXYGEN_MATHJAX_CODEFILE@
SEARCHENGINE = @DOXYGEN_SEARCHENGINE@
SERVER_BASED_SEARCH = @DOXYGEN_SERVER_BASED_SEARCH@
EXTERNAL_SEARCH = @DOXYGEN_EXTERNAL_SEARCH@
SEARCHENGINE_URL = @DOXYGEN_SEARCHENGINE_URL@
SEARCHDATA_FILE = @DOXYGEN_SEARCHDATA_FILE@
EXTERNAL_SEARCH_ID = @DOXYGEN_EXTERNAL_SEARCH_ID@
EXTRA_SEARCH_MAPPINGS = @DOXYGEN_EXTRA_SEARCH_MAPPINGS@
GENERATE_LATEX = @DOXYGEN_GENERATE_LATEX@
LATEX_OUTPUT = @DOXYGEN_LATEX_OUTPUT@
LATEX_CMD_NAME = @DOXYGEN_LATEX_CMD_NAME@
MAKEINDEX_CMD_NAME = @DOXYGEN_MAKEINDEX_CMD_NAME@
LATEX_MAKEINDEX_CMD = @DOXYGEN_LATEX_MAKEINDEX_CMD@
COMPACT_LATEX = @DOXYGEN_COMPACT_LATEX@
PAPER_TYPE = @DOXYGEN_PAPER_TYPE@
EXTRA_PACKAGES = @DOXYGEN_EXTRA_PACKAGES@
LATEX_HEADER = @DOXYGEN_LATEX_HEADER@
LATEX_FOOTER = @DOXYGEN_LATEX_FOOTER@
LATEX_EXTRA_STYLESHEET = @DOXYGEN_LATEX_EXTRA_STYLESHEET@
LATEX_EXTRA_FILES = @DOXYGEN_LATEX_EXTRA_FILES@
PDF_HYPERLINKS = @DOXYGEN_PDF_HYPERLINKS@
USE_PDFLATEX = @DOXYGEN_USE_PDFLATEX@
LATEX_BATCHMODE = @DOXYGEN_LATEX_BATCHMODE@
LATEX_HIDE_INDICES = @DOXYGEN_LATEX_HIDE_INDICES@
LATEX_SOURCE_CODE = @DOXYGEN_LATEX_SOURCE_CODE@
LATEX_BIB_STYLE = @DOXYGEN_LATEX_BIB_STYLE@
LATEX_TIMESTAMP = @DOXYGEN_LATEX_TIMESTAMP@
LATEX_EMOJI_DIRECTORY = @DOXYGEN_LATEX_EMOJI_DIRECTORY@
GENERATE_RTF = @DOXYGEN_GENERATE_RTF@
RTF_OUTPUT = @DOXYGEN_RTF_OUTPUT@
COMPACT_RTF = @DOXYGEN_COMPACT_RTF@
RTF_HYPERLINKS = @DOXYGEN_RTF_HYPERLINKS@
RTF_STYLESHEET_FILE = @DOXYGEN_RTF_STYLESHEET_FILE@
RTF_EXTENSIONS_FILE = @DOXYGEN_RTF_EXTENSIONS_FILE@
RTF_SOURCE_CODE = @DOXYGEN_RTF_SOURCE_CODE@
GENERATE_MAN = @DOXYGEN_GENERATE_MAN@
MAN_OUTPUT = @DOXYGEN_MAN_OUTPUT@
MAN_EXTENSION = @DOXYGEN_MAN_EXTENSION@
MAN_SUBDIR = @DOXYGEN_MAN_SUBDIR@
MAN_LINKS = @DOXYGEN_MAN_LINKS@
GENERATE_XML = @DOXYGEN_GENERATE_XML@
XML_OUTPUT = @DOXYGEN_XML_OUTPUT@
XML_PROGRAMLISTING = @DOXYGEN_XML_PROGRAMLISTING@
XML_NS_MEMB_FILE_SCOPE = @DOXYGEN_XML_NS_MEMB_FILE_SCOPE@
GENERATE_DOCBOOK = @DOXYGEN_GENERATE_DOCBOOK@
DOCBOOK_OUTPUT = @DOXYGEN_DOCBOOK_OUTPUT@
DOCBOOK_PROGRAMLISTING = @DOXYGEN_DOCBOOK_PROGRAMLISTING@
GENERATE_AUTOGEN_DEF = @DOXYGEN_GENERATE_AUTOGEN_DEF@
GENERATE_PERLMOD = @DOXYGEN_GENERATE_PERLMOD@
PERLMOD_LATEX = @DOXYGEN_PERLMOD_LATEX@
PERLMOD_PRETTY = @DOXYGEN_PERLMOD_PRETTY@
PERLMOD_MAKEVAR_PREFIX = @DOXYGEN_PERLMOD_MAKEVAR_PREFIX@
ENABLE_PREPROCESSING = @DOXYGEN_ENABLE_PREPROCESSING@
MACRO_EXPANSION = @DOXYGEN_MACRO_EXPANSION@
EXPAND_ONLY_PREDEF = @DOXYGEN_EXPAND_ONLY_PREDEF@
SEARCH_INCLUDES = @DOXYGEN_SEARCH_INCLUDES@
INCLUDE_PATH = @DOXYGEN_INCLUDE_PATH@
INCLUDE_FILE_PATTERNS = @DOXYGEN_INCLUDE_FILE_PATTERNS@
PREDEFINED = @DOXYGEN_PREDEFINED@
EXPAND_AS_DEFINED = @DOXYGEN_EXPAND_AS_DEFINED@
SKIP_FUNCTION_MACROS = @DOXYGEN_SKIP_FUNCTION_MACROS@
TAGFILES = @DOXYGEN_TAGFILES@
GENERATE_TAGFILE = @DOXYGEN_GENERATE_TAGFILE@
ALLEXTERNALS = @DOXYGEN_ALLEXTERNALS@
EXTERNAL_GROUPS = @DOXYGEN_EXTERNAL_GROUPS@
EXTERNAL_PAGES = @DOXYGEN_EXTERNAL_PAGES@
CLASS_DIAGRAMS = @DOXYGEN_CLASS_DIAGRAMS@
DIA_PATH = @DOXYGEN_DIA_PATH@
HIDE_UNDOC_RELATIONS = @DOXYGEN_HIDE_UNDOC_RELATIONS@
HAVE_DOT = @DOXYGEN_HAVE_DOT@
DOT_NUM_THREADS = @DOXYGEN_DOT_NUM_THREADS@
DOT_FONTNAME = @DOXYGEN_DOT_FONTNAME@
DOT_FONTSIZE = @DOXYGEN_DOT_FONTSIZE@
DOT_FONTPATH = @DOXYGEN_DOT_FONTPATH@
CLASS_GRAPH = @DOXYGEN_CLASS_GRAPH@
COLLABORATION_GRAPH = @DOXYGEN_COLLABORATION_GRAPH@
GROUP_GRAPHS = @DOXYGEN_GROUP_GRAPHS@
UML_LOOK = @DOXYGEN_UML_LOOK@
UML_LIMIT_NUM_FIELDS = @DOXYGEN_UML_LIMIT_NUM_FIELDS@
TEMPLATE_RELATIONS = @DOXYGEN_TEMPLATE_RELATIONS@
INCLUDE_GRAPH = @DOXYGEN_INCLUDE_GRAPH@
INCLUDED_BY_GRAPH = @DOXYGEN_INCLUDED_BY_GRAPH@
CALL_GRAPH = @DOXYGEN_CALL_GRAPH@
CALLER_GRAPH = @DOXYGEN_CALLER_GRAPH@
GRAPHICAL_HIERARCHY = @DOXYGEN_GRAPHICAL_HIERARCHY@
DIRECTORY_GRAPH = @DOXYGEN_DIRECTORY_GRAPH@
DOT_IMAGE_FORMAT = @DOXYGEN_DOT_IMAGE_FORMAT@
INTERACTIVE_SVG = @DOXYGEN_INTERACTIVE_SVG@
DOT_PATH = @DOXYGEN_DOT_PATH@
DOTFILE_DIRS = @DOXYGEN_DOTFILE_DIRS@
MSCFILE_DIRS = @DOXYGEN_MSCFILE_DIRS@
DIAFILE_DIRS = @DOXYGEN_DIAFILE_DIRS@
PLANTUML_JAR_PATH = @DOXYGEN_PLANTUML_JAR_PATH@
PLANTUML_CFG_FILE = @DOXYGEN_PLANTUML_CFG_FILE@
PLANTUML_INCLUDE_PATH = @DOXYGEN_PLANTUML_INCLUDE_PATH@
DOT_GRAPH_MAX_NODES = @DOXYGEN_DOT_GRAPH_MAX_NODES@
MAX_DOT_GRAPH_DEPTH = @DOXYGEN_MAX_DOT_GRAPH_DEPTH@
DOT_TRANSPARENT = @DOXYGEN_DOT_TRANSPARENT@
DOT_MULTI_TARGETS = @DOXYGEN_DOT_MULTI_TARGETS@
GENERATE_LEGEND = @DOXYGEN_GENERATE_LEGEND@
DOT_CLEANUP = @DOXYGEN_DOT_CLEANUP@

CMakeLists.txt

@ -1,11 +1,11 @@
# Defines Peerplays library target.
project( Peerplays )
# Defines BitShares library target.
project( BitShares )
cmake_minimum_required( VERSION 2.8.12 )
set( BLOCKCHAIN_NAME "Peerplays" )
set( BLOCKCHAIN_NAME "BitShares" )
set( CLI_CLIENT_EXECUTABLE_NAME graphene_client )
set( GUI_CLIENT_EXECUTABLE_NAME Peerplays )
set( GUI_CLIENT_EXECUTABLE_NAME BitShares )
set( CUSTOM_URL_SCHEME "gcs" )
set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" )
@ -22,76 +22,8 @@ endif()
list( APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules" )
function(get_linux_lsb_release_information)
find_program(LSB_RELEASE_EXEC lsb_release)
if(NOT LSB_RELEASE_EXEC)
message(FATAL_ERROR "Could not detect lsb_release executable, can not gather required information")
endif()
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --id OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --release OUTPUT_VARIABLE LSB_RELEASE_VERSION_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND "${LSB_RELEASE_EXEC}" --short --codename OUTPUT_VARIABLE LSB_RELEASE_CODENAME_SHORT OUTPUT_STRIP_TRAILING_WHITESPACE)
set(LSB_RELEASE_ID_SHORT "${LSB_RELEASE_ID_SHORT}" PARENT_SCOPE)
set(LSB_RELEASE_VERSION_SHORT "${LSB_RELEASE_VERSION_SHORT}" PARENT_SCOPE)
set(LSB_RELEASE_CODENAME_SHORT "${LSB_RELEASE_CODENAME_SHORT}" PARENT_SCOPE)
endfunction()
if(CMAKE_SYSTEM_NAME MATCHES "Linux")
find_package(cppzmq)
target_link_libraries(cppzmq)
get_linux_lsb_release_information()
message(STATUS "Linux ${LSB_RELEASE_ID_SHORT} ${LSB_RELEASE_VERSION_SHORT} ${LSB_RELEASE_CODENAME_SHORT}")
string(REGEX MATCHALL "([0-9]+)" arg_list ${LSB_RELEASE_VERSION_SHORT})
list( LENGTH arg_list listlen )
if (NOT listlen)
message(FATAL_ERROR "Could not detect Ubuntu version")
endif()
list(GET arg_list 0 output)
message("Ubuntu version is: ${output}")
add_definitions(-DPEERPLAYS_UBUNTU_VERSION=${output})
endif()
# function to help with cUrl
macro(FIND_CURL)
if (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
find_package(OpenSSL REQUIRED)
set (OLD_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
set (CMAKE_FIND_LIBRARY_SUFFIXES .a)
find_package(CURL REQUIRED)
list(APPEND CURL_LIBRARIES ${OPENSSL_LIBRARIES} ${BOOST_THREAD_LIBRARY} ${CMAKE_DL_LIBS})
set (CMAKE_FIND_LIBRARY_SUFFIXES ${OLD_SUFFIXES})
else (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
find_package(CURL REQUIRED)
endif (NOT WIN32 AND NOT APPLE AND CURL_STATICLIB)
if( WIN32 )
if ( MSVC )
list( APPEND CURL_LIBRARIES Wldap32 )
endif( MSVC )
if( MINGW )
# MinGW requires a specific order of included libraries ( CURL before ZLib )
find_package( ZLIB REQUIRED )
list( APPEND CURL_LIBRARIES ${ZLIB_LIBRARY} pthread )
endif( MINGW )
list( APPEND CURL_LIBRARIES ${PLATFORM_SPECIFIC_LIBS} )
endif( WIN32 )
endmacro()
set(CMAKE_EXPORT_COMPILE_COMMANDS "ON")
if (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis-testnet.json" CACHE PATH "location of the genesis.json to embed in the executable" )
#add_compile_definitions(BUILD_PEERPLAYS_TESTNET=1)
add_definitions(-DBUILD_PEERPLAYS_TESTNET=1)
message ("\n====================\nBuilding for Testnet\n====================\n")
else (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis-mainnet.json" CACHE PATH "location of the genesis.json to embed in the executable" )
message ("\n====================\nBuilding for Mainnet\n====================\n")
endif (BUILD_PEERPLAYS_TESTNET)
set(GRAPHENE_EGENESIS_JSON "${CMAKE_CURRENT_SOURCE_DIR}/genesis.json" CACHE PATH "location of the genesis.json to embed in the executable" )
#set (ENABLE_INSTALLER 1)
#set (USE_PCH 1)
@ -114,6 +46,7 @@ LIST(APPEND BOOST_COMPONENTS thread
system
filesystem
program_options
signals
serialization
chrono
unit_test_framework
@ -138,7 +71,7 @@ ENDIF()
if( WIN32 )
message( STATUS "Configuring Peerplays on WIN32")
message( STATUS "Configuring BitShares on WIN32")
set( DB_VERSION 60 )
set( BDB_STATIC_LIBS 1 )
@ -170,13 +103,20 @@ if( WIN32 )
SET(TCL_LIBRARY ${TCL_LIBS})
else( WIN32 ) # Apple AND Linux
find_library(READLINE_LIBRARIES NAMES readline)
find_path(READLINE_INCLUDE_DIR readline/readline.h)
#if(NOT READLINE_INCLUDE_DIR OR NOT READLINE_LIBRARIES)
# MESSAGE(FATAL_ERROR "Could not find lib readline.")
#endif()
if( APPLE )
# Apple Specific Options Here
message( STATUS "Configuring Peerplays on OS X" )
message( STATUS "Configuring BitShares on OS X" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -stdlib=libc++ -Wall" )
else( APPLE )
# Linux Specific Options Here
message( STATUS "Configuring Peerplays on Linux" )
message( STATUS "Configuring BitShares on Linux" )
set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -std=c++11 -Wall" )
set( rt_library rt )
#set( pthread_library pthread)
@ -195,7 +135,7 @@ else( WIN32 ) # Apple AND Linux
endif( APPLE )
if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall" )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp" )
elseif( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" )
if( CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.0.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.0.0 )
set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-invalid-partial-specialization" )
@ -214,7 +154,7 @@ else( WIN32 ) # Apple AND Linux
endif( WIN32 )
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build Peerplays for code coverage analysis")
set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build BitShares for code coverage analysis")
if(ENABLE_COVERAGE_TESTING)
SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}")
@ -223,13 +163,13 @@ endif()
add_subdirectory( libraries )
set(BUILD_PEERPLAYS_PROGRAMS TRUE CACHE BOOL "Build peerplays executables (witness node, cli wallet, etc)")
set(BUILD_BITSHARES_PROGRAMS TRUE CACHE BOOL "Build bitshares executables (witness node, cli wallet, etc)")
add_subdirectory( programs )
set(BUILD_PEERPLAYS_TESTS TRUE CACHE BOOL "Build peerplays unit tests")
if( BUILD_PEERPLAYS_TESTS )
set(BUILD_BITSHARES_TESTS TRUE CACHE BOOL "Build bitshares unit tests")
if( BUILD_BITSHARES_TESTS )
add_subdirectory( tests )
endif( BUILD_PEERPLAYS_TESTS )
endif( BUILD_BITSHARES_TESTS )
if (ENABLE_INSTALLER)
@ -251,18 +191,18 @@ set(CPACK_PACKAGE_VERSION_MAJOR "${VERSION_MAJOR}")
set(CPACK_PACKAGE_VERSION_MINOR "${VERSION_MINOR}")
set(CPACK_PACKAGE_VERSION_PATCH "${VERSION_PATCH}")
set(CPACK_PACKAGE_VERSION "${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.${CPACK_PACKAGE_VERSION_PATCH}")
set(CPACK_PACKAGE_DESCRIPTION "A client for the Peerplays network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the Peerplays network")
set(CPACK_PACKAGE_DESCRIPTION "A client for the BitShares network")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A client for the BitShares network")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE.md")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "Peerplays ${CPACK_PACKAGE_VERSION}")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "BitShares ${CPACK_PACKAGE_VERSION}")
if(WIN32)
SET(CPACK_GENERATOR "ZIP;NSIS")
set(CPACK_PACKAGE_NAME "Peerplays") # override above
set(CPACK_PACKAGE_NAME "BitShares") # override above
set(CPACK_NSIS_EXECUTABLES_DIRECTORY .)
set(CPACK_NSIS_PACKAGE_NAME "Peerplays v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_PACKAGE_NAME "BitShares v${CPACK_PACKAGE_VERSION}")
set(CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"Peerplays\\\"")
set(CPACK_NSIS_DEFINES " !define MUI_STARTMENUPAGE_DEFAULTFOLDER \\\"BitShares\\\"")
# it seems like windows zip files usually don't have a single directory inside them, unix tgz frequently do
SET(CPACK_INCLUDE_TOPLEVEL_DIRECTORY 0)
@ -280,8 +220,3 @@ endif(LINUX)
include(CPack)
endif(ENABLE_INSTALLER)
unset(GRAPHENE_EGENESIS_JSON)
unset(GRAPHENE_EGENESIS_JSON CACHE)
unset(BUILD_PEERPLAYS_TESTNET)
unset(BUILD_PEERPLAYS_TESTNET CACHE)
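
The testnet branch of the configuration above only adds -DBUILD_PEERPLAYS_TESTNET=1 and embeds a different genesis file, and the Ubuntu-detection block defines PEERPLAYS_UBUNTU_VERSION. A hedged sketch of how such compile definitions are typically consumed in the sources; the guarded values are invented for illustration and may not match the real code base:

```cpp
// Illustrative only: consuming the -D definitions added by the CMakeLists.txt above.
#include <cstdint>
#include <iostream>

// Hypothetical parameter chosen at compile time.
constexpr uint32_t maintenance_interval_seconds() {
#ifdef BUILD_PEERPLAYS_TESTNET
   return 300;   // shorter cycle for a testnet build (invented value)
#else
   return 3600;  // mainnet default (invented value)
#endif
}

int main() {
#ifdef PEERPLAYS_UBUNTU_VERSION
   std::cout << "built for Ubuntu " << PEERPLAYS_UBUNTU_VERSION << "\n";
#endif
   std::cout << "maintenance interval: " << maintenance_interval_seconds() << "s\n";
   return 0;
}
```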

Dockerfile

@ -1,218 +1,93 @@
FROM ubuntu:20.04
FROM ubuntu:18.04
MAINTAINER PeerPlays Blockchain Standards Association
#===============================================================================
# Ubuntu setup
#===============================================================================
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US.UTF-8
ENV LC_ALL en_US.UTF-8
RUN \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
apt-utils \
autoconf \
bash \
bison \
build-essential \
ca-certificates \
dnsutils \
expect \
flex \
cmake \
doxygen \
git \
graphviz \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libsnappy-dev \
libreadline-dev \
libssl-dev \
libtool \
libzip-dev \
locales \
lsb-release \
mc \
nano \
net-tools \
ntp \
openssh-server \
pkg-config \
python3 \
python3-jinja2 \
sudo \
systemd-coredump \
wget
ENV HOME /home/peerplays
RUN useradd -rm -d /home/peerplays -s /bin/bash -g root -G sudo -u 1000 peerplays
RUN echo "peerplays ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/peerplays
RUN chmod 440 /etc/sudoers.d/peerplays
RUN service ssh start
RUN echo 'peerplays:peerplays' | chpasswd
# SSH
EXPOSE 22
WORKDIR /home/peerplays/src
#===============================================================================
# Boost setup
#===============================================================================
ntp \
wget \
&& \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN \
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz && \
tar -xzf boost_1_72_0.tar.gz && \
cd boost_1_72_0 && \
./bootstrap.sh && \
sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
locale-gen
ADD . /peerplays-core
WORKDIR /peerplays-core
# Compile Boost
RUN \
BOOST_ROOT=$HOME/boost_1_67_0 && \
wget -c 'http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.gz/download' -O boost_1_67_0.tar.gz &&\
tar -zxvf boost_1_67_0.tar.gz && \
cd boost_1_67_0/ && \
./bootstrap.sh "--prefix=$BOOST_ROOT" && \
./b2 install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cmake setup
#===============================================================================
cd ..
# Compile Peerplays
RUN \
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
chmod 755 ./cmake-3.24.2-linux-x86_64.sh && \
./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license && \
cmake --version && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz && \
tar -xzvf v4.3.4.tar.gz && \
cd libzmq-4.3.4 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cppzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz && \
tar -xzvf v4.9.0.tar.gz && \
cd cppzmq-4.9.0 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# gsl setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libpcre3-dev
RUN \
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz && \
tar -xzvf v4.1.4.tar.gz && \
cd gsl-4.1.4 && \
make -j$(nproc) && \
make install && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libbitcoin-build setup
# libbitcoin-explorer setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libsodium-dev
RUN \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# Doxygen setup
#===============================================================================
RUN \
sudo apt install -y bison flex && \
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz && \
tar -xvf Release_1_8_17.tar.gz && \
cd doxygen-Release_1_8_17 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Perl setup
#===============================================================================
RUN \
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz && \
tar -xvf v5.30.0.tar.gz && \
cd perl5-5.30.0 && \
./Configure -des && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Peerplays setup
#===============================================================================
## Clone Peerplays
#RUN \
# git clone https://gitlab.com/PBSA/peerplays.git && \
# cd peerplays && \
# git checkout develop && \
# git submodule update --init --recursive && \
# git branch --show-current && \
# git log --oneline -n 5
# Add local source
ADD . peerplays
# Configure Peerplays
RUN \
cd peerplays && \
BOOST_ROOT=$HOME/boost_1_67_0 && \
git submodule update --init --recursive && \
git log --oneline -n 5 && \
mkdir build && \
cd build && \
cmake -DCMAKE_BUILD_TYPE=Release ..
mkdir build/release && \
cd build/release && \
cmake \
-DBOOST_ROOT="$BOOST_ROOT" \
-DCMAKE_BUILD_TYPE=Release \
../.. && \
make witness_node cli_wallet && \
install -s programs/witness_node/witness_node programs/cli_wallet/cli_wallet /usr/local/bin && \
#
# Obtain version
mkdir /etc/peerplays && \
git rev-parse --short HEAD > /etc/peerplays/version && \
cd / && \
rm -rf /peerplays-core
# Build Peerplays
RUN \
cd peerplays/build && \
make -j$(nproc) cli_wallet witness_node
# Home directory $HOME
WORKDIR /
RUN useradd -s /bin/bash -m -d /var/lib/peerplays peerplays
ENV HOME /var/lib/peerplays
RUN chown peerplays:peerplays -R /var/lib/peerplays
WORKDIR /home/peerplays/peerplays-network
# Volume
VOLUME ["/var/lib/peerplays", "/etc/peerplays"]
# Setup Peerplays runimage
RUN \
ln -s /home/peerplays/src/peerplays/build/programs/cli_wallet/cli_wallet ./ && \
ln -s /home/peerplays/src/peerplays/build/programs/witness_node/witness_node ./
RUN ./witness_node --create-genesis-json genesis.json && \
rm genesis.json
RUN chown peerplays:root -R /home/peerplays/peerplays-network
# Peerplays RPC
# rpc service:
EXPOSE 8090
# Peerplays P2P:
EXPOSE 9777
# p2p service:
EXPOSE 1776
# Peerplays
CMD ["./witness_node", "-d", "./witness_node_data_dir"]
# default exec/config files
ADD docker/default_config.ini /etc/peerplays/config.ini
ADD docker/peerplaysentry.sh /usr/local/bin/peerplaysentry.sh
RUN chmod a+x /usr/local/bin/peerplaysentry.sh
# Make Docker send SIGINT instead of SIGTERM to the daemon
STOPSIGNAL SIGINT
# default execute entry
CMD ["/usr/local/bin/peerplaysentry.sh"]

Deleted Dockerfile (original path not shown)

@ -1,219 +0,0 @@
FROM ubuntu:18.04
#===============================================================================
# Ubuntu setup
#===============================================================================
RUN \
apt-get update -y && \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
apt-utils \
autoconf \
bash \
bison \
build-essential \
ca-certificates \
dnsutils \
expect \
flex \
git \
graphviz \
libbz2-dev \
libcurl4-openssl-dev \
libncurses-dev \
libsnappy-dev \
libssl-dev \
libtool \
libzip-dev \
locales \
lsb-release \
mc \
nano \
net-tools \
ntp \
openssh-server \
pkg-config \
python3 \
python3-jinja2 \
sudo \
systemd-coredump \
wget
ENV HOME /home/peerplays
RUN useradd -rm -d /home/peerplays -s /bin/bash -g root -G sudo -u 1000 peerplays
RUN echo "peerplays ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/peerplays
RUN chmod 440 /etc/sudoers.d/peerplays
RUN service ssh start
RUN echo 'peerplays:peerplays' | chpasswd
# SSH
EXPOSE 22
WORKDIR /home/peerplays/src
#===============================================================================
# Boost setup
#===============================================================================
RUN \
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz && \
tar -xzf boost_1_72_0.tar.gz && \
cd boost_1_72_0 && \
./bootstrap.sh && \
./b2 install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cmake setup
#===============================================================================
RUN \
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \
chmod 755 ./cmake-3.24.2-linux-x86_64.sh && \
./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license && \
cmake --version && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz && \
tar -xzvf v4.3.4.tar.gz && \
cd libzmq-4.3.4 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# cppzmq setup
#===============================================================================
RUN \
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz && \
tar -xzvf v4.9.0.tar.gz && \
cd cppzmq-4.9.0 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) && \
make install && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# gsl setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libpcre3-dev
RUN \
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz && \
tar -xzvf v4.1.4.tar.gz && \
cd gsl-4.1.4 && \
make -j$(nproc) && \
make install && \
rm -rf /home/peerplays/src/*
#===============================================================================
# libbitcoin-build setup
# libbitcoin-explorer setup
#===============================================================================
RUN \
DEBIAN_FRONTEND=noninteractive apt-get install -y \
libsodium-dev
RUN \
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git && \
cd libbitcoin-explorer && \
./install.sh && \
ldconfig && \
rm -rf /home/peerplays/src/*
#===============================================================================
# Doxygen setup
#===============================================================================
RUN \
sudo apt install -y bison flex && \
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz && \
tar -xvf Release_1_8_17.tar.gz && \
cd doxygen-Release_1_8_17 && \
mkdir build && \
cd build && \
cmake .. && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Perl setup
#===============================================================================
RUN \
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz && \
tar -xvf v5.30.0.tar.gz && \
cd perl5-5.30.0 && \
./Configure -des && \
make -j$(nproc) install && \
ldconfig
#===============================================================================
# Peerplays setup
#===============================================================================
## Clone Peerplays
#RUN \
# git clone https://gitlab.com/PBSA/peerplays.git && \
# cd peerplays && \
# git checkout develop && \
# git submodule update --init --recursive && \
# git branch --show-current && \
# git log --oneline -n 5
# Add local source
ADD . peerplays
# Configure Peerplays
RUN \
cd peerplays && \
git submodule update --init --recursive && \
git symbolic-ref --short HEAD && \
git log --oneline -n 5 && \
mkdir build && \
cd build && \
cmake -DCMAKE_BUILD_TYPE=Release ..
# Build Peerplays
RUN \
cd peerplays/build && \
make -j$(nproc) cli_wallet witness_node
WORKDIR /home/peerplays/peerplays-network
# Setup Peerplays runimage
RUN \
ln -s /home/peerplays/src/peerplays/build/programs/cli_wallet/cli_wallet ./ && \
ln -s /home/peerplays/src/peerplays/build/programs/witness_node/witness_node ./
RUN ./witness_node --create-genesis-json genesis.json && \
rm genesis.json
RUN chown peerplays:root -R /home/peerplays/peerplays-network
# Peerplays RPC
EXPOSE 8090
# Peerplays P2P:
EXPOSE 9777
# Peerplays
CMD ["./witness_node", "-d", "./witness_node_data_dir"]

Doxyfile (345 lines changed)

@ -1,4 +1,4 @@
# Doxyfile 1.8.17
# Doxyfile 1.8.9.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
@ -17,11 +17,11 @@
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the configuration
# file that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
@ -32,7 +32,7 @@ DOXYFILE_ENCODING = UTF-8
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "Peerplays"
PROJECT_NAME = "Graphene"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
@ -93,14 +93,6 @@ ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = English
# The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all
# documentation generated by doxygen is written. Doxygen will use this
# information to generate all generated output in the proper direction.
# Possible values are: None, LTR, RTL and Context.
# The default value is: None.
OUTPUT_TEXT_DIRECTION = None
# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
# descriptions after the members that are listed in the file and class
# documentation (similar to Javadoc). Set to NO to disable this.
@ -187,16 +179,6 @@ SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = NO
# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
# such as
# /***************
# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
# Javadoc-style will behave just like regular comments and it will not be
# interpreted by doxygen.
# The default value is: NO.
JAVADOC_BANNER = NO
# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
# line (until the first dot) of a Qt-style comment as the brief description. If
# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
@ -244,12 +226,7 @@ TAB_SIZE = 4
# will allow you to put the command \sideeffect (or @sideeffect) in the
# documentation, which will result in a user-defined paragraph with heading
# "Side Effects:". You can put \n's in the value part of an alias to insert
# newlines (in the resulting output). You can put ^^ in the value part of an
# alias to insert a newline as if a physical newline was in the original file.
# When you need a literal { or } or , in the value part of an alias you have to
# escape them by means of a backslash (\), this can lead to conflicts with the
# commands \{ and \} for these it is advised to use the version @{ and @} or use
# a double escape (\\{ and \\})
# newlines.
ALIASES =
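
The surrounding options (JAVADOC_AUTOBRIEF, ALIASES) control how doxygen reads C++ comments. A hypothetical documented function, purely illustrative, showing the comment style these settings govern; with JAVADOC_AUTOBRIEF left at NO, the brief line is marked explicitly:

```cpp
// Hypothetical example of the comment style the Doxyfile options above apply to.

/**
 * @brief Applies one block to the local chain state.
 *
 * Detailed description continues after the brief line.
 *
 * @param block_num height of the block being applied
 * @return true on success
 */
bool apply_block(unsigned block_num) {
   return block_num > 0;   // stand-in body so the snippet compiles
}
```
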
@ -287,26 +264,17 @@ OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
# sources only. Doxygen will then generate output that is more tailored for that
# language. For instance, namespaces will be presented as modules, types will be
# separated into more groups, etc.
# The default value is: NO.
OPTIMIZE_OUTPUT_SLICE = NO
# Doxygen selects the parser to use depending on the extension of the files it
# parses. With this tag you can assign which parser to use for a given
# extension. Doxygen has a built-in mapping, but you can override or extend it
# using this tag. The format is ext=language, where ext is a file extension, and
# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice,
# Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
# tries to guess whether the code is fixed or free formatted code, this is the
# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat
# .inc files as Fortran files (default is PHP), and .f files as C (default is
# Fortran), use: inc=Fortran f=C.
# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
# Fortran. In the later case the parser tries to guess whether the code is fixed
# or free formatted code, this is the default for Fortran type files), VHDL. For
# instance to make doxygen treat .inc files as Fortran files (default is PHP),
# and .f files as C (default is Fortran), use: inc=Fortran f=C.
#
# Note: For files without extension you can use no_extension as a placeholder.
#
@ -317,7 +285,7 @@ EXTENSION_MAPPING =
# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
# according to the Markdown format, which allows for more readable
# documentation. See https://daringfireball.net/projects/markdown/ for details.
# documentation. See http://daringfireball.net/projects/markdown/ for details.
# The output of markdown processing is further processed by doxygen, so you can
# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
# case of backward compatibilities issues.
@ -325,15 +293,6 @@ EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
# to that level are automatically included in the table of contents, even if
# they do not have an id attribute.
# Note: This feature currently applies only to Markdown headings.
# Minimum value: 0, maximum value: 99, default value: 5.
# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
TOC_INCLUDE_HEADINGS = 5
# When enabled doxygen tries to link words that correspond to documented
# classes, or namespaces to their corresponding documentation. Such a link can
# be prevented in individual cases by putting a % sign in front of the word or
@ -359,7 +318,7 @@ BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
# will parse them like normal C++ but will assume all classes use public instead
# of private inheritance when no explicit protection keyword is present.
# The default value is: NO.
@ -384,13 +343,6 @@ IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
# If one adds a struct or class to a group and this option is enabled, then also
# any nested class or struct is added to the same group. By default this option
# is disabled and one has to add nested compounds explicitly via \ingroup.
# The default value is: NO.
GROUP_NESTED_COMPOUNDS = NO
# Set the SUBGROUPING tag to YES to allow class member groups of the same type
# (for instance a group of public functions) to be put as a subgroup of that
# type (e.g. under the Public Functions section). Set it to NO to prevent
@ -465,12 +417,6 @@ EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
# methods of a class will be included in the documentation.
# The default value is: NO.
EXTRACT_PRIV_VIRTUAL = NO
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
# The default value is: NO.
@ -525,8 +471,8 @@ HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
# declarations. If set to NO, these declarations will be included in the
# documentation.
# (class|struct|union) declarations. If set to NO, these declarations will be
# included in the documentation.
# The default value is: NO.
HIDE_FRIEND_COMPOUNDS = NO
@ -549,7 +495,7 @@ INTERNAL_DOCS = NO
# names in lower-case letters. If set to YES, upper-case letters are also
# allowed. This is useful if you have classes or files whose names only differ
# in case and if your file system supports case sensitive file names. Windows
# (including Cygwin) ands Mac users are advised to set this option to NO.
# and Mac users are advised to set this option to NO.
# The default value is: system dependent.
CASE_SENSE_NAMES = NO
@ -736,7 +682,7 @@ LAYOUT_FILE =
# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
# the reference definitions. This must be a list of .bib files. The .bib
# extension is automatically appended if omitted. This requires the bibtex tool
# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
# For LaTeX the style of the bibliography can be controlled using
# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
# search path. See also \cite for info how to create references.
@ -781,18 +727,11 @@ WARN_IF_DOC_ERROR = YES
# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
# are documented, but have no documentation for their parameters or return
# value. If set to NO, doxygen will only warn about wrong or incomplete
# parameter documentation, but not about the absence of documentation. If
# EXTRACT_ALL is set to YES then this flag will automatically be disabled.
# parameter documentation, but not about the absence of documentation.
# The default value is: NO.
WARN_NO_PARAMDOC = NO
# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
# a warning is encountered.
# The default value is: NO.
WARN_AS_ERROR = NO
# The WARN_FORMAT tag determines the format of the warning messages that doxygen
# can produce. The string should contain the $file, $line, and $text tags, which
# will be replaced by the file and line number from which the warning originated
@ -816,19 +755,15 @@ WARN_LOGFILE =
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = doc/main.dox \
libraries/chain \
libraries/chain/db \
libraries/app \
libraries/wallet
INPUT = doc/main.dox libraries/chain libraries/chain/db libraries/app libraries/wallet
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
# documentation (see: https://www.gnu.org/software/libiconv/) for the list of
# documentation (see: http://www.gnu.org/software/libiconv) for the list of
# possible encodings.
# The default value is: UTF-8.
@ -836,19 +771,12 @@ INPUT_ENCODING = UTF-8
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# read by doxygen.
#
# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp,
# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h,
# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc,
# *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment),
# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen
# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd,
# *.vhdl, *.ucf, *.qsf and *.ice.
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS =
@ -934,10 +862,6 @@ IMAGE_PATH =
# Note that the filter must not add or remove lines; it is applied before the
# code is scanned, but not when the output code is generated. If lines are added
# or removed, the anchors will not be placed correctly.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
INPUT_FILTER =
@ -947,10 +871,6 @@ INPUT_FILTER =
# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
# patterns match the file name, INPUT_FILTER is applied.
#
# Note that for custom extensions or not directly supported extensions you also
# need to set EXTENSION_MAPPING for the extension otherwise the files are not
# properly processed by doxygen.
FILTER_PATTERNS =
@ -1003,7 +923,7 @@ INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
# entity all documented functions referencing it will be listed.
# function all documented functions referencing it will be listed.
# The default value is: NO.
REFERENCED_BY_RELATION = NO
@ -1035,12 +955,12 @@ SOURCE_TOOLTIPS = YES
# If the USE_HTAGS tag is set to YES then the references to source code will
# point to the HTML generated by the htags(1) tool instead of doxygen built-in
# source browser. The htags tool is part of GNU's global source tagging system
# (see https://www.gnu.org/software/global/global.html). You will need version
# (see http://www.gnu.org/software/global/global.html). You will need version
# 4.8.6 or higher.
#
# To use it do the following:
# - Install the latest version of global
# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
# - Make sure the INPUT points to the root of the source tree
# - Run doxygen as normal
#
@ -1062,35 +982,6 @@ USE_HTAGS = NO
VERBATIM_HEADERS = YES
# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
# cost of reduced performance. This can be particularly helpful with template
# rich C++ code for which doxygen's built-in parser lacks the necessary type
# information.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
# The default value is: NO.
CLANG_ASSISTED_PARSING = NO
# If clang assisted parsing is enabled you can provide the compiler with command
# line options that you would normally use when invoking the compiler. Note that
# the include paths will already be set by doxygen for the files and directories
# specified with INPUT and INCLUDE_PATH.
# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
CLANG_OPTIONS =
# If clang assisted parsing is enabled you can provide the clang parser with the
# path to the compilation database (see:
# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files
# were built. This is equivalent to specifying the "-p" option to a clang tool,
# such as clang-check. These options will then be passed to the parser.
# Note: The availability of this option depends on whether or not doxygen was
# generated with the -Duse_libclang=ON option for CMake.
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
@ -1209,7 +1100,7 @@ HTML_EXTRA_FILES =
# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
# will adjust the colors in the style sheet and background images according to
# this color. Hue is specified as an angle on a colorwheel, see
# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
# purple, and 360 is red again.
# Minimum value: 0, maximum value: 359, default value: 220.
@ -1238,23 +1129,11 @@ HTML_COLORSTYLE_GAMMA = 80
# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
# page will contain the date and time when the page was generated. Setting this
# to YES can help to show when doxygen was last run and thus if the
# documentation is up to date.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
# documentation will contain a main index with vertical navigation menus that
# are dynamically created via JavaScript. If disabled, the navigation index will
# consists of multiple levels of tabs that are statically embedded in every HTML
# page. Disable this option to support browsers that do not have JavaScript,
# like the Qt help browser.
# to NO can help when comparing the output of multiple runs.
# The default value is: YES.
# This tag requires that the tag GENERATE_HTML is set to YES.
HTML_DYNAMIC_MENUS = YES
HTML_TIMESTAMP = YES
# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
# documentation will contain sections that can be hidden and shown after the
@ -1279,13 +1158,13 @@ HTML_INDEX_NUM_ENTRIES = 100
# If the GENERATE_DOCSET tag is set to YES, additional index files will be
# generated that can be used as input for Apple's Xcode 3 integrated development
# environment (see: https://developer.apple.com/xcode/), introduced with OSX
# 10.5 (Leopard). To create a documentation set, doxygen will generate a
# environment (see: http://developer.apple.com/tools/xcode/), introduced with
# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
# Makefile in the HTML output directory. Running make will produce the docset in
# that directory and running make install will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
# genXcode/_index.html for more information.
# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
# for more information.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
@ -1324,7 +1203,7 @@ DOCSET_PUBLISHER_NAME = Publisher
# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
# Windows.
#
# The HTML Help Workshop contains a compiler that can convert all HTML output
@ -1400,7 +1279,7 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
# Project output. For more information please see Qt Help Project / Namespace
# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
# The default value is: org.doxygen.Project.
# This tag requires that the tag GENERATE_QHP is set to YES.
@ -1408,7 +1287,7 @@ QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
# Help Project output. For more information please see Qt Help Project / Virtual
# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-
# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
# folders).
# The default value is: doc.
# This tag requires that the tag GENERATE_QHP is set to YES.
@ -1417,7 +1296,7 @@ QHP_VIRTUAL_FOLDER = doc
# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
# filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
@ -1425,7 +1304,7 @@ QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
# custom filter to add. For more information please see Qt Help Project / Custom
# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-
# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
# filters).
# This tag requires that the tag GENERATE_QHP is set to YES.
@ -1433,7 +1312,7 @@ QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
# project's filter section matches. Qt Help Project / Filter Attributes (see:
# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
# This tag requires that the tag GENERATE_QHP is set to YES.
QHP_SECT_FILTER_ATTRS =
@ -1526,7 +1405,7 @@ EXT_LINKS_IN_WINDOW = NO
FORMULA_FONTSIZE = 10
# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
# Use the FORMULA_TRANPARENT tag to determine whether or not the images
# generated for formulas are transparent PNGs. Transparent PNGs are not
# supported properly for IE 6.0, but are supported on all modern browsers.
#
@ -1537,14 +1416,8 @@ FORMULA_FONTSIZE = 10
FORMULA_TRANSPARENT = YES
# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
# to create new LaTeX commands to be used in formulas as building blocks. See
# the section "Including formulas" for details.
FORMULA_MACROFILE =
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# https://www.mathjax.org) which uses client side JavaScript for the rendering
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
# installed or if you want to formulas look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
@ -1571,8 +1444,8 @@ MATHJAX_FORMAT = HTML-CSS
# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
# Content Delivery Network so you can quickly see the result without installing
# MathJax. However, it is strongly recommended to install a local copy of
# MathJax from https://www.mathjax.org before deployment.
# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/.
# MathJax from http://www.mathjax.org before deployment.
# The default value is: http://cdn.mathjax.org/mathjax/latest.
# This tag requires that the tag USE_MATHJAX is set to YES.
MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
@ -1614,7 +1487,7 @@ MATHJAX_CODEFILE =
SEARCHENGINE = YES
# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
# implemented using a web server instead of a web client using JavaScript. There
# implemented using a web server instead of a web client using Javascript. There
# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
# setting. When disabled, doxygen will generate a PHP script for searching and
# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
@ -1633,7 +1506,7 @@ SERVER_BASED_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/).
# Xapian (see: http://xapian.org/).
#
# See the section "External Indexing and Searching" for details.
# The default value is: NO.
@ -1646,7 +1519,7 @@ EXTERNAL_SEARCH = NO
#
# Doxygen ships with an example indexer (doxyindexer) and search engine
# (doxysearch.cgi) which are based on the open source search engine library
# Xapian (see: https://xapian.org/). See the section "External Indexing and
# Xapian (see: http://xapian.org/). See the section "External Indexing and
# Searching" for details.
# This tag requires that the tag SEARCHENGINE is set to YES.
@ -1698,35 +1571,21 @@ LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked.
#
# Note that when not enabling USE_PDFLATEX the default is latex when enabling
# USE_PDFLATEX the default is pdflatex and when in the later case latex is
# chosen this is overwritten by pdflatex. For specific output languages the
# default can have been set differently, this depends on the implementation of
# the output language.
# Note that when enabling USE_PDFLATEX this option is only used for generating
# bitmaps for formulas in the HTML output, but not in the Makefile that is
# written to the output directory.
# The default file is: latex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
# index for LaTeX.
# Note: This tag is used in the Makefile / make.bat.
# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file
# (.tex).
# The default file is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
MAKEINDEX_CMD_NAME = makeindex
# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to
# generate index for LaTeX. In case there is no backslash (\) as first character
# it will be automatically added in the LaTeX code.
# Note: This tag is used in the generated output file (.tex).
# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat.
# The default value is: makeindex.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_MAKEINDEX_CMD = makeindex
# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
# documents. This may be useful for small projects and may help to save some
# trees in general.
@ -1745,12 +1604,9 @@ COMPACT_LATEX = NO
PAPER_TYPE = a4
# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
# that should be included in the LaTeX output. The package can be specified just
# by its name or with the correct syntax as to be used with the LaTeX
# \usepackage command. To get the times font for instance you can specify :
# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times}
# To use the option intlimits with the amsmath package you can specify:
# EXTRA_PACKAGES=[intlimits]{amsmath}
# that should be included in the LaTeX output. To get the times font for
# instance you can specify
# EXTRA_PACKAGES=times
# If left blank no extra packages will be included.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@ -1847,28 +1703,12 @@ LATEX_SOURCE_CODE = NO
# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
# bibliography, e.g. plainnat, or ieeetr. See
# https://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
# The default value is: plain.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_BIB_STYLE = plain
# If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated
# page will contain the date and time when the page was generated. Setting this
# to NO can help when comparing the output of multiple runs.
# The default value is: NO.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_TIMESTAMP = NO
# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute)
# path from which the emoji images will be read. If a relative path is entered,
# it will be relative to the LATEX_OUTPUT directory. If left blank the
# LATEX_OUTPUT directory will be used.
# This tag requires that the tag GENERATE_LATEX is set to YES.
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
@ -1908,9 +1748,9 @@ COMPACT_RTF = NO
RTF_HYPERLINKS = NO
# Load stylesheet definitions from file. Syntax is similar to doxygen's
# configuration file, i.e. a series of assignments. You only have to provide
# replacements, missing definitions are set to their default value.
# Load stylesheet definitions from file. Syntax is similar to doxygen's config
# file, i.e. a series of assignments. You only have to provide replacements,
# missing definitions are set to their default value.
#
# See also section "Doxygen usage" for information on how to generate the
# default style sheet that doxygen normally uses.
@ -1919,8 +1759,8 @@ RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
# Set optional variables used in the generation of an RTF document. Syntax is
# similar to doxygen's configuration file. A template extensions file can be
# generated using doxygen -e rtf extensionFile.
# similar to doxygen's config file. A template extensions file can be generated
# using doxygen -e rtf extensionFile.
# This tag requires that the tag GENERATE_RTF is set to YES.
RTF_EXTENSIONS_FILE =
@ -2006,13 +1846,6 @@ XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include
# namespace members in file scope as well, matching the HTML output.
# The default value is: NO.
# This tag requires that the tag GENERATE_XML is set to YES.
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
@ -2045,9 +1878,9 @@ DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
# AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures
# the structure of the code including all documentation. Note that this feature
# is still experimental and incomplete at the moment.
# AutoGen Definitions (see http://autogen.sf.net) file that captures the
# structure of the code including all documentation. Note that this feature is
# still experimental and incomplete at the moment.
# The default value is: NO.
GENERATE_AUTOGEN_DEF = NO
@ -2214,6 +2047,12 @@ EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
# The PERL_PATH should be the absolute path and name of the perl script
# interpreter (i.e. the result of 'which perl').
# The default file (with absolute path) is: /usr/bin/perl.
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
@ -2227,6 +2066,15 @@ EXTERNAL_PAGES = YES
CLASS_DIAGRAMS = YES
# You can define message sequence charts within doxygen comments using the \msc
# command. Doxygen will then run the mscgen tool (see:
# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the
# documentation. The MSCGEN_PATH tag allows you to specify the directory where
# the mscgen tool resides. If left empty the tool is assumed to be found in the
# default search path.
MSCGEN_PATH =
# You can include diagrams made with dia in doxygen documentation. Doxygen will
# then run dia to produce the diagram and insert it in the documentation. The
# DIA_PATH tag allows you to specify the directory where the dia binary resides.
@ -2245,7 +2093,7 @@ HIDE_UNDOC_RELATIONS = YES
# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
# Bell Labs. The other options in this section have no effect if this option is
# set to NO
# The default value is: YES.
# The default value is: NO.
HAVE_DOT = NO
@ -2359,8 +2207,7 @@ INCLUDED_BY_GRAPH = YES
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable call graphs for selected
# functions only using the \callgraph command. Disabling a call graph can be
# accomplished by means of the command \hidecallgraph.
# functions only using the \callgraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2371,8 +2218,7 @@ CALL_GRAPH = NO
#
# Note that enabling this option will significantly increase the time of a run.
# So in most cases it will be better to enable caller graphs for selected
# functions only using the \callergraph command. Disabling a caller graph can be
# accomplished by means of the command \hidecallergraph.
# functions only using the \callergraph command.
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2395,17 +2241,11 @@ GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
# generated by dot. For an explanation of the image formats see the section
# output formats in the documentation of the dot tool (Graphviz (see:
# http://www.graphviz.org/)).
# generated by dot.
# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
# to make the SVG files visible in IE 9+ (other browsers do not have this
# requirement).
# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
# gif:cairo:gd, gif:gd, gif:gd:gd, svg, png:gd, png:gd:gd, png:cairo,
# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and
# png:gdiplus:gdiplus.
# Possible values are: png, jpg, gif and svg.
# The default value is: png.
# This tag requires that the tag HAVE_DOT is set to YES.
@ -2456,11 +2296,6 @@ DIAFILE_DIRS =
PLANTUML_JAR_PATH =
# When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a
# configuration file for plantuml.
PLANTUML_CFG_FILE =
# When using plantuml, the specified paths are searched for files specified by
# the !include statement in a plantuml block.

220
README.md
View file

@ -2,193 +2,99 @@ Intro for new developers and witnesses
------------------------
This is a quick introduction to get new developers and witnesses up to speed on the Peerplays blockchain. It is intended for witnesses planning to join a live, already deployed blockchain.
# Building on Ubuntu 18.04 LTS and Installation Instructions
The following dependencies were necessary for a clean install of Ubuntu 18.04 LTS:
```
sudo apt-get install gcc-5 g++-5 cmake make libbz2-dev \
libdb++-dev libdb-dev libssl-dev openssl libreadline-dev \
autoconf libtool git
```
## Build Boost 1.67.0
# Building and Installation Instructions
The officially supported operating systems are Ubuntu 20.04 and Ubuntu 18.04.
## Ubuntu 20.04 and 18.04
The following dependencies are needed for a clean install of Ubuntu 20.04 or Ubuntu 18.04:
```
sudo apt-get install \
autoconf bash bison build-essential ca-certificates dnsutils expect flex git \
graphviz libbz2-dev libcurl4-openssl-dev libncurses-dev libpcre3-dev \
libsnappy-dev libsodium-dev libssl-dev libtool libzip-dev locales lsb-release \
mc nano net-tools ntp openssh-server pkg-config python3 python3-jinja2 sudo \
systemd-coredump wget
mkdir $HOME/src
cd $HOME/src
export BOOST_ROOT=$HOME/src/boost_1_67_0
sudo apt-get update
sudo apt-get install -y autotools-dev build-essential libbz2-dev libicu-dev python-dev
wget -c 'http://sourceforge.net/projects/boost/files/boost/1.67.0/boost_1_67_0.tar.bz2/download' \
     -O boost_1_67_0.tar.bz2
tar xjf boost_1_67_0.tar.bz2
cd boost_1_67_0/
./bootstrap.sh "--prefix=$BOOST_ROOT"
./b2 install
```
Boost libraries setup:
```
wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz
tar -xzf boost_1_72_0.tar.gz boost_1_72_0
cd boost_1_72_0
./bootstrap.sh
./b2
sudo ./b2 install
sudo ldconfig
```
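If the install succeeded, the Boost version header will have been placed under the default `/usr/local` prefix used by `b2 install`. A quick, optional check (a sketch; the version string printed depends on the tarball you built):
```
# Boost installs its version header under the install prefix;
# BOOST_LIB_VERSION should report the release that was just built
grep BOOST_LIB_VERSION /usr/local/include/boost/version.hpp
```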
cmake setup:
```
wget https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh
chmod 755 ./cmake-3.24.2-linux-x86_64.sh
sudo ./cmake-3.24.2-linux-x86_64.sh --prefix=/usr --skip-license
cmake --version
```
## Building Peerplays
libzmq setup:
```
wget https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.4.tar.gz
tar -xzvf v4.3.4.tar.gz
cd libzmq-4.3.4
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
cppzmq setup:
```
wget https://github.com/zeromq/cppzmq/archive/refs/tags/v4.9.0.tar.gz
tar -xzvf v4.9.0.tar.gz
cd cppzmq-4.9.0
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
gsl setup:
```
wget https://github.com/imatix/gsl/archive/refs/tags/v4.1.4.tar.gz
tar -xzvf v4.1.4.tar.gz
cd gsl-4.1.4
make -j$(nproc)
sudo make install
sudo ldconfig
```
libbitcoin-explorer setup:
```
git clone --branch version3.8.0 --depth 1 https://gitlab.com/PBSA/peerplays-1.0/libbitcoin-explorer.git
cd libbitcoin-explorer
sudo ./install.sh
sudo ldconfig
```
Doxygen setup:
```
wget https://github.com/doxygen/doxygen/archive/refs/tags/Release_1_8_17.tar.gz
tar -xvf Release_1_8_17.tar.gz
cd doxygen-Release_1_8_17
mkdir build
cd build
cmake ..
make -j$(nproc)
sudo make install
sudo ldconfig
```
Perl setup:
```
wget https://github.com/Perl/perl5/archive/refs/tags/v5.30.0.tar.gz
tar -xvf v5.30.0.tar.gz
cd perl5-5.30.0
./Configure -des
make -j$(nproc)
sudo make install
sudo ldconfig
```
Building Peerplays
```
git clone https://gitlab.com/PBSA/peerplays.git
cd $HOME/src
export BOOST_ROOT=$HOME/src/boost_1_67_0
git clone https://github.com/peerplays-network/peerplays.git
cd peerplays
git submodule update --init --recursive
# If you want to build a Mainnet node
cmake -DCMAKE_BUILD_TYPE=Release .
# If you want to build a Testnet node
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_PEERPLAYS_TESTNET=1 .
# Update -j flag depending on your current system specs;
# Recommended 4GB of RAM per 1 CPU core
# make -j2 for 8GB RAM
# make -j4 for 16GB RAM
# make -j8 for 32GB RAM
cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release
make -j$(nproc)
sudo make install # installs the executables under /usr/local
make install # installs the executables under /usr/local
```
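Once the build finishes, the node and wallet binaries end up under `programs/` inside the source tree. A minimal sanity check (a sketch; `--help` simply prints each program's supported options):
```
# Confirm the main executables were built; both print their option list
./programs/witness_node/witness_node --help
./programs/cli_wallet/cli_wallet --help
```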
## Docker images
docker build -t peerplays .
## Docker image
Install Docker and add the current user to the docker group.
```
# Install docker
sudo apt install docker.io
sudo usermod -a -G docker $USER
# Add current user to docker group
sudo usermod -a -G docker $USER
# You need to restart your shell session to apply the group membership
# Type 'groups' to verify that you are a member of a docker group
# Build docker image (from the project root, must be a docker group member)
docker build -t peerplays .
# Start docker image
docker start peerplays
# Exposed ports
# # rpc service:
# EXPOSE 8090
# # p2p service:
# EXPOSE 1776
```
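To make the RPC and P2P services listed above reachable from the host, the exposed ports have to be published when the container is created. A minimal sketch (the container name and host-side port choices here are illustrative, not part of the official instructions):
```
# Create and start a container from the image built above,
# publishing the RPC (8090) and P2P (1776) ports on the host
docker run -d --name peerplays-node -p 8090:8090 -p 1776:1776 peerplays
```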
### Official docker image for Peerplays Mainnet
```
docker pull datasecuritynode/peerplays:latest
```
### Building docker images manually
```
# Checkout the code
git clone https://gitlab.com/PBSA/peerplays.git
cd peerplays
# Checkout the branch you want
# E.g.
# git checkout beatrice
# git checkout develop
git checkout master
git submodule update --init --recursive
# Execute from the project root, must be a docker group member
# Build docker image, using Ubuntu 20.04 base
docker build --no-cache -f Dockerfile -t peerplays .
# Build docker image, using Ubuntu 18.04 base
docker build --no-cache -f Dockerfile.18.04 -t peerplays-18-04 .
```
### Start docker image
```
# Start docker image, using Ubuntu 20.04 base
docker run peerplays:latest
# Start docker image, using Ubuntu 18.04 base
docker run peerplays-18-04:latest
```
The rest of the instructions on starting the chain remain the same.
Starting A Peerplays Node
-----------------
For users of Ubuntu 14.04 LTS and newer, see
[this](https://github.com/cryptonomex/graphene/wiki/build-ubuntu) and
then proceed with:
git clone https://github.com/peerplays-network/peerplays.git
cd peerplays
git submodule update --init --recursive
cmake -DBOOST_ROOT="$BOOST_ROOT" -DCMAKE_BUILD_TYPE=Release .
make
./programs/witness_node/witness_node
Launching the witness creates the required directories. Next, **stop the witness** and continue.
$ vi witness_node_data_dir/config.ini
p2p-endpoint = 0.0.0.0:9777
rpc-endpoint = 127.0.0.1:8090
seed-node = 213.184.225.234:59500
Start the witness back up
./programs/witness_node/witness_node
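After the restart, a quick way to confirm that the endpoints from `config.ini` are actually being served is to check the listening sockets from another shell (a sketch; assumes the iproute2 `ss` utility is available):
```
# The ports below are the ones configured above: 9777 (p2p) and 8090 (rpc)
ss -ltn | grep -E ':(9777|8090)'
```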
@ -248,7 +154,7 @@ Create your witness (substitute the url for your witness information)
```
create_witness your_witness_username "url" true
```
**Be sure to take note of the block_signing_key**
IMPORTANT (issue the command below using the block_signing_key just obtained)
```

794
bkup_CMakeCache.txt Normal file
View file

@ -0,0 +1,794 @@
# This is the CMakeCache file.
# For build in directory: /home/pbattu/git/18.04/peerplays
# It was generated by CMake: /usr/bin/cmake
# You can edit this file to change values found and used by cmake.
# If you do not want to change any of the values, simply exit the editor.
# If you do want to change a value, simply edit, save, and exit the editor.
# The syntax for the file is as follows:
# KEY:TYPE=VALUE
# KEY is the name of a variable in the cache.
# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!.
# VALUE is the current value for the KEY.
########################
# EXTERNAL cache entries
########################
//No help, variable specified on the command line.
BOOST_ROOT:PATH=/home/pbattu/git/18.04/boost_1_67_0
//The threading library used by boost-thread
BOOST_THREAD_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libpthread.so
//Build bitshares executables (witness node, cli wallet, etc)
BUILD_BITSHARES_PROGRAMS:BOOL=TRUE
//Build bitshares unit tests
BUILD_BITSHARES_TESTS:BOOL=TRUE
//Build websocketpp examples.
BUILD_EXAMPLES:BOOL=OFF
//Build websocketpp tests.
BUILD_TESTS:BOOL=OFF
//Value Computed by CMake
BitShares_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays
//Value Computed by CMake
BitShares_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays
//Boost chrono library (debug)
Boost_CHRONO_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a
//Boost chrono library (release)
Boost_CHRONO_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a
//Boost context library (debug)
Boost_CONTEXT_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a
//Boost context library (release)
Boost_CONTEXT_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a
//Boost coroutine library (debug)
Boost_COROUTINE_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a
//Boost coroutine library (release)
Boost_COROUTINE_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a
//Boost date_time library (debug)
Boost_DATE_TIME_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a
//Boost date_time library (release)
Boost_DATE_TIME_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a
//The directory containing a CMake configuration file for Boost.
Boost_DIR:PATH=Boost_DIR-NOTFOUND
//Boost filesystem library (debug)
Boost_FILESYSTEM_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a
//Boost filesystem library (release)
Boost_FILESYSTEM_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a
//Path to a file.
Boost_INCLUDE_DIR:PATH=/home/pbattu/git/18.04/boost_1_67_0/include
//Boost iostreams library (debug)
Boost_IOSTREAMS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a
//Boost iostreams library (release)
Boost_IOSTREAMS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a
//Boost library directory
Boost_LIBRARY_DIR:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost library directory DEBUG
Boost_LIBRARY_DIR_DEBUG:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost library directory RELEASE
Boost_LIBRARY_DIR_RELEASE:PATH=/home/pbattu/git/18.04/boost_1_67_0/lib
//Boost locale library (debug)
Boost_LOCALE_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a
//Boost locale library (release)
Boost_LOCALE_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a
//Boost program_options library (debug)
Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a
//Boost program_options library (release)
Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a
//Boost serialization library (debug)
Boost_SERIALIZATION_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a
//Boost serialization library (release)
Boost_SERIALIZATION_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a
//Boost signals library (debug)
Boost_SIGNALS_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a
//Boost signals library (release)
Boost_SIGNALS_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a
//Boost system library (debug)
Boost_SYSTEM_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a
//Boost system library (release)
Boost_SYSTEM_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a
//Boost thread library (debug)
Boost_THREAD_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a
//Boost thread library (release)
Boost_THREAD_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a
//Boost unit_test_framework library (debug)
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a
//Boost unit_test_framework library (release)
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE:FILEPATH=/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a
//ON or OFF
Boost_USE_STATIC_LIBS:STRING=ON
//Path to a program.
CMAKE_AR:FILEPATH=/usr/bin/ar
//Choose the type of build, options are: None(CMAKE_CXX_FLAGS or
// CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.
CMAKE_BUILD_TYPE:STRING=Debug
//Enable/Disable color output during build.
CMAKE_COLOR_MAKEFILE:BOOL=ON
//Configurations
CMAKE_CONFIGURATION_TYPES:STRING=Release;RelWithDebInfo;Debug
//CXX compiler
CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/g++-5
//A wrapper around 'ar' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-5
//A wrapper around 'ranlib' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_CXX_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-5
//Flags used by the compiler during all build types.
CMAKE_CXX_FLAGS:STRING=
//Flags used by the compiler during debug builds.
CMAKE_CXX_FLAGS_DEBUG:STRING=-g
//Flags used by the compiler during release builds for minimum
// size.
CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG
//Flags used by the compiler during release builds.
CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG
//Flags used by the compiler during release builds with debug info.
CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG
//C compiler
CMAKE_C_COMPILER:FILEPATH=/usr/bin/gcc-5
//A wrapper around 'ar' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_C_COMPILER_AR:FILEPATH=/usr/bin/gcc-ar-5
//A wrapper around 'ranlib' adding the appropriate '--plugin' option
// for the GCC compiler
CMAKE_C_COMPILER_RANLIB:FILEPATH=/usr/bin/gcc-ranlib-5
//Flags used by the compiler during all build types.
CMAKE_C_FLAGS:STRING=
//Flags used by the compiler during debug builds.
CMAKE_C_FLAGS_DEBUG:STRING=-g
//Flags used by the compiler during release builds for minimum
// size.
CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG
//Flags used by the compiler during release builds.
CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG
//Flags used by the compiler during release builds with debug info.
CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG
//Flags used by the linker.
CMAKE_EXE_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Enable/Disable output of compile commands during generation.
CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=OFF
//Install path prefix, prepended onto install directories.
CMAKE_INSTALL_PREFIX:PATH=/usr/local
//Path to a program.
CMAKE_LINKER:FILEPATH=/usr/bin/ld
//Path to a program.
CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/make
//Flags used by the linker during the creation of modules.
CMAKE_MODULE_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Path to a program.
CMAKE_NM:FILEPATH=/usr/bin/nm
//Path to a program.
CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy
//Path to a program.
CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump
//Value Computed by CMake
CMAKE_PROJECT_NAME:STATIC=BitShares
//Path to a program.
CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib
//Flags used by the linker during the creation of dll's.
CMAKE_SHARED_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//If set, runtime paths are not added when installing shared libraries,
// but are added when building.
CMAKE_SKIP_INSTALL_RPATH:BOOL=NO
//If set, runtime paths are not added when using shared libraries.
CMAKE_SKIP_RPATH:BOOL=NO
//Flags used by the linker during the creation of static libraries.
CMAKE_STATIC_LINKER_FLAGS:STRING=
//Flags used by the linker during debug builds.
CMAKE_STATIC_LINKER_FLAGS_DEBUG:STRING=
//Flags used by the linker during release minsize builds.
CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL:STRING=
//Flags used by the linker during release builds.
CMAKE_STATIC_LINKER_FLAGS_RELEASE:STRING=
//Flags used by the linker during Release with Debug Info builds.
CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING=
//Path to a program.
CMAKE_STRIP:FILEPATH=/usr/bin/strip
//If this value is on, makefiles will be generated without the
// .SILENT directive, and all commands will be echoed to the console
// during the make. This is useful for debugging only. With Visual
// Studio IDE projects all commands are done without /nologo.
CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE
//Path to a library.
CURSES_CURSES_LIBRARY:FILEPATH=CURSES_CURSES_LIBRARY-NOTFOUND
//Path to a library.
CURSES_FORM_LIBRARY:FILEPATH=CURSES_FORM_LIBRARY-NOTFOUND
//Path to a file.
CURSES_INCLUDE_PATH:PATH=CURSES_INCLUDE_PATH-NOTFOUND
//Path to a library.
CURSES_NCURSES_LIBRARY:FILEPATH=CURSES_NCURSES_LIBRARY-NOTFOUND
//Dot tool for use with Doxygen
DOXYGEN_DOT_EXECUTABLE:FILEPATH=DOXYGEN_DOT_EXECUTABLE-NOTFOUND
//Doxygen documentation generation tool (http://www.doxygen.org)
DOXYGEN_EXECUTABLE:FILEPATH=DOXYGEN_EXECUTABLE-NOTFOUND
//secp256k1 or openssl or mixed
ECC_IMPL:STRING=secp256k1
//Build BitShares for code coverage analysis
ENABLE_COVERAGE_TESTING:BOOL=FALSE
//Build websocketpp with CPP11 features enabled.
ENABLE_CPP11:BOOL=ON
//TRUE to try to use full zlib for compression, FALSE to use miniz.c
FC_USE_FULL_ZLIB:BOOL=FALSE
//Git command line client
GIT_EXECUTABLE:FILEPATH=/usr/bin/git
//location of the genesis.json to embed in the executable
GRAPHENE_EGENESIS_JSON:PATH=/home/pbattu/git/18.04/peerplays/genesis.json
//The directory containing a CMake configuration file for Gperftools.
Gperftools_DIR:PATH=Gperftools_DIR-NOTFOUND
//Installation directory for CMake files
INSTALL_CMAKE_DIR:PATH=lib/cmake/websocketpp
//Installation directory for header files
INSTALL_INCLUDE_DIR:PATH=include
//Log long API calls over websocket (ON OR OFF)
LOG_LONG_API:BOOL=ON
//Max API execution time in ms
LOG_LONG_API_MAX_MS:STRING=1000
//API execution time in ms at which to warn
LOG_LONG_API_WARN_MS:STRING=750
//Path to a library.
OPENSSL_CRYPTO_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libcrypto.a
//Path to a file.
OPENSSL_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
OPENSSL_SSL_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libssl.a
//Path to a program.
PERL_EXECUTABLE:FILEPATH=/usr/bin/perl
//pkg-config executable
PKG_CONFIG_EXECUTABLE:FILEPATH=/usr/bin/pkg-config
//Path to a file.
READLINE_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
READLINE_LIBRARIES:FILEPATH=/usr/lib/x86_64-linux-gnu/libreadline.so
//Path to a file.
Readline_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
Readline_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libreadline.so
//Path to a file.
Readline_ROOT_DIR:PATH=/usr
//OFF
UNITY_BUILD:BOOL=OFF
//Path to a file.
ZLIB_INCLUDE_DIR:PATH=/usr/include
//Path to a library.
ZLIB_LIBRARY_DEBUG:FILEPATH=ZLIB_LIBRARY_DEBUG-NOTFOUND
//Path to a library.
ZLIB_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libz.so
//Value Computed by CMake
fc_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc
//Dependencies for the target
fc_LIB_DEPENDS:STATIC=general;-L/usr/local/lib;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_thread.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_date_time.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_filesystem.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_system.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_program_options.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_signals.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_serialization.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_chrono.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_unit_test_framework.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_context.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_locale.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_iostreams.a;general;/home/pbattu/git/18.04/boost_1_67_0/lib/libboost_coroutine.a;general;/usr/lib/x86_64-linux-gnu/libpthread.so;general;/usr/lib/x86_64-linux-gnu/libssl.a;general;/usr/lib/x86_64-linux-gnu/libcrypto.a;general;/usr/lib/x86_64-linux-gnu/libz.so;general;dl;general;rt;general;/usr/lib/x86_64-linux-gnu/libreadline.so;general;secp256k1;
//Value Computed by CMake
fc_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc
//Dependencies for the target
graphene_account_history_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_accounts_list_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_affiliate_stats_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_app_LIB_DEPENDS:STATIC=general;graphene_market_history;general;graphene_account_history;general;graphene_accounts_list;general;graphene_affiliate_stats;general;graphene_chain;general;fc;general;graphene_db;general;graphene_net;general;graphene_time;general;graphene_utilities;general;graphene_debug_witness;general;graphene_bookie;
//Dependencies for the target
graphene_bookie_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_chain_LIB_DEPENDS:STATIC=general;fc;general;graphene_db;
//Dependencies for the target
graphene_db_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_debug_witness_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_delayed_node_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_egenesis_brief_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_egenesis_full_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_egenesis_none_LIB_DEPENDS:STATIC=general;graphene_chain;general;fc;
//Dependencies for the target
graphene_generate_genesis_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;general;graphene_time;
//Dependencies for the target
graphene_generate_uia_sharedrop_genesis_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;general;graphene_time;
//Dependencies for the target
graphene_market_history_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_net_LIB_DEPENDS:STATIC=general;fc;general;graphene_db;
//Dependencies for the target
graphene_snapshot_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Dependencies for the target
graphene_time_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_utilities_LIB_DEPENDS:STATIC=general;fc;
//Dependencies for the target
graphene_wallet_LIB_DEPENDS:STATIC=general;graphene_app;general;graphene_net;general;graphene_chain;general;graphene_utilities;general;fc;general;dl;
//Dependencies for the target
graphene_witness_LIB_DEPENDS:STATIC=general;graphene_chain;general;graphene_app;
//Value Computed by CMake
websocketpp_BINARY_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc/vendor/websocketpp
//Value Computed by CMake
websocketpp_SOURCE_DIR:STATIC=/home/pbattu/git/18.04/peerplays/libraries/fc/vendor/websocketpp
########################
# INTERNAL cache entries
########################
//ADVANCED property for variable: BOOST_ROOT
BOOST_ROOT-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CHRONO_LIBRARY_DEBUG
Boost_CHRONO_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CHRONO_LIBRARY_RELEASE
Boost_CHRONO_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_DEBUG
Boost_CONTEXT_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_CONTEXT_LIBRARY_RELEASE
Boost_CONTEXT_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_COROUTINE_LIBRARY_DEBUG
Boost_COROUTINE_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_COROUTINE_LIBRARY_RELEASE
Boost_COROUTINE_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_DEBUG
Boost_DATE_TIME_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_RELEASE
Boost_DATE_TIME_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_DIR
Boost_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_DEBUG
Boost_FILESYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_FILESYSTEM_LIBRARY_RELEASE
Boost_FILESYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_INCLUDE_DIR
Boost_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_IOSTREAMS_LIBRARY_DEBUG
Boost_IOSTREAMS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_IOSTREAMS_LIBRARY_RELEASE
Boost_IOSTREAMS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR
Boost_LIBRARY_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR_DEBUG
Boost_LIBRARY_DIR_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LIBRARY_DIR_RELEASE
Boost_LIBRARY_DIR_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LOCALE_LIBRARY_DEBUG
Boost_LOCALE_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_LOCALE_LIBRARY_RELEASE
Boost_LOCALE_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG
Boost_PROGRAM_OPTIONS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE
Boost_PROGRAM_OPTIONS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_DEBUG
Boost_SERIALIZATION_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SERIALIZATION_LIBRARY_RELEASE
Boost_SERIALIZATION_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_DEBUG
Boost_SIGNALS_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SIGNALS_LIBRARY_RELEASE
Boost_SIGNALS_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_DEBUG
Boost_SYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_RELEASE
Boost_SYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_THREAD_LIBRARY_DEBUG
Boost_THREAD_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_THREAD_LIBRARY_RELEASE
Boost_THREAD_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE
Boost_UNIT_TEST_FRAMEWORK_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_AR
CMAKE_AR-ADVANCED:INTERNAL=1
//This is the directory where this CMakeCache.txt was created
CMAKE_CACHEFILE_DIR:INTERNAL=/home/pbattu/git/18.04/peerplays
//Major version of cmake used to create the current loaded cache
CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3
//Minor version of cmake used to create the current loaded cache
CMAKE_CACHE_MINOR_VERSION:INTERNAL=10
//Patch version of cmake used to create the current loaded cache
CMAKE_CACHE_PATCH_VERSION:INTERNAL=2
//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE
CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1
//Path to CMake executable.
CMAKE_COMMAND:INTERNAL=/usr/bin/cmake
//Path to cpack program executable.
CMAKE_CPACK_COMMAND:INTERNAL=/usr/bin/cpack
//Path to ctest program executable.
CMAKE_CTEST_COMMAND:INTERNAL=/usr/bin/ctest
//ADVANCED property for variable: CMAKE_CXX_COMPILER
CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_AR
CMAKE_CXX_COMPILER_AR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_COMPILER_RANLIB
CMAKE_CXX_COMPILER_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS
CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG
CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER
CMAKE_C_COMPILER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER_AR
CMAKE_C_COMPILER_AR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_COMPILER_RANLIB
CMAKE_C_COMPILER_RANLIB-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS
CMAKE_C_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG
CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//Executable file format
CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS
CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG
CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL
CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELEASE
CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS
CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1
//Name of external makefile project generator.
CMAKE_EXTRA_GENERATOR:INTERNAL=
//Name of generator.
CMAKE_GENERATOR:INTERNAL=Unix Makefiles
//Name of generator platform.
CMAKE_GENERATOR_PLATFORM:INTERNAL=
//Name of generator toolset.
CMAKE_GENERATOR_TOOLSET:INTERNAL=
//Have symbol pthread_create
CMAKE_HAVE_LIBC_CREATE:INTERNAL=
//Have library pthreads
CMAKE_HAVE_PTHREADS_CREATE:INTERNAL=
//Have library pthread
CMAKE_HAVE_PTHREAD_CREATE:INTERNAL=1
//Have include pthread.h
CMAKE_HAVE_PTHREAD_H:INTERNAL=1
//Source directory with the top level CMakeLists.txt file for this
// project
CMAKE_HOME_DIRECTORY:INTERNAL=/home/pbattu/git/18.04/peerplays
//Install .so files without execute permission.
CMAKE_INSTALL_SO_NO_EXE:INTERNAL=1
//ADVANCED property for variable: CMAKE_LINKER
CMAKE_LINKER-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MAKE_PROGRAM
CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS
CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG
CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL
CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE
CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_NM
CMAKE_NM-ADVANCED:INTERNAL=1
//number of local generators
CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=37
//ADVANCED property for variable: CMAKE_OBJCOPY
CMAKE_OBJCOPY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_OBJDUMP
CMAKE_OBJDUMP-ADVANCED:INTERNAL=1
//Platform information initialized
CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1
//ADVANCED property for variable: CMAKE_RANLIB
CMAKE_RANLIB-ADVANCED:INTERNAL=1
//Path to CMake installation.
CMAKE_ROOT:INTERNAL=/usr/share/cmake-3.10
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS
CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG
CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL
CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE
CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH
CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_SKIP_RPATH
CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS
CMAKE_STATIC_LINKER_FLAGS-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_DEBUG
CMAKE_STATIC_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL
CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELEASE
CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO
CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CMAKE_STRIP
CMAKE_STRIP-ADVANCED:INTERNAL=1
//uname command
CMAKE_UNAME:INTERNAL=/bin/uname
//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE
CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_CURSES_LIBRARY
CURSES_CURSES_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_FORM_LIBRARY
CURSES_FORM_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_INCLUDE_PATH
CURSES_INCLUDE_PATH-ADVANCED:INTERNAL=1
//ADVANCED property for variable: CURSES_NCURSES_LIBRARY
CURSES_NCURSES_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: DOXYGEN_DOT_EXECUTABLE
DOXYGEN_DOT_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: DOXYGEN_EXECUTABLE
DOXYGEN_EXECUTABLE-ADVANCED:INTERNAL=1
//Details about finding OpenSSL
FIND_PACKAGE_MESSAGE_DETAILS_OpenSSL:INTERNAL=[/usr/lib/x86_64-linux-gnu/libcrypto.a][/usr/include][v1.1.0g()]
//Details about finding Perl
FIND_PACKAGE_MESSAGE_DETAILS_Perl:INTERNAL=[/usr/bin/perl][v5.26.1()]
//Details about finding Readline
FIND_PACKAGE_MESSAGE_DETAILS_Readline:INTERNAL=[/usr/include][/usr/lib/x86_64-linux-gnu/libreadline.so][v()]
//Details about finding Threads
FIND_PACKAGE_MESSAGE_DETAILS_Threads:INTERNAL=[TRUE][v()]
//Details about finding ZLIB
FIND_PACKAGE_MESSAGE_DETAILS_ZLIB:INTERNAL=[/usr/lib/x86_64-linux-gnu/libz.so][/usr/include][v1.2.11()]
//ADVANCED property for variable: GIT_EXECUTABLE
GIT_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_CRYPTO_LIBRARY
OPENSSL_CRYPTO_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_INCLUDE_DIR
OPENSSL_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: OPENSSL_SSL_LIBRARY
OPENSSL_SSL_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: PERL_EXECUTABLE
PERL_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: PKG_CONFIG_EXECUTABLE
PKG_CONFIG_EXECUTABLE-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_INCLUDE_DIR
Readline_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_LIBRARY
Readline_LIBRARY-ADVANCED:INTERNAL=1
//ADVANCED property for variable: Readline_ROOT_DIR
Readline_ROOT_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_INCLUDE_DIR
ZLIB_INCLUDE_DIR-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_LIBRARY_DEBUG
ZLIB_LIBRARY_DEBUG-ADVANCED:INTERNAL=1
//ADVANCED property for variable: ZLIB_LIBRARY_RELEASE
ZLIB_LIBRARY_RELEASE-ADVANCED:INTERNAL=1
//Last used BOOST_ROOT value.
_BOOST_ROOT_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0
//Components requested for this build tree.
_Boost_COMPONENTS_SEARCHED:INTERNAL=chrono;context;coroutine;date_time;filesystem;iostreams;locale;program_options;serialization;signals;system;thread;unit_test_framework
//Last used Boost_INCLUDE_DIR value.
_Boost_INCLUDE_DIR_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/include
//Last used Boost_LIBRARY_DIR_DEBUG value.
_Boost_LIBRARY_DIR_DEBUG_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_LIBRARY_DIR value.
_Boost_LIBRARY_DIR_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_LIBRARY_DIR_RELEASE value.
_Boost_LIBRARY_DIR_RELEASE_LAST:INTERNAL=/home/pbattu/git/18.04/boost_1_67_0/lib
//Last used Boost_NAMESPACE value.
_Boost_NAMESPACE_LAST:INTERNAL=boost
//Last used Boost_USE_MULTITHREADED value.
_Boost_USE_MULTITHREADED_LAST:INTERNAL=TRUE
//Last used Boost_USE_STATIC_LIBS value.
_Boost_USE_STATIC_LIBS_LAST:INTERNAL=ON
_OPENSSL_CFLAGS:INTERNAL=
_OPENSSL_CFLAGS_I:INTERNAL=
_OPENSSL_CFLAGS_OTHER:INTERNAL=
_OPENSSL_FOUND:INTERNAL=1
_OPENSSL_INCLUDEDIR:INTERNAL=/usr/include
_OPENSSL_INCLUDE_DIRS:INTERNAL=
_OPENSSL_LDFLAGS:INTERNAL=-lssl;-lcrypto
_OPENSSL_LDFLAGS_OTHER:INTERNAL=
_OPENSSL_LIBDIR:INTERNAL=/usr/lib/x86_64-linux-gnu
_OPENSSL_LIBRARIES:INTERNAL=ssl;crypto
_OPENSSL_LIBRARY_DIRS:INTERNAL=
_OPENSSL_LIBS:INTERNAL=
_OPENSSL_LIBS_L:INTERNAL=
_OPENSSL_LIBS_OTHER:INTERNAL=
_OPENSSL_LIBS_PATHS:INTERNAL=
_OPENSSL_PREFIX:INTERNAL=/usr
_OPENSSL_STATIC_CFLAGS:INTERNAL=
_OPENSSL_STATIC_CFLAGS_I:INTERNAL=
_OPENSSL_STATIC_CFLAGS_OTHER:INTERNAL=
_OPENSSL_STATIC_INCLUDE_DIRS:INTERNAL=
_OPENSSL_STATIC_LDFLAGS:INTERNAL=-lssl;-ldl;-lcrypto;-ldl
_OPENSSL_STATIC_LDFLAGS_OTHER:INTERNAL=
_OPENSSL_STATIC_LIBDIR:INTERNAL=
_OPENSSL_STATIC_LIBRARIES:INTERNAL=ssl;dl;crypto;dl
_OPENSSL_STATIC_LIBRARY_DIRS:INTERNAL=
_OPENSSL_STATIC_LIBS:INTERNAL=
_OPENSSL_STATIC_LIBS_L:INTERNAL=
_OPENSSL_STATIC_LIBS_OTHER:INTERNAL=
_OPENSSL_STATIC_LIBS_PATHS:INTERNAL=
_OPENSSL_VERSION:INTERNAL=1.1.0g
_OPENSSL_openssl_INCLUDEDIR:INTERNAL=
_OPENSSL_openssl_LIBDIR:INTERNAL=
_OPENSSL_openssl_PREFIX:INTERNAL=
_OPENSSL_openssl_VERSION:INTERNAL=
__pkg_config_arguments__OPENSSL:INTERNAL=QUIET;openssl
__pkg_config_checked__OPENSSL:INTERNAL=1
prefix_result:INTERNAL=/usr/lib/x86_64-linux-gnu


@ -1,6 +0,0 @@
#!/bin/bash
find ./libraries/app -regex ".*[c|h]pp" | xargs clang-format -i
find ./libraries/chain/hardfork.d -regex ".*hf" | xargs clang-format -i
find ./libraries/plugins/peerplays_sidechain -regex ".*[c|h]pp" | xargs clang-format -i
find ./programs/cli_wallet -regex ".*[c|h]pp" | xargs clang-format -i

61
docker/default_config.ini Normal file

@ -0,0 +1,61 @@
# Endpoint for P2P node to listen on
p2p-endpoint = 0.0.0.0:9090
# P2P nodes to connect to on startup (may specify multiple times)
# seed-node =
# JSON array of P2P nodes to connect to on startup
# seed-nodes =
# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.
# checkpoint =
# Endpoint for websocket RPC to listen on
rpc-endpoint = 0.0.0.0:8090
# Endpoint for TLS websocket RPC to listen on
# rpc-tls-endpoint =
# The TLS certificate file for this server
# server-pem =
# Password for this certificate
# server-pem-password =
# File to read Genesis State from
# genesis-json =
# Block signing key to use for init witnesses, overrides genesis file
# dbg-init-key =
# JSON file specifying API permissions
# api-access =
# Enable block production, even if the chain is stale.
enable-stale-production = false
# Percent of witnesses (0-99) that must be participating in order to produce blocks
required-participation = false
# ID of witness controlled by this node (e.g. "1.6.5", quotes are required, may specify multiple times)
# witness-id =
# Tuple of [PublicKey, WIF private key] (may specify multiple times)
# private-key = ["BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]
# Account ID to track history for (may specify multiple times)
# track-account =
# Track market history by grouping orders into buckets of equal size measured in seconds specified as a JSON array of numbers
# bucket-size = [15,60,300,3600,86400]
bucket-size = [60,300,900,1800,3600,14400,86400]
# Buckets for 1 min, 5 min, 15 min, 30 min, 1 h, 4 h and 1 day; intended as the default set.
# How far back in time to track history for each bucket size, measured in the number of buckets (default: 1000)
history-per-size = 1000
# Max amount of operations to store in the database, per account (drastically reduces RAM requirements)
max-ops-per-account = 1000
# Remove old operation history objects from RAM
partial-operations = true
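
The bucket-size option above is a JSON array of bucket widths in seconds. Below is a minimal sketch of how a plugin could parse such a value with boost::program_options and fc::json, in the spirit of the market-history plugin; the helper name and the exact fc::json conversion call (including its depth argument) are illustrative assumptions, not code from this commit.

#include <boost/program_options.hpp>
#include <fc/container/flat.hpp>
#include <fc/io/json.hpp>

// Illustrative helper (not part of this commit): turn the "bucket-size"
// option into the set of bucket widths a plugin would track.
fc::flat_set<uint32_t> parse_bucket_sizes(const boost::program_options::variables_map &options) {
   // Defaults match the docker config above.
   fc::flat_set<uint32_t> buckets{60, 300, 900, 1800, 3600, 14400, 86400};
   if (options.count("bucket-size")) {
      // The value is a JSON array such as [60,300,900,1800,3600,14400,86400].
      const std::string &raw = options["bucket-size"].as<std::string>();
      // Depending on the fc version, as<>() may require a nesting-depth argument.
      buckets = fc::json::from_string(raw).as<fc::flat_set<uint32_t>>(2);
   }
   return buckets;
}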

82
docker/peerplaysentry.sh Normal file

@ -0,0 +1,82 @@
#!/bin/bash
PEERPLAYSD="/usr/local/bin/witness_node"
# For blockchain download
VERSION=`cat /etc/peerplays/version`
## Supported Environmental Variables
#
# * $PEERPLAYSD_SEED_NODES
# * $PEERPLAYSD_RPC_ENDPOINT
# * $PEERPLAYSD_PLUGINS
# * $PEERPLAYSD_REPLAY
# * $PEERPLAYSD_RESYNC
# * $PEERPLAYSD_P2P_ENDPOINT
# * $PEERPLAYSD_WITNESS_ID
# * $PEERPLAYSD_PRIVATE_KEY
# * $PEERPLAYSD_TRACK_ACCOUNTS
# * $PEERPLAYSD_PARTIAL_OPERATIONS
# * $PEERPLAYSD_MAX_OPS_PER_ACCOUNT
# * $PEERPLAYSD_TRUSTED_NODE
#
ARGS=""
# Translate environmental variables
if [[ ! -z "$PEERPLAYSD_SEED_NODES" ]]; then
for NODE in $PEERPLAYSD_SEED_NODES ; do
ARGS+=" --seed-node=$NODE"
done
fi
if [[ ! -z "$PEERPLAYSD_RPC_ENDPOINT" ]]; then
ARGS+=" --rpc-endpoint=${PEERPLAYSD_RPC_ENDPOINT}"
fi
if [[ ! -z "$PEERPLAYSD_REPLAY" ]]; then
ARGS+=" --replay-blockchain"
fi
if [[ ! -z "$PEERPLAYSD_RESYNC" ]]; then
ARGS+=" --resync-blockchain"
fi
if [[ ! -z "$PEERPLAYSD_P2P_ENDPOINT" ]]; then
ARGS+=" --p2p-endpoint=${PEERPLAYSD_P2P_ENDPOINT}"
fi
if [[ ! -z "$PEERPLAYSD_WITNESS_ID" ]]; then
ARGS+=" --witness-id=$PEERPLAYSD_WITNESS_ID"
fi
if [[ ! -z "$PEERPLAYSD_PRIVATE_KEY" ]]; then
ARGS+=" --private-key=$PEERPLAYSD_PRIVATE_KEY"
fi
if [[ ! -z "$PEERPLAYSD_TRACK_ACCOUNTS" ]]; then
for ACCOUNT in $PEERPLAYSD_TRACK_ACCOUNTS ; do
ARGS+=" --track-account=$ACCOUNT"
done
fi
if [[ ! -z "$PEERPLAYSD_PARTIAL_OPERATIONS" ]]; then
ARGS+=" --partial-operations=${PEERPLAYSD_PARTIAL_OPERATIONS}"
fi
if [[ ! -z "$PEERPLAYSD_MAX_OPS_PER_ACCOUNT" ]]; then
ARGS+=" --max-ops-per-account=${PEERPLAYSD_MAX_OPS_PER_ACCOUNT}"
fi
if [[ ! -z "$PEERPLAYSD_TRUSTED_NODE" ]]; then
ARGS+=" --trusted-node=${PEERPLAYSD_TRUSTED_NODE}"
fi
## Link the peerplays config file into home
## This link has been created in Dockerfile, already
ln -f -s /etc/peerplays/config.ini /var/lib/peerplays
# Plugins need to be provided in a space-separated list, which
# makes it necessary to write it like this
if [[ ! -z "$PEERPLAYSD_PLUGINS" ]]; then
$PEERPLAYSD --data-dir ${HOME} ${ARGS} ${PEERPLAYSD_ARGS} --plugins "${PEERPLAYSD_PLUGINS}"
else
$PEERPLAYSD --data-dir ${HOME} ${ARGS} ${PEERPLAYSD_ARGS}
fi

2
docs

@ -1 +1 @@
Subproject commit 1e924950c2f92b166c34ceb294e8b8c4997a6c4e
Subproject commit 8d8b69d82482101279460fa02f814d0e4030966f

File diff suppressed because it is too large


@ -1,11 +1,12 @@
add_subdirectory( app )
add_subdirectory( chain )
add_subdirectory( db )
add_subdirectory( egenesis )
add_subdirectory( fc )
add_subdirectory( db )
#add_subdirectory( deterministic_openssl_rand )
add_subdirectory( chain )
add_subdirectory( egenesis )
add_subdirectory( net )
add_subdirectory( plugins )
add_subdirectory( sha3 )
#add_subdirectory( p2p )
add_subdirectory( time )
add_subdirectory( utilities )
add_subdirectory( app )
add_subdirectory( plugins )
add_subdirectory( wallet )


@ -4,8 +4,8 @@ file(GLOB EGENESIS_HEADERS "../egenesis/include/graphene/app/*.hpp")
add_library( graphene_app
api.cpp
application.cpp
config_util.cpp
database_api.cpp
impacted.cpp
plugin.cpp
${HEADERS}
${EGENESIS_HEADERS}
@ -13,10 +13,7 @@ add_library( graphene_app
# need to link graphene_debug_witness because plugins aren't sufficiently isolated #246
#target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_chain fc graphene_db graphene_net graphene_utilities graphene_debug_witness )
target_link_libraries( graphene_app
PUBLIC graphene_net graphene_utilities
graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_bookie graphene_debug_witness graphene_elasticsearch graphene_es_objects graphene_generate_genesis graphene_market_history peerplays_sidechain )
target_link_libraries( graphene_app graphene_market_history graphene_account_history graphene_accounts_list graphene_affiliate_stats graphene_chain fc graphene_db graphene_net graphene_time graphene_utilities graphene_debug_witness graphene_bookie )
target_include_directories( graphene_app
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include"
"${CMAKE_CURRENT_SOURCE_DIR}/../egenesis/include" )
@ -33,25 +30,3 @@ INSTALL( TARGETS
ARCHIVE DESTINATION lib
)
INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/app" )
add_library( graphene_plugin
plugin.cpp
include/graphene/app/plugin.hpp
)
target_link_libraries( graphene_plugin
PUBLIC graphene_net graphene_utilities )
target_include_directories( graphene_plugin
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
INSTALL( TARGETS
graphene_app
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION lib
)

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,329 +0,0 @@
/*
* Copyright (c) 2018 Lubos Ilcik, and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <graphene/app/config_util.hpp>
#include <graphene/chain/config.hpp>
#include <fc/exception/exception.hpp>
#include <fc/log/console_appender.hpp>
#include <fc/log/file_appender.hpp>
#include <fc/log/logger_config.hpp>
#include <fc/reflect/variant.hpp>
#include <fc/string.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/property_tree/ini_parser.hpp>
#include <boost/property_tree/ptree.hpp>
#include <fstream>
namespace bpo = boost::program_options;
class deduplicator {
public:
deduplicator() :
modifier(nullptr) {
}
deduplicator(const boost::shared_ptr<bpo::option_description> (*mod_fn)(const boost::shared_ptr<bpo::option_description> &)) :
modifier(mod_fn) {
}
const boost::shared_ptr<bpo::option_description> next(const boost::shared_ptr<bpo::option_description> &o) {
const std::string name = o->long_name();
if (seen.find(name) != seen.end())
return nullptr;
seen.insert(name);
return modifier ? modifier(o) : o;
}
private:
boost::container::flat_set<std::string> seen;
const boost::shared_ptr<bpo::option_description> (*modifier)(const boost::shared_ptr<bpo::option_description> &);
};
// Currently, you can only specify the filenames and logging levels, which
// are all most users would want to change. At a later time, options can
// be added to control rotation intervals, compression, and other seldom-
// used features
static void write_default_logging_config_to_stream(std::ostream &out) {
out << "# declare an appender named \"stderr\" that writes messages to the console\n"
"[log.console_appender.stderr]\n"
"stream=std_error\n\n"
"# declare an appender named \"default\" that writes messages to default.log\n"
"[log.file_appender.default]\n"
"# filename can be absolute or relative to this config file\n"
"filename=logs/default/default.log\n"
"# Rotate log every ? minutes, if leave out default to 60\n"
"rotation_interval=60\n"
"# how long will logs be kept (in days), if leave out default to 1\n"
"rotation_limit=7\n\n"
"# declare an appender named \"p2p\" that writes messages to p2p.log\n"
"[log.file_appender.p2p]\n"
"# filename can be absolute or relative to this config file\n"
"filename=logs/p2p/p2p.log\n"
"# Rotate log every ? minutes, if leave out default to 60\n"
"rotation_interval=60\n"
"# how long will logs be kept (in days), if leave out default to 1\n"
"rotation_limit=7\n\n"
"# declare an appender named \"rpc\" that writes messages to rpc.log\n"
"[log.file_appender.rpc]\n"
"# filename can be absolute or relative to this config file\n"
"filename=logs/rpc/rpc.log\n"
"# Rotate log every ? minutes, if leave out default to 60\n"
"rotation_interval=60\n"
"# how long will logs be kept (in days), if leave out default to 1\n"
"rotation_limit=7\n\n"
"# route any messages logged to the default logger to the \"stderr\" appender and\n"
"# \"default\" appender we declared above, if they are info level or higher\n"
"[logger.default]\n"
"level=info\n"
"appenders=stderr,default\n\n"
"# route messages sent to the \"p2p\" logger to the \"p2p\" appender declared above\n"
"[logger.p2p]\n"
"level=warn\n"
"appenders=p2p\n\n"
"# route messages sent to the \"rpc\" logger to the \"rpc\" appender declared above\n"
"[logger.rpc]\n"
"level=error\n"
"appenders=rpc\n\n";
}
// logging config is too complicated to be parsed by boost::program_options,
// so we do it by hand
static fc::optional<fc::logging_config> load_logging_config_from_ini_file(const fc::path &config_ini_filename) {
try {
fc::logging_config logging_config;
bool found_logging_config = false;
boost::property_tree::ptree config_ini_tree;
boost::property_tree::ini_parser::read_ini(config_ini_filename.preferred_string().c_str(), config_ini_tree);
for (const auto &section : config_ini_tree) {
const std::string &section_name = section.first;
const boost::property_tree::ptree &section_tree = section.second;
const std::string console_appender_section_prefix = "log.console_appender.";
const std::string file_appender_section_prefix = "log.file_appender.";
const std::string logger_section_prefix = "logger.";
if (boost::starts_with(section_name, console_appender_section_prefix)) {
std::string console_appender_name = section_name.substr(console_appender_section_prefix.length());
std::string stream_name = section_tree.get<std::string>("stream");
// construct a default console appender config here
// stdout/stderr will be taken from ini file, everything else hard-coded here
fc::console_appender::config console_appender_config;
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::debug,
fc::console_appender::color::green));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::warn,
fc::console_appender::color::brown));
console_appender_config.level_colors.emplace_back(
fc::console_appender::level_color(fc::log_level::error,
fc::console_appender::color::cyan));
console_appender_config.stream = fc::variant(stream_name).as<fc::console_appender::stream::type>(GRAPHENE_MAX_NESTED_OBJECTS);
logging_config.appenders.push_back(fc::appender_config(console_appender_name, "console", fc::variant(console_appender_config, GRAPHENE_MAX_NESTED_OBJECTS)));
found_logging_config = true;
} else if (boost::starts_with(section_name, file_appender_section_prefix)) {
std::string file_appender_name = section_name.substr(file_appender_section_prefix.length());
fc::path file_name = section_tree.get<std::string>("filename");
if (file_name.is_relative())
file_name = fc::absolute(config_ini_filename).parent_path() / file_name;
int interval = section_tree.get_optional<int>("rotation_interval").get_value_or(60);
int limit = section_tree.get_optional<int>("rotation_limit").get_value_or(1);
// construct a default file appender config here
// filename will be taken from ini file, everything else hard-coded here
fc::file_appender::config file_appender_config;
file_appender_config.filename = file_name;
file_appender_config.flush = true;
file_appender_config.rotate = true;
file_appender_config.rotation_interval = fc::minutes(interval);
file_appender_config.rotation_limit = fc::days(limit);
logging_config.appenders.push_back(fc::appender_config(file_appender_name, "file", fc::variant(file_appender_config, GRAPHENE_MAX_NESTED_OBJECTS)));
found_logging_config = true;
} else if (boost::starts_with(section_name, logger_section_prefix)) {
std::string logger_name = section_name.substr(logger_section_prefix.length());
std::string level_string = section_tree.get<std::string>("level");
std::string appenders_string = section_tree.get<std::string>("appenders");
fc::logger_config logger_config(logger_name);
logger_config.level = fc::variant(level_string).as<fc::log_level>(5);
boost::split(logger_config.appenders, appenders_string,
boost::is_any_of(" ,"),
boost::token_compress_on);
logging_config.loggers.push_back(logger_config);
found_logging_config = true;
}
}
if (found_logging_config)
return logging_config;
else
return fc::optional<fc::logging_config>();
}
FC_RETHROW_EXCEPTIONS(warn, "")
}
static const boost::shared_ptr<bpo::option_description> new_option_description(const std::string &name, const bpo::value_semantic *value, const std::string &description) {
bpo::options_description helper("");
helper.add_options()(name.c_str(), value, description.c_str());
return helper.options()[0];
}
static void load_config_file(const fc::path &config_ini_path, const bpo::options_description &cfg_options,
bpo::variables_map &options) {
deduplicator dedup;
bpo::options_description unique_options("Graphene Witness Node");
for (const boost::shared_ptr<bpo::option_description> opt : cfg_options.options()) {
const boost::shared_ptr<bpo::option_description> od = dedup.next(opt);
if (!od)
continue;
unique_options.add(od);
}
// get the basic options
bpo::store(bpo::parse_config_file<char>(config_ini_path.preferred_string().c_str(),
unique_options, true),
options);
}
static bool load_logging_config_file(const fc::path &config_ini_path) {
// try to get logging options from the config file.
try {
fc::optional<fc::logging_config> logging_config = load_logging_config_from_ini_file(config_ini_path);
if (logging_config) {
fc::configure_logging(*logging_config);
return true;
}
} catch (const fc::exception &ex) {
wlog("Error parsing logging config from logging config file ${config}, using default config", ("config", config_ini_path.preferred_string()));
}
return false;
}
static void create_new_config_file(const fc::path &config_ini_path, const fc::path &data_dir,
const bpo::options_description &cfg_options) {
ilog("Writing new config file at ${path}", ("path", config_ini_path));
if (!fc::exists(data_dir))
fc::create_directories(data_dir);
auto modify_option_defaults = [](const boost::shared_ptr<bpo::option_description> &o) -> const boost::shared_ptr<bpo::option_description> {
const std::string &name = o->long_name();
if (name == "partial-operations")
return new_option_description(name, bpo::value<bool>()->default_value(true), o->description());
if (name == "max-ops-per-account")
return new_option_description(name, bpo::value<int>()->default_value(100), o->description());
return o;
};
deduplicator dedup(modify_option_defaults);
std::ofstream out_cfg(config_ini_path.preferred_string());
std::string plugin_header_surrounding(78, '=');
for (const boost::shared_ptr<bpo::option_description> opt : cfg_options.options()) {
const boost::shared_ptr<bpo::option_description> od = dedup.next(opt);
if (!od)
continue;
if (od->long_name().find("plugin-cfg-header-") == 0) // it's a plugin header
{
out_cfg << "\n";
out_cfg << "# " << plugin_header_surrounding << "\n";
out_cfg << "# " << od->description() << "\n";
out_cfg << "# " << plugin_header_surrounding << "\n";
out_cfg << "\n";
continue;
}
if (!od->description().empty())
out_cfg << "# " << od->description() << "\n";
boost::any store;
if (!od->semantic()->apply_default(store))
out_cfg << "# " << od->long_name() << " = \n";
else {
auto example = od->format_parameter();
if (example.empty())
// This is a boolean switch
out_cfg << od->long_name() << " = "
<< "false\n";
else {
// The string is formatted "arg (=<interesting part>)"
example.erase(0, 6);
example.erase(example.length() - 1);
out_cfg << od->long_name() << " = " << example << "\n";
}
}
out_cfg << "\n";
}
out_cfg << "\n"
<< "# " << plugin_header_surrounding << "\n"
<< "# logging options\n"
<< "# " << plugin_header_surrounding << "\n"
<< "#\n"
<< "# Logging configuration is loaded from logging.ini by default.\n"
<< "# If logging.ini exists, logging configuration added in this file will be ignored.\n";
out_cfg.close();
}
static void create_logging_config_file(const fc::path &config_ini_path, const fc::path &data_dir) {
ilog("Writing new config file at ${path}", ("path", config_ini_path));
if (!exists(data_dir)) {
create_directories(data_dir);
}
std::ofstream out_cfg(config_ini_path.preferred_string());
write_default_logging_config_to_stream(out_cfg);
out_cfg.close();
}
namespace graphene { namespace app {
void load_configuration_options(const fc::path &data_dir, const bpo::options_description &cfg_options, bpo::variables_map &options) {
const auto config_ini_path = data_dir / "config.ini";
const auto logging_ini_path = data_dir / "logging.ini";
if (!exists(config_ini_path) && fc::exists(logging_ini_path)) {
// this is an uncommon case
create_new_config_file(config_ini_path, data_dir, cfg_options);
} else if (!exists(config_ini_path)) {
// create default config.ini and logging.ini
create_new_config_file(config_ini_path, data_dir, cfg_options);
create_logging_config_file(logging_ini_path, data_dir);
}
// load witness node configuration
load_config_file(config_ini_path, cfg_options, options);
// load logging configuration
if (fc::exists(logging_ini_path)) {
load_logging_config_file(logging_ini_path);
} else {
// this is the legacy config.ini case
load_logging_config_file(config_ini_path);
}
}
}} // namespace graphene::app
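
A minimal sketch of how a node start-up path might drive load_configuration_options() shown above; the surrounding function and the two registered options are illustrative, since the real witness_node wires up many more options and plugins.

#include <graphene/app/config_util.hpp>
#include <boost/program_options.hpp>
#include <fc/filesystem.hpp>

namespace bpo = boost::program_options;

// Illustrative caller (not part of this commit).
void configure_node(const fc::path &data_dir) {
   bpo::options_description cfg_options("Graphene Witness Node");
   cfg_options.add_options()
      ("partial-operations", bpo::value<bool>(), "Remove old operation history objects from RAM")
      ("max-ops-per-account", bpo::value<int>(), "Maximum number of operations to keep in RAM per account");

   bpo::variables_map options;
   // Writes config.ini (and logging.ini) under data_dir on first run,
   // then loads the options and the fc logging configuration.
   graphene::app::load_configuration_options(data_dir, cfg_options, options);
}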

File diff suppressed because it is too large

315
libraries/app/impacted.cpp Normal file

@ -0,0 +1,315 @@
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <graphene/chain/protocol/authority.hpp>
#include <graphene/app/impacted.hpp>
namespace graphene { namespace app {
using namespace fc;
using namespace graphene::chain;
// TODO: Review all of these, especially no-ops
struct get_impacted_account_visitor
{
flat_set<account_id_type>& _impacted;
get_impacted_account_visitor( flat_set<account_id_type>& impact ):_impacted(impact) {}
typedef void result_type;
void operator()( const transfer_operation& op )
{
_impacted.insert( op.to );
}
void operator()( const asset_claim_fees_operation& op ){}
void operator()( const limit_order_create_operation& op ) {}
void operator()( const limit_order_cancel_operation& op )
{
_impacted.insert( op.fee_paying_account );
}
void operator()( const call_order_update_operation& op ) {}
void operator()( const fill_order_operation& op )
{
_impacted.insert( op.account_id );
}
void operator()( const account_create_operation& op )
{
_impacted.insert( op.registrar );
_impacted.insert( op.referrer );
add_authority_accounts( _impacted, op.owner );
add_authority_accounts( _impacted, op.active );
}
void operator()( const account_update_operation& op )
{
_impacted.insert( op.account );
if( op.owner )
add_authority_accounts( _impacted, *(op.owner) );
if( op.active )
add_authority_accounts( _impacted, *(op.active) );
}
void operator()( const account_whitelist_operation& op )
{
_impacted.insert( op.account_to_list );
}
void operator()( const account_upgrade_operation& op ) {}
void operator()( const account_transfer_operation& op )
{
_impacted.insert( op.new_owner );
}
void operator()( const asset_create_operation& op ) {}
void operator()( const asset_update_operation& op )
{
if( op.new_issuer )
_impacted.insert( *(op.new_issuer) );
}
void operator()( const asset_update_bitasset_operation& op ) {}
void operator()( const asset_update_dividend_operation& op ) {}
void operator()( const asset_dividend_distribution_operation& op )
{
_impacted.insert( op.account_id );
}
void operator()( const asset_update_feed_producers_operation& op ) {}
void operator()( const asset_issue_operation& op )
{
_impacted.insert( op.issue_to_account );
}
void operator()( const asset_reserve_operation& op ) {}
void operator()( const asset_fund_fee_pool_operation& op ) {}
void operator()( const asset_settle_operation& op ) {}
void operator()( const asset_global_settle_operation& op ) {}
void operator()( const asset_publish_feed_operation& op ) {}
void operator()( const witness_create_operation& op )
{
_impacted.insert( op.witness_account );
}
void operator()( const witness_update_operation& op )
{
_impacted.insert( op.witness_account );
}
void operator()( const proposal_create_operation& op )
{
vector<authority> other;
for( const auto& proposed_op : op.proposed_ops )
operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other );
for( auto& o : other )
add_authority_accounts( _impacted, o );
}
void operator()( const proposal_update_operation& op ) {}
void operator()( const proposal_delete_operation& op ) {}
void operator()( const withdraw_permission_create_operation& op )
{
_impacted.insert( op.authorized_account );
}
void operator()( const withdraw_permission_update_operation& op )
{
_impacted.insert( op.authorized_account );
}
void operator()( const withdraw_permission_claim_operation& op )
{
_impacted.insert( op.withdraw_from_account );
}
void operator()( const withdraw_permission_delete_operation& op )
{
_impacted.insert( op.authorized_account );
}
void operator()( const committee_member_create_operation& op )
{
_impacted.insert( op.committee_member_account );
}
void operator()( const committee_member_update_operation& op )
{
_impacted.insert( op.committee_member_account );
}
void operator()( const committee_member_update_global_parameters_operation& op ) {}
void operator()( const vesting_balance_create_operation& op )
{
_impacted.insert( op.owner );
}
void operator()( const vesting_balance_withdraw_operation& op ) {}
void operator()( const worker_create_operation& op ) {}
void operator()( const custom_operation& op ) {}
void operator()( const assert_operation& op ) {}
void operator()( const balance_claim_operation& op ) {}
void operator()( const override_transfer_operation& op )
{
_impacted.insert( op.to );
_impacted.insert( op.from );
_impacted.insert( op.issuer );
}
void operator()( const transfer_to_blind_operation& op )
{
_impacted.insert( op.from );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const blind_transfer_operation& op )
{
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const transfer_from_blind_operation& op )
{
_impacted.insert( op.to );
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
}
void operator()( const asset_settle_cancel_operation& op )
{
_impacted.insert( op.account );
}
void operator()( const fba_distribute_operation& op )
{
_impacted.insert( op.account_id );
}
void operator()( const sport_create_operation& op ) {}
void operator()( const sport_update_operation& op ) {}
void operator()( const sport_delete_operation& op ) {}
void operator()( const event_group_create_operation& op ) {}
void operator()( const event_group_update_operation& op ) {}
void operator()( const event_group_delete_operation& op ) {}
void operator()( const event_create_operation& op ) {}
void operator()( const event_update_operation& op ) {}
void operator()( const event_update_status_operation& op ) {}
void operator()( const betting_market_rules_create_operation& op ) {}
void operator()( const betting_market_rules_update_operation& op ) {}
void operator()( const betting_market_group_create_operation& op ) {}
void operator()( const betting_market_group_update_operation& op ) {}
void operator()( const betting_market_create_operation& op ) {}
void operator()( const betting_market_update_operation& op ) {}
void operator()( const betting_market_group_resolve_operation& op ) {}
void operator()( const betting_market_group_cancel_unmatched_bets_operation& op ) {}
void operator()( const bet_place_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const bet_cancel_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const bet_canceled_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const bet_adjusted_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const bet_matched_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const betting_market_group_resolved_operation& op )
{
_impacted.insert( op.bettor_id );
}
void operator()( const tournament_create_operation& op )
{
_impacted.insert( op.creator );
_impacted.insert( op.options.whitelist.begin(), op.options.whitelist.end() );
}
void operator()( const tournament_join_operation& op )
{
_impacted.insert( op.payer_account_id );
_impacted.insert( op.player_account_id );
}
void operator()( const tournament_leave_operation& op )
{
//if account canceling registration is not the player, it must be the payer
if (op.canceling_account_id != op.player_account_id)
_impacted.erase( op.canceling_account_id );
_impacted.erase( op.player_account_id );
}
void operator()( const game_move_operation& op )
{
_impacted.insert( op.player_account_id );
}
void operator()( const tournament_payout_operation& op )
{
_impacted.insert( op.payout_account_id );
}
void operator()( const affiliate_payout_operation& op )
{
_impacted.insert( op.affiliate );
}
void operator()( const affiliate_referral_payout_operation& op ) { }
void operator()( const lottery_asset_create_operation& op) { }
void operator()( const ticket_purchase_operation& op )
{
_impacted.insert( op.buyer );
}
void operator()( const lottery_reward_operation& op ) {
_impacted.insert( op.winner );
}
void operator()( const lottery_end_operation& op ) {
for( auto participant : op.participants ) {
_impacted.insert(participant.first);
}
}
void operator()( const sweeps_vesting_claim_operation& op ) {
_impacted.insert( op.account );
}
};
void operation_get_impacted_accounts( const operation& op, flat_set<account_id_type>& result )
{
get_impacted_account_visitor vtor = get_impacted_account_visitor( result );
op.visit( vtor );
}
void transaction_get_impacted_accounts( const transaction& tx, flat_set<account_id_type>& result )
{
for( const auto& op : tx.operations )
operation_get_impacted_accounts( op, result );
}
} }
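
A minimal sketch of calling the helpers defined above to collect every account touched by a transaction's operations; the wrapper function is illustrative and not part of this commit.

#include <graphene/app/impacted.hpp>
#include <graphene/chain/protocol/transaction.hpp>

// Illustrative helper (not part of this commit).
fc::flat_set<graphene::chain::account_id_type> impacted_accounts_of(const graphene::chain::transaction &trx) {
   fc::flat_set<graphene::chain::account_id_type> result;
   graphene::app::transaction_get_impacted_accounts(trx, result);
   return result;
}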


@ -25,23 +25,22 @@
#include <graphene/app/database_api.hpp>
#include <graphene/chain/protocol/confidential.hpp>
#include <graphene/chain/protocol/types.hpp>
#include <graphene/chain/protocol/confidential.hpp>
#include <graphene/market_history/market_history_plugin.hpp>
#include <graphene/accounts_list/accounts_list_plugin.hpp>
#include <graphene/debug_witness/debug_api.hpp>
#include <graphene/affiliate_stats/affiliate_stats_api.hpp>
#include <graphene/bookie/bookie_api.hpp>
#include <graphene/net/node.hpp>
#include <graphene/accounts_list/accounts_list_plugin.hpp>
#include <graphene/affiliate_stats/affiliate_stats_api.hpp>
#include <graphene/bookie/bookie_api.hpp>
#include <graphene/debug_witness/debug_api.hpp>
#include <graphene/elasticsearch/elasticsearch_plugin.hpp>
#include <graphene/market_history/market_history_plugin.hpp>
#include <graphene/peerplays_sidechain/sidechain_api.hpp>
#include <fc/api.hpp>
#include <fc/optional.hpp>
#include <fc/crypto/elliptic.hpp>
#include <fc/network/ip.hpp>
#include <fc/optional.hpp>
#include <boost/container/flat_set.hpp>
@ -51,415 +50,418 @@
#include <vector>
namespace graphene { namespace app {
using namespace graphene::chain;
using namespace graphene::market_history;
using namespace graphene::accounts_list;
using namespace fc::ecc;
using namespace std;
using namespace graphene::chain;
using namespace graphene::market_history;
using namespace graphene::accounts_list;
using namespace fc::ecc;
using namespace std;
class application;
class application;
struct verify_range_result {
bool success;
uint64_t min_val;
uint64_t max_val;
};
struct verify_range_proof_rewind_result {
bool success;
uint64_t min_val;
uint64_t max_val;
uint64_t value_out;
fc::ecc::blind_factor_type blind_out;
string message_out;
};
struct account_asset_balance {
string name;
account_id_type account_id;
share_type amount;
};
struct asset_holders {
asset_id_type asset_id;
int count;
};
/**
* @brief The history_api class implements the RPC API for account history
*
* This API contains methods to access account histories
*/
class history_api {
public:
history_api(application &app) :
_app(app),
database_api(std::ref(*app.chain_database())) {
}
/**
* @brief Get operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(const std::string account_id_or_name,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type()) const;
/**
* @brief Get only asked operations relevant to the specified account
* @param account_id_or_name The account ID or name whose history should be queried
* @param operation_id The ID of the operation type to retrieve from the account's history (0 = transfer, 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(const std::string account_id_or_name,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100) const;
/**
* @brief Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account_id_or_name The account ID or name whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history(const std::string account_id_or_name,
uint32_t stop = 0,
unsigned limit = 100,
uint32_t start = 0) const;
vector<order_history_object> get_fill_order_history(std::string asset_a, std::string asset_b, uint32_t limit) const;
vector<bucket_object> get_market_history(std::string asset_a, std::string asset_b, uint32_t bucket_seconds,
fc::time_point_sec start, fc::time_point_sec end) const;
vector<account_balance_object> list_core_accounts() const;
flat_set<uint32_t> get_market_history_buckets() const;
uint32_t api_limit_get_account_history_operations = 100;
uint32_t api_limit_get_account_history = 100;
uint32_t api_limit_get_relative_account_history = 100;
private:
application &_app;
graphene::app::database_api database_api;
};
/**
* @brief Block api
*/
class block_api {
public:
block_api(graphene::chain::database &db);
~block_api();
vector<optional<signed_block>> get_blocks(uint32_t block_num_from, uint32_t block_num_to) const;
private:
graphene::chain::database &_db;
};
/**
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api> {
public:
network_broadcast_api(application &a);
struct transaction_confirmation {
transaction_id_type id;
uint32_t block_num;
uint32_t trx_num;
processed_transaction trx;
struct verify_range_result
{
bool success;
uint64_t min_val;
uint64_t max_val;
};
struct verify_range_proof_rewind_result
{
bool success;
uint64_t min_val;
uint64_t max_val;
uint64_t value_out;
fc::ecc::blind_factor_type blind_out;
string message_out;
};
typedef std::function<void(variant /*transaction_confirmation*/)> confirmation_callback;
struct account_asset_balance
{
string name;
account_id_type account_id;
share_type amount;
};
struct asset_holders
{
asset_id_type asset_id;
int count;
};
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
* @brief The history_api class implements the RPC API for account history
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
* This API contains methods to access account histories
*/
void broadcast_transaction(const signed_transaction &trx);
class history_api
{
public:
history_api(application& app):_app(app){}
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback(confirmation_callback cb, const signed_transaction &trx);
/**
* @brief Get operations relevant to the specified account
* @param account The account whose history should be queried
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history(account_id_type account,
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100,
operation_history_id_type start = operation_history_id_type())const;
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction &trx);
/**
* @brief Get only asked operations relevant to the specified account
* @param account The account whose history should be queried
* @param operation_id The ID of the operation type to retrieve from the account's history (0 = transfer, 1 = limit order create, ...)
* @param stop ID of the earliest operation to retrieve
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start ID of the most recent operation to retrieve
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_account_history_operations(account_id_type account,
int operation_id,
operation_history_id_type start = operation_history_id_type(),
operation_history_id_type stop = operation_history_id_type(),
unsigned limit = 100)const;
void broadcast_block(const signed_block &block);
/**
* @brief Get operations relevant to the specified account referenced
* by an event numbering specific to the account. The current number of operations
* for the account can be found in the account statistics (or use 0 for start).
* @param account The account whose history should be queried
* @param stop Sequence number of earliest operation. 0 is default and will
* query 'limit' number of operations.
* @param limit Maximum number of operations to retrieve (must not exceed 100)
* @param start Sequence number of the most recent operation to retrieve.
* 0 is default, which will start querying from the most recent operation.
* @return A list of operations performed by account, ordered from most recent to oldest.
*/
vector<operation_history_object> get_relative_account_history( account_id_type account,
uint32_t stop = 0,
unsigned limit = 100,
uint32_t start = 0) const;
vector<order_history_object> get_fill_order_history( asset_id_type a, asset_id_type b, uint32_t limit )const;
vector<bucket_object> get_market_history( asset_id_type a, asset_id_type b, uint32_t bucket_seconds,
fc::time_point_sec start, fc::time_point_sec end )const;
vector<account_balance_object> list_core_accounts()const;
flat_set<uint32_t> get_market_history_buckets()const;
private:
application& _app;
};
/**
* @brief Not reflected, thus not accessible to API clients.
* @brief Block api
*/
class block_api
{
public:
block_api(graphene::chain::database& db);
~block_api();
vector<optional<signed_block>> get_blocks(uint32_t block_num_from, uint32_t block_num_to)const;
private:
graphene::chain::database& _db;
};
/**
* @brief The network_broadcast_api class allows broadcasting of transactions.
*/
class network_broadcast_api : public std::enable_shared_from_this<network_broadcast_api>
{
public:
network_broadcast_api(application& a);
struct transaction_confirmation
{
transaction_id_type id;
uint32_t block_num;
uint32_t trx_num;
processed_transaction trx;
};
typedef std::function<void(variant/*transaction_confirmation*/)> confirmation_callback;
/**
* @brief Broadcast a transaction to the network
* @param trx The transaction to broadcast
*
* The transaction will be checked for validity in the local database prior to broadcasting. If it fails to
* apply locally, an error will be thrown and the transaction will not be broadcast.
*/
void broadcast_transaction(const signed_transaction& trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
void broadcast_transaction_with_callback( confirmation_callback cb, const signed_transaction& trx);
/** this version of broadcast transaction registers a callback method that will be called when the transaction is
* included into a block. The callback method includes the transaction id, block number, and transaction number in the
* block.
*/
fc::variant broadcast_transaction_synchronous(const signed_transaction& trx);
void broadcast_block( const signed_block& block );
/**
* @brief Not reflected, thus not accessible to API clients.
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
*/
void on_applied_block( const signed_block& b );
private:
boost::signals2::scoped_connection _applied_block_connection;
map<transaction_id_type,confirmation_callback> _callbacks;
application& _app;
};
/**
* @brief The network_node_api class allows maintenance of p2p connections.
*/
class network_node_api
{
public:
network_node_api(application& a);
/**
* @brief Return general network information, such as p2p port
*/
fc::variant_object get_info() const;
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
void add_node(const fc::ip::endpoint& ep);
/**
* @brief Get status of all current connections to peers
*/
std::vector<net::peer_status> get_connected_peers() const;
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object& params);
/**
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant&)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
application& _app;
map<transaction_id_type, signed_transaction> _pending_transactions;
boost::signals2::scoped_connection _pending_trx_connection;
boost::signals2::scoped_connection _applied_block_connection;
std::function<void(const variant&)> _on_pending_transaction;
};
class crypto_api
{
public:
crypto_api();
fc::ecc::commitment_type blind( const fc::ecc::blind_factor_type& blind, uint64_t value );
fc::ecc::blind_factor_type blind_sum( const std::vector<blind_factor_type>& blinds_in, uint32_t non_neg );
bool verify_sum( const std::vector<commitment_type>& commits_in, const std::vector<commitment_type>& neg_commits_in, int64_t excess );
verify_range_result verify_range( const fc::ecc::commitment_type& commit, const std::vector<char>& proof );
std::vector<char> range_proof_sign( uint64_t min_value,
const commitment_type& commit,
const blind_factor_type& commit_blind,
const blind_factor_type& nonce,
int8_t base10_exp,
uint8_t min_bits,
uint64_t actual_value );
verify_range_proof_rewind_result verify_range_proof_rewind( const blind_factor_type& nonce,
const fc::ecc::commitment_type& commit,
const std::vector<char>& proof );
range_proof_info range_get_info( const std::vector<char>& proof );
};
/**
* @brief
*/
class asset_api
{
public:
asset_api(graphene::chain::database& db);
~asset_api();
vector<account_asset_balance> get_asset_holders( asset_id_type asset_id, uint32_t start, uint32_t limit )const;
int get_asset_holders_count( asset_id_type asset_id )const;
vector<asset_holders> get_all_asset_holders() const;
private:
graphene::chain::database& _db;
};
/**
* @brief The login_api class implements the bottom layer of the RPC API
*
* This function is registered to receive the applied_block
* signal from the chain database when a block is received.
* It then dispatches callbacks to clients who have requested
* to be notified when a particular txid is included in a block.
* All other APIs must be requested from this API.
*/
void on_applied_block(const signed_block &b);
class login_api
{
public:
login_api(application& a);
~login_api();
private:
boost::signals2::scoped_connection _applied_block_connection;
map<transaction_id_type, confirmation_callback> _callbacks;
application &_app;
};
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has successfully authenticated.
*/
bool login(const string& user, const string& password);
/// @brief Retrieve the network block API
fc::api<block_api> block()const;
/// @brief Retrieve the network broadcast API
fc::api<network_broadcast_api> network_broadcast()const;
/// @brief Retrieve the database API
fc::api<database_api> database()const;
/// @brief Retrieve the history API
fc::api<history_api> history()const;
/// @brief Retrieve the network node API
fc::api<network_node_api> network_node()const;
/// @brief Retrieve the cryptography API
fc::api<crypto_api> crypto()const;
/// @brief Retrieve the asset API
fc::api<asset_api> asset()const;
/// @brief Retrieve the debug API (if available)
fc::api<graphene::debug_witness::debug_api> debug()const;
/// @brief Retrieve the bookie API (if available)
fc::api<graphene::bookie::bookie_api> bookie()const;
/// @brief Retrieve the affiliate_stats API (if available)
fc::api<graphene::affiliate_stats::affiliate_stats_api> affiliate_stats()const;
/**
* @brief The network_node_api class allows maintenance of p2p connections.
*/
class network_node_api {
public:
network_node_api(application &a);
/// @brief Called to enable an API, not reflected.
void enable_api( const string& api_name );
private:
/**
* @brief Return general network information, such as p2p port
*/
fc::variant_object get_info() const;
application& _app;
optional< fc::api<block_api> > _block_api;
optional< fc::api<database_api> > _database_api;
optional< fc::api<network_broadcast_api> > _network_broadcast_api;
optional< fc::api<network_node_api> > _network_node_api;
optional< fc::api<history_api> > _history_api;
optional< fc::api<crypto_api> > _crypto_api;
optional< fc::api<asset_api> > _asset_api;
optional< fc::api<graphene::debug_witness::debug_api> > _debug_api;
optional< fc::api<graphene::bookie::bookie_api> > _bookie_api;
optional< fc::api<graphene::affiliate_stats::affiliate_stats_api> > _affiliate_stats_api;
};
/**
* @brief add_node Connect to a new peer
* @param ep The IP/Port of the peer to connect to
*/
void add_node(const fc::ip::endpoint &ep);
}} // graphene::app
/**
* @brief Get status of all current connections to peers
*/
std::vector<net::peer_status> get_connected_peers() const;
FC_REFLECT( graphene::app::network_broadcast_api::transaction_confirmation,
(id)(block_num)(trx_num)(trx) )
FC_REFLECT( graphene::app::verify_range_result,
(success)(min_val)(max_val) )
FC_REFLECT( graphene::app::verify_range_proof_rewind_result,
(success)(min_val)(max_val)(value_out)(blind_out)(message_out) )
//FC_REFLECT_TYPENAME( fc::ecc::compact_signature );
//FC_REFLECT_TYPENAME( fc::ecc::commitment_type );
/**
* @brief Get advanced node parameters, such as desired and max
* number of connections
*/
fc::variant_object get_advanced_node_parameters() const;
/**
* @brief Set advanced node parameters, such as desired and max
* number of connections
* @param params a JSON object containing the name/value pairs for the parameters to set
*/
void set_advanced_node_parameters(const fc::variant_object &params);
/**
* @brief Return list of potential peers
*/
std::vector<net::potential_peer_record> get_potential_peers() const;
/**
* @brief Return list of pending transactions.
*/
map<transaction_id_type, signed_transaction> list_pending_transactions() const;
/**
* @brief Subscribes caller for notifications about pending transactions.
* @param callback a functional object which will be called when new transaction is created.
*/
void subscribe_to_pending_transactions(std::function<void(const variant &)> callback);
/**
* @brief Unsubscribes caller from notifications about pending transactions.
*/
void unsubscribe_from_pending_transactions();
private:
application &_app;
map<transaction_id_type, signed_transaction> _pending_transactions;
boost::signals2::scoped_connection _pending_trx_connection;
boost::signals2::scoped_connection _applied_block_connection;
std::function<void(const variant &)> _on_pending_transaction;
};
/**
* @brief
*/
class asset_api {
public:
asset_api(graphene::app::application &app);
~asset_api();
/**
* @brief Get asset holders for a specific asset
* @param asset The specific asset id or symbol
* @param start The start index
* @param limit Maximum limit must not exceed 100
* @return A list of asset holders for the specified asset
*/
vector<account_asset_balance> get_asset_holders(std::string asset, uint32_t start, uint32_t limit) const;
/**
* @brief Get asset holders count for a specific asset
* @param asset The specific asset id or symbol
* @return Holders count for the specified asset
*/
int get_asset_holders_count(std::string asset) const;
/**
* @brief Get all asset holders
* @return A list of all asset holders
*/
vector<asset_holders> get_all_asset_holders() const;
uint32_t api_limit_get_asset_holders = 100;
private:
graphene::app::application &_app;
graphene::chain::database &_db;
graphene::app::database_api database_api;
};
}} // namespace graphene::app
extern template class fc::api<graphene::app::block_api>;
extern template class fc::api<graphene::app::network_broadcast_api>;
extern template class fc::api<graphene::app::network_node_api>;
extern template class fc::api<graphene::app::history_api>;
extern template class fc::api<graphene::app::asset_api>;
extern template class fc::api<graphene::debug_witness::debug_api>;
namespace graphene { namespace app {
/**
* @brief The login_api class implements the bottom layer of the RPC API
*
* All other APIs must be requested from this API.
*/
class login_api {
public:
login_api(application &a);
~login_api();
/**
* @brief Authenticate to the RPC server
* @param user Username to login with
* @param password Password to login with
* @return True if logged in successfully; false otherwise
*
* @note This must be called prior to requesting other APIs. Other APIs may not be accessible until the client
* has successfully authenticated.
*/
bool login(const string &user, const string &password);
/// @brief Retrieve the network block API
fc::api<block_api> block() const;
/// @brief Retrieve the network broadcast API
fc::api<network_broadcast_api> network_broadcast() const;
/// @brief Retrieve the database API
fc::api<database_api> database() const;
/// @brief Retrieve the history API
fc::api<history_api> history() const;
/// @brief Retrieve the network node API
fc::api<network_node_api> network_node() const;
/// @brief Retrieve the asset API
fc::api<asset_api> asset() const;
/// @brief Retrieve the debug API (if available)
fc::api<graphene::debug_witness::debug_api> debug() const;
/// @brief Retrieve the bookie API (if available)
fc::api<graphene::bookie::bookie_api> bookie() const;
/// @brief Retrieve the affiliate_stats API (if available)
fc::api<graphene::affiliate_stats::affiliate_stats_api> affiliate_stats() const;
/// @brief Retrieve the sidechain_api API (if available)
fc::api<graphene::peerplays_sidechain::sidechain_api> sidechain() const;
/// @brief Called to enable an API, not reflected.
void enable_api(const string &api_name);
private:
application &_app;
optional<fc::api<block_api>> _block_api;
optional<fc::api<database_api>> _database_api;
optional<fc::api<network_broadcast_api>> _network_broadcast_api;
optional<fc::api<network_node_api>> _network_node_api;
optional<fc::api<history_api>> _history_api;
optional<fc::api<asset_api>> _asset_api;
optional<fc::api<graphene::debug_witness::debug_api>> _debug_api;
optional<fc::api<graphene::bookie::bookie_api>> _bookie_api;
optional<fc::api<graphene::affiliate_stats::affiliate_stats_api>> _affiliate_stats_api;
optional<fc::api<graphene::peerplays_sidechain::sidechain_api>> _sidechain_api;
};
}} // namespace graphene::app
extern template class fc::api<graphene::app::login_api>;
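
An in-process sketch of the flow described above: authenticate once through login_api, then request the other API handles from it. The credentials are placeholders; a real node validates them against its api_access configuration.

#include <graphene/app/api.hpp>
#include <graphene/app/application.hpp>

// Hedged sketch: login first, then request the other APIs.
void query_node(graphene::app::application &app)
{
   graphene::app::login_api login(app);

   // Must succeed before any other API handle is handed out.
   if (!login.login("bob", "supersecret")) // placeholder credentials
      return;

   fc::api<graphene::app::database_api> db     = login.database();
   fc::api<graphene::app::asset_api>    assets = login.asset();
   fc::api<graphene::app::history_api>  hist   = login.history();

   // The handles forward calls to the underlying API objects, e.g.
   // assets->get_asset_holders_count("PPY");
}
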
// clang-format off
FC_REFLECT(graphene::app::network_broadcast_api::transaction_confirmation,
(id)(block_num)(trx_num)(trx))
FC_REFLECT(graphene::app::verify_range_result,
(success)(min_val)(max_val))
FC_REFLECT(graphene::app::verify_range_proof_rewind_result,
(success)(min_val)(max_val)(value_out)(blind_out)(message_out))
FC_REFLECT(graphene::app::account_asset_balance,
(name)(account_id)(amount));
FC_REFLECT(graphene::app::asset_holders,
(asset_id)(count));
FC_REFLECT( graphene::app::account_asset_balance, (name)(account_id)(amount) );
FC_REFLECT( graphene::app::asset_holders, (asset_id)(count) );
FC_API(graphene::app::history_api,
(get_account_history)
(get_account_history_operations)
(get_relative_account_history)
(get_fill_order_history)
(get_market_history)
(get_market_history_buckets)
(list_core_accounts))
(get_account_history)
(get_account_history_operations)
(get_relative_account_history)
(get_fill_order_history)
(get_market_history)
(get_market_history_buckets)
(list_core_accounts)
)
FC_API(graphene::app::block_api,
(get_blocks))
(get_blocks)
)
FC_API(graphene::app::network_broadcast_api,
(broadcast_transaction)
(broadcast_transaction_with_callback)
(broadcast_transaction_synchronous)
(broadcast_block))
(broadcast_transaction)
(broadcast_transaction_with_callback)
(broadcast_transaction_synchronous)
(broadcast_block)
)
FC_API(graphene::app::network_node_api,
(get_info)
(add_node)
(get_connected_peers)
(get_potential_peers)
(get_advanced_node_parameters)
(set_advanced_node_parameters)
(list_pending_transactions)
(subscribe_to_pending_transactions)
(unsubscribe_from_pending_transactions))
(get_info)
(add_node)
(get_connected_peers)
(get_potential_peers)
(get_advanced_node_parameters)
(set_advanced_node_parameters)
(list_pending_transactions)
(subscribe_to_pending_transactions)
(unsubscribe_from_pending_transactions)
)
FC_API(graphene::app::crypto_api,
(blind)
(blind_sum)
(verify_sum)
(verify_range)
(range_proof_sign)
(verify_range_proof_rewind)
(range_get_info)
)
FC_API(graphene::app::asset_api,
(get_asset_holders)
(get_asset_holders_count)
(get_all_asset_holders))
(get_asset_holders)
(get_asset_holders_count)
(get_all_asset_holders)
)
FC_API(graphene::app::login_api,
(login)
(block)
(network_broadcast)
(database)
(history)
(network_node)
(asset)
(debug)
(bookie)
(affiliate_stats)
(sidechain))
// clang-format on
(login)
(block)
(network_broadcast)
(database)
(history)
(network_node)
(crypto)
(asset)
(debug)
(bookie)
(affiliate_stats)
)


@ -31,26 +31,26 @@
namespace graphene { namespace app {
struct api_access_info {
struct api_access_info
{
std::string password_hash_b64;
std::string password_salt_b64;
std::vector<std::string> allowed_apis;
std::vector< std::string > allowed_apis;
};
struct api_access {
std::map<std::string, api_access_info> permission_map;
struct api_access
{
std::map< std::string, api_access_info > permission_map;
};
}} // namespace graphene::app
// clang-format off
} } // graphene::app
FC_REFLECT( graphene::app::api_access_info,
(password_hash_b64)
(password_salt_b64)
(allowed_apis))
(password_hash_b64)
(password_salt_b64)
(allowed_apis)
)
FC_REFLECT( graphene::app::api_access,
(permission_map))
// clang-format on
(permission_map)
)


@ -24,86 +24,76 @@
#pragma once
#include <graphene/app/api_access.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/net/node.hpp>
#include <graphene/chain/database.hpp>
#include <boost/program_options.hpp>
namespace graphene { namespace app {
namespace detail {
class application_impl;
}
using std::string;
namespace detail { class application_impl; }
using std::string;
class abstract_plugin;
class abstract_plugin;
class application {
public:
application();
~application();
class application
{
public:
application();
~application();
void set_program_options(boost::program_options::options_description &cli,
boost::program_options::options_description &cfg) const;
void initialize(const fc::path &data_dir, const boost::program_options::variables_map &options);
void initialize_plugins(const boost::program_options::variables_map &options);
void startup();
void shutdown();
void startup_plugins();
void shutdown_plugins();
void set_program_options( boost::program_options::options_description& command_line_options,
boost::program_options::options_description& configuration_file_options )const;
void initialize(const fc::path& data_dir, const boost::program_options::variables_map&options);
void initialize_plugins( const boost::program_options::variables_map& options );
void startup();
void shutdown();
void startup_plugins();
void shutdown_plugins();
template <typename PluginType>
std::shared_ptr<PluginType> register_plugin() {
auto plug = std::make_shared<PluginType>();
plug->plugin_set_app(this);
template<typename PluginType>
std::shared_ptr<PluginType> register_plugin()
{
auto plug = std::make_shared<PluginType>();
plug->plugin_set_app(this);
boost::program_options::options_description plugin_cli_options(plug->plugin_name() + " plugin. " + plug->plugin_description() + "\nOptions"), plugin_cfg_options;
plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options);
if (!plugin_cli_options.options().empty())
_cli_options.add(plugin_cli_options);
boost::program_options::options_description plugin_cli_options("Options for plugin " + plug->plugin_name()), plugin_cfg_options;
plug->plugin_set_program_options(plugin_cli_options, plugin_cfg_options);
if( !plugin_cli_options.options().empty() )
_cli_options.add(plugin_cli_options);
if( !plugin_cfg_options.options().empty() )
_cfg_options.add(plugin_cfg_options);
if (!plugin_cfg_options.options().empty()) {
std::string header_name = "plugin-cfg-header-" + plug->plugin_name();
std::string header_desc = plug->plugin_name() + " plugin options";
_cfg_options.add_options()(header_name.c_str(), header_desc.c_str());
_cfg_options.add(plugin_cfg_options);
}
add_plugin( plug->plugin_name(), plug );
return plug;
}
std::shared_ptr<abstract_plugin> get_plugin( const string& name )const;
add_available_plugin(plug);
return plug;
}
std::shared_ptr<abstract_plugin> get_plugin(const string &name) const;
template<typename PluginType>
std::shared_ptr<PluginType> get_plugin( const string& name ) const
{
std::shared_ptr<abstract_plugin> abs_plugin = get_plugin( name );
std::shared_ptr<PluginType> result = std::dynamic_pointer_cast<PluginType>( abs_plugin );
FC_ASSERT( result != std::shared_ptr<PluginType>() );
return result;
}
template <typename PluginType>
std::shared_ptr<PluginType> get_plugin(const string &name) const {
std::shared_ptr<abstract_plugin> abs_plugin = get_plugin(name);
std::shared_ptr<PluginType> result = std::dynamic_pointer_cast<PluginType>(abs_plugin);
FC_ASSERT(result != std::shared_ptr<PluginType>());
return result;
}
net::node_ptr p2p_node();
std::shared_ptr<chain::database> chain_database()const;
net::node_ptr p2p_node();
std::shared_ptr<chain::database> chain_database() const;
void set_block_production(bool producing_blocks);
fc::optional< api_access_info > get_api_access_info( const string& username )const;
void set_api_access_info(const string& username, api_access_info&& permissions);
void set_block_production(bool producing_blocks);
fc::optional<api_access_info> get_api_access_info(const string &username) const;
void set_api_access_info(const string &username, api_access_info &&permissions);
bool is_finished_syncing()const;
/// Emitted when syncing finishes (is_finished_syncing will return true)
boost::signals2::signal<void()> syncing_finished;
bool is_finished_syncing() const;
/// Emitted when syncing finishes (is_finished_syncing will return true)
boost::signals2::signal<void()> syncing_finished;
private:
void add_plugin( const string& name, std::shared_ptr<abstract_plugin> p );
std::shared_ptr<detail::application_impl> my;
void enable_plugin(const string &name);
boost::program_options::options_description _cli_options;
boost::program_options::options_description _cfg_options;
};
bool is_plugin_enabled(const string &name) const;
std::shared_ptr<fc::thread> elasticsearch_thread;
private:
void add_available_plugin(std::shared_ptr<abstract_plugin> p);
std::shared_ptr<detail::application_impl> my;
boost::program_options::options_description _cli_options;
boost::program_options::options_description _cfg_options;
};
}} // namespace graphene::app
} }
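
The registration and startup sequence above is normally driven from a node's main(): plugins are registered first so that their options become part of the command-line and configuration descriptions, then the application and its plugins are initialized and started. A hedged sketch of that sequence; the witness plugin and the data directory are placeholders.

#include <graphene/app/application.hpp>
#include <graphene/witness/witness.hpp> // placeholder plugin
#include <fc/filesystem.hpp>
#include <boost/program_options.hpp>

namespace bpo = boost::program_options;

int main(int argc, char **argv)
{
   graphene::app::application node;

   // Registration adds the plugin's options to the application's
   // CLI/config descriptions and records it as an available plugin.
   node.register_plugin<graphene::witness_plugin::witness_plugin>();

   bpo::options_description cli, cfg;
   node.set_program_options(cli, cfg);

   bpo::variables_map options;
   bpo::store(bpo::parse_command_line(argc, argv, cli), options);
   bpo::notify(options);

   node.initialize(fc::path("witness_node_data_dir"), options); // placeholder data dir
   node.initialize_plugins(options);

   node.startup();
   node.startup_plugins();

   // ... run until a shutdown is requested ...

   node.shutdown_plugins();
   node.shutdown();
   return 0;
}
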

File diff suppressed because it is too large


@ -24,53 +24,52 @@
#pragma once
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/market_evaluator.hpp>
#include <graphene/chain/vesting_balance_object.hpp>
#include <graphene/chain/market_evaluator.hpp>
#include <graphene/chain/withdraw_permission_object.hpp>
namespace graphene { namespace app {
using namespace graphene::chain;
using namespace graphene::chain;
struct full_account {
account_object account;
account_statistics_object statistics;
string registrar_name;
string referrer_name;
string lifetime_referrer_name;
vector<variant> votes;
optional<vesting_balance_object> cashback_balance;
vector<account_balance_object> balances;
vector<vesting_balance_object> vesting_balances;
vector<limit_order_object> limit_orders;
vector<call_order_object> call_orders;
vector<force_settlement_object> settle_orders;
vector<proposal_object> proposals;
vector<asset_id_type> assets;
vector<withdraw_permission_object> withdraws;
// vector<pending_dividend_payout_balance_object> pending_dividend_payments;
vector<pending_dividend_payout_balance_for_holder_object> pending_dividend_payments;
};
struct full_account
{
account_object account;
account_statistics_object statistics;
string registrar_name;
string referrer_name;
string lifetime_referrer_name;
vector<variant> votes;
optional<vesting_balance_object> cashback_balance;
vector<account_balance_object> balances;
vector<vesting_balance_object> vesting_balances;
vector<limit_order_object> limit_orders;
vector<call_order_object> call_orders;
vector<force_settlement_object> settle_orders;
vector<proposal_object> proposals;
vector<asset_id_type> assets;
vector<withdraw_permission_object> withdraws;
// vector<pending_dividend_payout_balance_object> pending_dividend_payments;
vector<pending_dividend_payout_balance_for_holder_object> pending_dividend_payments;
};
}} // namespace graphene::app
} }
// clang-format off
FC_REFLECT(graphene::app::full_account,
(account)
(statistics)
(registrar_name)
(referrer_name)
(lifetime_referrer_name)
(votes)
(cashback_balance)
(balances)
(vesting_balances)
(limit_orders)
(call_orders)
(settle_orders)
(proposals)
(assets)
(withdraws)
(pending_dividend_payments))
// clang-format on
FC_REFLECT( graphene::app::full_account,
(account)
(statistics)
(registrar_name)
(referrer_name)
(lifetime_referrer_name)
(votes)
(cashback_balance)
(balances)
(vesting_balances)
(limit_orders)
(call_orders)
(settle_orders)
(proposals)
(assets)
(withdraws)
(proposals)
(pending_dividend_payments)
)


@ -28,14 +28,15 @@
#include <graphene/chain/protocol/transaction.hpp>
#include <graphene/chain/protocol/types.hpp>
namespace graphene { namespace chain {
namespace graphene { namespace app {
void operation_get_impacted_accounts( const graphene::chain::operation& op,
fc::flat_set<graphene::chain::account_id_type>& result,
bool ignore_custom_operation_required_auths );
void operation_get_impacted_accounts(
const graphene::chain::operation& op,
fc::flat_set<graphene::chain::account_id_type>& result );
void transaction_get_impacted_accounts( const graphene::chain::transaction& tx,
fc::flat_set<graphene::chain::account_id_type>& result,
bool ignore_custom_operation_required_auths );
void transaction_get_impacted_accounts(
const graphene::chain::transaction& tx,
fc::flat_set<graphene::chain::account_id_type>& result
);
} } // graphene::app


@ -30,119 +30,108 @@
namespace graphene { namespace app {
class abstract_plugin {
public:
virtual ~abstract_plugin() {
}
virtual std::string plugin_name() const = 0;
virtual std::string plugin_description() const = 0;
class abstract_plugin
{
public:
virtual ~abstract_plugin(){}
virtual std::string plugin_name()const = 0;
/**
* @brief Perform early startup routines and register plugin indexes, callbacks, etc.
*
* Plugins MUST supply a method initialize() which will be called early in the application startup. This method
* should contain early setup code such as initializing variables, adding indexes to the database, registering
* callback methods from the database, adding APIs, etc., as well as applying any options in the @ref options map
*
* This method is called BEFORE the database is open, therefore any routines which require any chain state MUST
* NOT be called by this method. These routines should be performed in startup() instead.
*
* @param options The options passed to the application, via configuration files or command line
*/
virtual void plugin_initialize(const boost::program_options::variables_map &options) = 0;
/**
* @brief Perform early startup routines and register plugin indexes, callbacks, etc.
*
* Plugins MUST supply a method initialize() which will be called early in the application startup. This method
* should contain early setup code such as initializing variables, adding indexes to the database, registering
* callback methods from the database, adding APIs, etc., as well as applying any options in the @ref options map
*
* This method is called BEFORE the database is open, therefore any routines which require any chain state MUST
* NOT be called by this method. These routines should be performed in startup() instead.
*
* @param options The options passed to the application, via configuration files or command line
*/
virtual void plugin_initialize( const boost::program_options::variables_map& options ) = 0;
/**
* @brief Begin normal runtime operations
*
* Plugins MUST supply a method startup() which will be called at the end of application startup. This method
* should contain code which schedules any tasks, or requires chain state.
*/
virtual void plugin_startup() = 0;
/**
* @brief Begin normal runtime operations
*
* Plugins MUST supply a method startup() which will be called at the end of application startup. This method
* should contain code which schedules any tasks, or requires chain state.
*/
virtual void plugin_startup() = 0;
/**
* @brief Cleanly shut down the plugin.
*
* This is called to request a clean shutdown (e.g. due to SIGINT or SIGTERM).
*/
virtual void plugin_shutdown() = 0;
/**
* @brief Cleanly shut down the plugin.
*
* This is called to request a clean shutdown (e.g. due to SIGINT or SIGTERM).
*/
virtual void plugin_shutdown() = 0;
/**
* @brief Register the application instance with the plugin.
*
* This is called by the framework to set the application.
*/
virtual void plugin_set_app(application *a) = 0;
/**
* @brief Register the application instance with the plugin.
*
* This is called by the framework to set the application.
*/
virtual void plugin_set_app( application* a ) = 0;
/**
* @brief Fill in command line parameters used by the plugin.
*
* @param command_line_options All options this plugin supports taking on the command-line
* @param config_file_options All options this plugin supports storing in a configuration file
*
* This method populates its arguments with any
* command-line and configuration file options the plugin supports.
* If a plugin does not need these options, it
* may simply provide an empty implementation of this method.
*/
virtual void plugin_set_program_options(boost::program_options::options_description &command_line_options,
boost::program_options::options_description &config_file_options) = 0;
/**
* @brief Fill in command line parameters used by the plugin.
*
* @param command_line_options All options this plugin supports taking on the command-line
* @param config_file_options All options this plugin supports storing in a configuration file
*
* This method populates its arguments with any
* command-line and configuration file options the plugin supports.
* If a plugin does not need these options, it
* may simply provide an empty implementation of this method.
*/
virtual void plugin_set_program_options(
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
) = 0;
};
/**
* Provides basic default implementations of abstract_plugin functions.
*/
class plugin : public abstract_plugin {
public:
plugin();
virtual ~plugin() override;
class plugin : public abstract_plugin
{
public:
plugin();
virtual ~plugin() override;
virtual std::string plugin_name() const override;
virtual std::string plugin_description() const override;
virtual void plugin_initialize(const boost::program_options::variables_map &options) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
virtual void plugin_set_app(application *app) override;
virtual void plugin_set_program_options(boost::program_options::options_description &command_line_options,
boost::program_options::options_description &config_file_options) override;
virtual std::string plugin_name()const override;
virtual void plugin_initialize( const boost::program_options::variables_map& options ) override;
virtual void plugin_startup() override;
virtual void plugin_shutdown() override;
virtual void plugin_set_app( application* app ) override;
virtual void plugin_set_program_options(
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
) override;
chain::database &database() {
return *app().chain_database();
}
application &app() const {
assert(_app);
return *_app;
}
chain::database& database() { return *app().chain_database(); }
application& app()const { assert(_app); return *_app; }
protected:
net::node& p2p_node() { return *app().p2p_node(); }
protected:
net::node &p2p_node() {
return *app().p2p_node();
}
private:
application *_app = nullptr;
private:
application* _app = nullptr;
};
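
The lifecycle contract documented above (collect options, initialize before the database is open, start up once chain state is available, shut down cleanly) is easiest to see in a skeleton. A hedged sketch of a do-nothing plugin built on the default plugin base class; the plugin name and option are hypothetical.

#include <graphene/app/plugin.hpp>
#include <fc/log/logger.hpp>
#include <boost/program_options.hpp>

namespace graphene { namespace example_plugin {

class example_plugin : public graphene::app::plugin
{
   public:
      std::string plugin_name() const override { return "example"; }

      void plugin_set_program_options(
            boost::program_options::options_description &cli,
            boost::program_options::options_description &cfg) override
      {
         cli.add_options()
               ("example-greeting", boost::program_options::value<std::string>(),
                "Greeting logged on startup");
         cfg.add(cli);
      }

      // Called before the database is open: only read options here.
      void plugin_initialize(const boost::program_options::variables_map &options) override
      {
         if (options.count("example-greeting"))
            _greeting = options["example-greeting"].as<std::string>();
      }

      // Called once chain state is available.
      void plugin_startup() override
      {
         ilog("example plugin says: ${g}", ("g", _greeting));
      }

      void plugin_shutdown() override {}

   private:
      std::string _greeting = "hello";
};

}} // namespace graphene::example_plugin
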
/// @group Some useful tools for boost::program_options arguments using vectors of JSON strings
/// @{
template <typename T>
T dejsonify(const string &s, uint32_t max_depth) {
return fc::json::from_string(s).as<T>(max_depth);
template<typename T>
T dejsonify(const string& s)
{
return fc::json::from_string(s).as<T>();
}
namespace impl {
template <typename T>
T dejsonify(const string &s) {
return graphene::app::dejsonify<T>(s, GRAPHENE_MAX_NESTED_OBJECTS);
}
} // namespace impl
#define DEFAULT_VALUE_VECTOR(value) default_value({fc::json::to_string(value)}, fc::json::to_string(value))
#define LOAD_VALUE_SET(options, name, container, type) \
if (options.count(name)) { \
const std::vector<std::string> &ops = options[name].as<std::vector<std::string>>(); \
std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::impl::dejsonify<type>); \
}
#define LOAD_VALUE_SET(options, name, container, type) \
if( options.count(name) ) { \
const std::vector<std::string>& ops = options[name].as<std::vector<std::string>>(); \
std::transform(ops.begin(), ops.end(), std::inserter(container, container.end()), &graphene::app::dejsonify<type>); \
}
/// @}
}} // namespace graphene::app
} } //graphene::app
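
For example, a plugin option declared as a vector of JSON strings can be turned into a typed container with one LOAD_VALUE_SET line inside plugin_initialize. A hedged sketch; the option name "tracked-accounts" and the container are hypothetical, and the option is assumed to have been declared in plugin_set_program_options as a composing std::vector<std::string> value.

#include <graphene/app/plugin.hpp>
#include <graphene/chain/protocol/types.hpp>
#include <set>

std::set<graphene::chain::account_id_type> tracked_accounts;

void example_plugin_initialize(const boost::program_options::variables_map &options)
{
   // Parses each JSON string (e.g. "\"1.2.17\"") into an account_id_type
   // and inserts it into the container.
   LOAD_VALUE_SET(options, "tracked-accounts", tracked_accounts, graphene::chain::account_id_type);
}
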


@ -27,44 +27,49 @@
namespace graphene { namespace app {
plugin::plugin() {
plugin::plugin()
{
_app = nullptr;
return;
}
plugin::~plugin() {
plugin::~plugin()
{
return;
}
std::string plugin::plugin_name() const {
std::string plugin::plugin_name()const
{
return "<unknown plugin>";
}
std::string plugin::plugin_description() const {
return "<no description>";
}
void plugin::plugin_initialize(const boost::program_options::variables_map &options) {
void plugin::plugin_initialize( const boost::program_options::variables_map& options )
{
return;
}
void plugin::plugin_startup() {
void plugin::plugin_startup()
{
return;
}
void plugin::plugin_shutdown() {
void plugin::plugin_shutdown()
{
return;
}
void plugin::plugin_set_app(application *app) {
void plugin::plugin_set_app( application* app )
{
_app = app;
return;
}
void plugin::plugin_set_program_options(
boost::program_options::options_description &cli,
boost::program_options::options_description &cfg) {
boost::program_options::options_description& command_line_options,
boost::program_options::options_description& config_file_options
)
{
return;
}
}} // namespace graphene::app
} } // graphene::app

libraries/chain/CMakeLists.txt Executable file → Normal file

@ -8,29 +8,118 @@ add_dependencies( build_hardfork_hpp cat-parts )
file(GLOB HEADERS "include/graphene/chain/*.hpp")
file(GLOB PROTOCOL_HEADERS "include/graphene/chain/protocol/*.hpp")
file(GLOB CPP_FILES "*.cpp")
file(GLOB PROTOCOL_CPP_FILES "protocol/*.cpp")
#if( GRAPHENE_DISABLE_UNITY_BUILD )
list(FILTER CPP_FILES EXCLUDE REGEX "[/]database[.]cpp$")
#message ("--- ${CPP_FILES}")
if( GRAPHENE_DISABLE_UNITY_BUILD )
set( GRAPHENE_DB_FILES
db_balance.cpp
db_bet.cpp
db_block.cpp
db_debug.cpp
db_getter.cpp
db_init.cpp
db_maint.cpp
db_management.cpp
db_market.cpp
db_update.cpp
db_witness_schedule.cpp
)
message( STATUS "Graphene database unity build disabled" )
#else( GRAPHENE_DISABLE_UNITY_BUILD )
# list(FILTER CPP_FILES EXCLUDE REGEX ".*db_.*[.]cpp$")
# #message ("--- ${CPP_FILES}")
# message( STATUS "Graphene database unity build enabled" )
#endif( GRAPHENE_DISABLE_UNITY_BUILD )
else( GRAPHENE_DISABLE_UNITY_BUILD )
set( GRAPHENE_DB_FILES
database.cpp )
message( STATUS "Graphene database unity build enabled" )
endif( GRAPHENE_DISABLE_UNITY_BUILD )
## SORT .cpp by most likely to change / break compile
add_library( graphene_chain
${CPP_FILES}
${PROTOCOL_CPP_FILES}
# As database takes the longest to compile, start it first
${GRAPHENE_DB_FILES}
fork_database.cpp
protocol/types.cpp
protocol/address.cpp
protocol/authority.cpp
protocol/asset.cpp
protocol/assert.cpp
protocol/account.cpp
protocol/transfer.cpp
protocol/committee_member.cpp
protocol/witness.cpp
protocol/market.cpp
protocol/proposal.cpp
protocol/withdraw_permission.cpp
protocol/asset_ops.cpp
protocol/lottery_ops.cpp
protocol/memo.cpp
protocol/worker.cpp
protocol/custom.cpp
protocol/operations.cpp
protocol/transaction.cpp
protocol/block.cpp
protocol/fee_schedule.cpp
protocol/confidential.cpp
protocol/vote.cpp
protocol/tournament.cpp
genesis_state.cpp
get_config.cpp
pts_address.cpp
evaluator.cpp
balance_evaluator.cpp
account_evaluator.cpp
assert_evaluator.cpp
witness_evaluator.cpp
committee_member_evaluator.cpp
asset_evaluator.cpp
lottery_evaluator.cpp
transfer_evaluator.cpp
proposal_evaluator.cpp
market_evaluator.cpp
vesting_balance_evaluator.cpp
tournament_evaluator.cpp
tournament_object.cpp
match_object.cpp
game_object.cpp
withdraw_permission_evaluator.cpp
worker_evaluator.cpp
confidential_evaluator.cpp
special_authority.cpp
buyback.cpp
account_object.cpp
asset_object.cpp
fba_object.cpp
proposal_object.cpp
vesting_balance_object.cpp
block_database.cpp
is_authorized_asset.cpp
protocol/sport.cpp
sport_evaluator.cpp
protocol/event_group.cpp
event_group_evaluator.cpp
event_group_object.cpp
protocol/event.cpp
event_evaluator.cpp
event_object.cpp
protocol/betting_market.cpp
betting_market_evaluator.cpp
betting_market_object.cpp
betting_market_group_object.cpp
affiliate_payout.cpp
${HEADERS}
${PROTOCOL_HEADERS}
"${CMAKE_CURRENT_BINARY_DIR}/include/graphene/chain/hardfork.hpp"
)
add_dependencies( graphene_chain build_hardfork_hpp )
target_link_libraries( graphene_chain graphene_db )
target_link_libraries( graphene_chain fc graphene_db )
target_include_directories( graphene_chain
PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include" )


@ -22,6 +22,8 @@
* THE SOFTWARE.
*/
#include <fc/smart_ref_impl.hpp>
#include <graphene/chain/account_evaluator.hpp>
#include <graphene/chain/buyback.hpp>
#include <graphene/chain/buyback_object.hpp>
@ -53,54 +55,7 @@ void verify_authority_accounts( const database& db, const authority& a )
}
}
// Copies the num_son values from origin into destination for the sidechains present in origin.
// Keeps the destination's existing num_son values for sidechains that are not present in origin.
// Returns false if origin contains a sidechain that is not active at the given head block time.
bool merge_num_sons( flat_map<sidechain_type, uint16_t>& destination,
const flat_map<sidechain_type, uint16_t>& origin,
fc::optional<time_point_sec> head_block_time = {})
{
const auto active_sidechains = head_block_time.valid() ? active_sidechain_types(*head_block_time) : all_sidechain_types;
bool success = true;
for (const auto &ns : origin)
{
destination[ns.first] = ns.second;
if (active_sidechains.find(ns.first) == active_sidechains.end())
{
success = false;
}
}
return success;
}
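
For illustration, assuming bitcoin and hive are the only sidechains active at head_block_time (an fc::time_point_sec for the current head block), the merge behaves as follows; the counts are made up.

// destination: the account's current per-sidechain SON counts
flat_map<sidechain_type, uint16_t> destination{ { sidechain_type::bitcoin, 3 },
                                                { sidechain_type::hive,    1 } };
// origin: the counts supplied by the incoming operation
flat_map<sidechain_type, uint16_t> origin{ { sidechain_type::bitcoin, 5 } };

// Afterwards destination == { bitcoin: 5, hive: 1 }: the hive entry is kept
// because origin does not mention it. The call returns true because every
// sidechain named in origin is active; an entry for an inactive sidechain
// would still be copied but would make the call return false.
bool ok = merge_num_sons( destination, origin, head_block_time );
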
flat_map<sidechain_type, uint16_t> count_SON_votes_per_sidechain( const flat_set<vote_id_type>& votes )
{
flat_map<sidechain_type, uint16_t> SON_votes_per_sidechain = account_options::ext::empty_num_son();
for (const auto &vote : votes)
{
switch (vote.type())
{
case vote_id_type::son_bitcoin:
SON_votes_per_sidechain[sidechain_type::bitcoin]++;
break;
case vote_id_type::son_hive:
SON_votes_per_sidechain[sidechain_type::hive]++;
break;
case vote_id_type::son_ethereum:
SON_votes_per_sidechain[sidechain_type::ethereum]++;
break;
default:
break;
}
}
return SON_votes_per_sidechain;
}
void verify_account_votes( const database& db, const account_options& options, fc::optional<account_object> account = {} )
void verify_account_votes( const database& db, const account_options& options )
{
// ensure account's votes satisfy requirements
// NB only the part of vote checking that requires chain state is here,
@ -109,47 +64,10 @@ void verify_account_votes( const database& db, const account_options& options, f
const auto& gpo = db.get_global_properties();
const auto& chain_params = gpo.parameters;
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
FC_ASSERT( options.num_witness <= chain_params.maximum_witness_count,
"Voted for more witnesses than currently allowed (${c})", ("c", chain_params.maximum_witness_count) );
FC_ASSERT( options.num_committee <= chain_params.maximum_committee_count,
"Voted for more committee members than currently allowed (${c})", ("c", chain_params.maximum_committee_count) );
FC_ASSERT( chain_params.extensions.value.maximum_son_count.valid() , "Invalid maximum son count" );
flat_map<sidechain_type, uint16_t> merged_num_sons = account_options::ext::empty_num_son();
// Merge with existing account if exists
if ( account.valid() && account->options.extensions.value.num_son.valid())
{
merge_num_sons( merged_num_sons, *account->options.extensions.value.num_son, db.head_block_time() );
}
// Apply update operation on top
if ( options.extensions.value.num_son.valid() )
{
merge_num_sons( merged_num_sons, *options.extensions.value.num_son, db.head_block_time() );
}
for(const auto& num_sons : merged_num_sons)
{
FC_ASSERT( num_sons.second <= *chain_params.extensions.value.maximum_son_count,
"Voted for more sons than currently allowed (${c})", ("c", *chain_params.extensions.value.maximum_son_count) );
}
// Count the votes for SONs and confirm that the account did not vote for less SONs than num_son
flat_map<sidechain_type, uint16_t> SON_votes_per_sidechain = count_SON_votes_per_sidechain(options.votes);
for (const auto& number_of_votes : SON_votes_per_sidechain)
{
// Number of votes of account_options are also checked in account_options::do_evaluate,
// but there we are checking the value before merging num_sons, so the values should be checked again
const auto sidechain = number_of_votes.first;
FC_ASSERT( number_of_votes.second >= merged_num_sons[sidechain],
"Voted for less sons than specified in num_son (votes ${v} < num_son ${ns}) for sidechain ${s}",
("v", number_of_votes.second) ("ns", merged_num_sons[sidechain]) ("s", sidechain) );
}
FC_ASSERT( db.find_object(options.voting_account), "Invalid proxy account specified." );
uint32_t max_vote_id = gpo.next_available_vote_id;
@ -192,8 +110,6 @@ void_result account_create_evaluator::do_evaluate( const account_create_operatio
}
if( d.head_block_time() < HARDFORK_999_TIME )
FC_ASSERT( !op.extensions.value.affiliate_distributions.valid(), "Affiliate reward distributions not allowed yet" );
if (d.head_block_time() < HARDFORK_SON_TIME)
FC_ASSERT(op.name != "son-account", "Son account creation before SON hardfork");
FC_ASSERT( fee_paying_account->is_lifetime_member(), "Only Lifetime members may register an account." );
FC_ASSERT( op.referrer(d).is_member(d.head_block_time()), "The referrer must be either a lifetime or annual subscriber." );
@ -246,46 +162,33 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio
if( referrer_percent > GRAPHENE_100_PERCENT )
referrer_percent = GRAPHENE_100_PERCENT;
}
const auto& global_properties = d.get_global_properties();
const auto& new_acnt_object = d.create<account_object>( [&o,&d,&global_properties,referrer_percent]( account_object& obj )
{
obj.registrar = o.registrar;
obj.referrer = o.referrer;
obj.lifetime_referrer = o.referrer(d).lifetime_referrer;
const auto& new_acnt_object = db().create<account_object>( [&]( account_object& obj ){
obj.registrar = o.registrar;
obj.referrer = o.referrer;
obj.lifetime_referrer = o.referrer(db()).lifetime_referrer;
const auto& params = global_properties.parameters;
obj.network_fee_percentage = params.network_percent_of_fee;
obj.lifetime_referrer_fee_percentage = params.lifetime_referrer_percent_of_fee;
obj.referrer_rewards_percentage = referrer_percent;
auto& params = db().get_global_properties().parameters;
obj.network_fee_percentage = params.network_percent_of_fee;
obj.lifetime_referrer_fee_percentage = params.lifetime_referrer_percent_of_fee;
obj.referrer_rewards_percentage = referrer_percent;
obj.name = o.name;
obj.owner = o.owner;
obj.active = o.active;
obj.options = o.options;
obj.name = o.name;
obj.owner = o.owner;
obj.active = o.active;
obj.options = o.options;
obj.statistics = db().create<account_statistics_object>([&](account_statistics_object& s){s.owner = obj.id;}).id;
obj.options.extensions.value.num_son = account_options::ext::empty_num_son();
if ( o.options.extensions.value.num_son.valid() )
{
merge_num_sons( *obj.options.extensions.value.num_son, *o.options.extensions.value.num_son );
}
obj.statistics = d.create<account_statistics_object>([&obj](account_statistics_object& s){
s.owner = obj.id;
s.name = obj.name;
s.is_voting = obj.options.is_voting();
}).id;
if( o.extensions.value.owner_special_authority.valid() )
obj.owner_special_authority = *(o.extensions.value.owner_special_authority);
if( o.extensions.value.active_special_authority.valid() )
obj.active_special_authority = *(o.extensions.value.active_special_authority);
if( o.extensions.value.buyback_options.valid() )
{
obj.allowed_assets = o.extensions.value.buyback_options->markets;
obj.allowed_assets->emplace( o.extensions.value.buyback_options->asset_to_buy );
}
obj.affiliate_distributions = o.extensions.value.affiliate_distributions;
if( o.extensions.value.owner_special_authority.valid() )
obj.owner_special_authority = *(o.extensions.value.owner_special_authority);
if( o.extensions.value.active_special_authority.valid() )
obj.active_special_authority = *(o.extensions.value.active_special_authority);
if( o.extensions.value.buyback_options.valid() )
{
obj.allowed_assets = o.extensions.value.buyback_options->markets;
obj.allowed_assets->emplace( o.extensions.value.buyback_options->asset_to_buy );
}
obj.affiliate_distributions = o.extensions.value.affiliate_distributions;
});
if( has_small_percent )
@ -297,18 +200,17 @@ object_id_type account_create_evaluator::do_apply( const account_create_operatio
wlog( "Affected account object is ${o}", ("o", new_acnt_object) );
}
const auto& dynamic_properties = d.get_dynamic_global_properties();
d.modify(dynamic_properties, [](dynamic_global_property_object& p) {
const auto& dynamic_properties = db().get_dynamic_global_properties();
db().modify(dynamic_properties, [](dynamic_global_property_object& p) {
++p.accounts_registered_this_interval;
});
if( dynamic_properties.accounts_registered_this_interval % global_properties.parameters.accounts_per_fee_scale == 0
&& global_properties.parameters.account_fee_scale_bitshifts != 0 )
{
d.modify(global_properties, [&dynamic_properties](global_property_object& p) {
const auto& global_properties = db().get_global_properties();
if( dynamic_properties.accounts_registered_this_interval %
global_properties.parameters.accounts_per_fee_scale == 0 )
db().modify(global_properties, [&dynamic_properties](global_property_object& p) {
p.parameters.current_fees->get<account_create_operation>().basic_fee <<= p.parameters.account_fee_scale_bitshifts;
});
}
if( o.extensions.value.owner_special_authority.valid()
|| o.extensions.value.active_special_authority.valid() )
@ -369,7 +271,7 @@ void_result account_update_evaluator::do_evaluate( const account_update_operatio
acnt = &o.account(d);
if( o.new_options.valid() )
verify_account_votes( d, *o.new_options, *acnt );
verify_account_votes( d, *o.new_options );
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
@ -378,26 +280,18 @@ void_result account_update_evaluator::do_apply( const account_update_operation&
{ try {
database& d = db();
bool sa_before = acnt->has_special_authority();
// update account statistics
if( o.new_options.valid() )
{
d.modify( acnt->statistics( d ), [&]( account_statistics_object& aso )
{
fc::optional< bool > flag = o.extensions.value.update_last_voting_time;
if((o.new_options->votes != acnt->options.votes ||
o.new_options->voting_account != acnt->options.voting_account) ||
(flag.valid() && *flag))
o.new_options->voting_account != acnt->options.voting_account))
aso.last_vote_time = d.head_block_time();
if(o.new_options->is_voting() != acnt->options.is_voting())
aso.is_voting = !aso.is_voting;
} );
}
// update account object
d.modify( *acnt, [&o](account_object& a){
bool sa_before, sa_after;
d.modify( *acnt, [&](account_object& a){
if( o.owner )
{
a.owner = *o.owner;
@ -408,31 +302,8 @@ void_result account_update_evaluator::do_apply( const account_update_operation&
a.active = *o.active;
a.top_n_control_flags = 0;
}
// New num_son structure initialized to 0
flat_map<sidechain_type, uint16_t> new_num_son = account_options::ext::empty_num_son();
// If num_son of existing object is valid, we should merge the existing data
if ( a.options.extensions.value.num_son.valid() )
{
merge_num_sons( new_num_son, *a.options.extensions.value.num_son );
}
// If num_son of the operation are valid, they should merge the existing data
if ( o.new_options )
{
const auto new_options = *o.new_options;
if ( new_options.extensions.value.num_son.valid() )
{
merge_num_sons( new_num_son, *new_options.extensions.value.num_son );
}
a.options = *o.new_options;
}
a.options.extensions.value.num_son = new_num_son;
if( o.new_options ) a.options = *o.new_options;
sa_before = a.has_special_authority();
if( o.extensions.value.owner_special_authority.valid() )
{
a.owner_special_authority = *(o.extensions.value.owner_special_authority);
@ -443,10 +314,9 @@ void_result account_update_evaluator::do_apply( const account_update_operation&
a.active_special_authority = *(o.extensions.value.active_special_authority);
a.top_n_control_flags = 0;
}
sa_after = a.has_special_authority();
});
bool sa_after = acnt->has_special_authority();
if( sa_before & (!sa_after) )
{
const auto& sa_idx = d.get_index_type< special_authority_index >().indices().get<by_account>();


@ -22,9 +22,9 @@
* THE SOFTWARE.
*/
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/database.hpp>
#include <fc/io/raw.hpp>
#include <graphene/chain/hardfork.hpp>
#include <fc/uint128.hpp>
namespace graphene { namespace chain {
@ -46,8 +46,6 @@ void account_balance_object::adjust_balance(const asset& delta)
{
assert(delta.asset_id == asset_type);
balance += delta.amount;
if( asset_type == asset_id_type() ) // CORE asset
maintenance_flag = true;
}
void account_statistics_object::process_fees(const account_object& a, database& d) const
@ -59,8 +57,8 @@ void account_statistics_object::process_fees(const account_object& a, database&
// Check the referrer -- if he's no longer a member, pay to the lifetime referrer instead.
// No need to check the registrar; registrars are required to be lifetime members.
if( account.referrer(d).is_basic_account(d.head_block_time()) )
d.modify( account, [](account_object& acc) {
acc.referrer = acc.lifetime_referrer;
d.modify(account, [](account_object& a) {
a.referrer = a.lifetime_referrer;
});
share_type network_cut = cut_fee(core_fee_total, account.network_fee_percentage);
@ -76,8 +74,8 @@ void account_statistics_object::process_fees(const account_object& a, database&
share_type lifetime_cut = cut_fee(core_fee_total, account.lifetime_referrer_fee_percentage);
share_type referral = core_fee_total - network_cut - lifetime_cut;
d.modify( d.get_core_dynamic_data(), [network_cut](asset_dynamic_data_object& addo) {
addo.accumulated_fees += network_cut;
d.modify(asset_dynamic_data_id_type()(d), [network_cut](asset_dynamic_data_object& d) {
d.accumulated_fees += network_cut;
});
// Potential optimization: Skip some of this math and object lookups by special casing on the account type.
@ -121,9 +119,9 @@ set<account_id_type> account_member_index::get_account_members(const account_obj
result.insert(auth.first);
return result;
}
set<public_key_type, account_member_index::key_compare> account_member_index::get_key_members(const account_object& a)const
set<public_key_type> account_member_index::get_key_members(const account_object& a)const
{
set<public_key_type, key_compare> result;
set<public_key_type> result;
for( auto auth : a.owner.key_auths )
result.insert(auth.first);
for( auto auth : a.active.key_auths )
@ -215,7 +213,7 @@ void account_member_index::object_modified(const object& after)
{
set<public_key_type, key_compare> after_key_members = get_key_members(a);
set<public_key_type> after_key_members = get_key_members(a);
vector<public_key_type> removed; removed.reserve(before_key_members.size());
std::set_difference(before_key_members.begin(), before_key_members.end(),
@ -269,59 +267,4 @@ void account_referrer_index::object_modified( const object& after )
{
}
const uint8_t balances_by_account_index::bits = 20;
const uint64_t balances_by_account_index::mask = (1ULL << balances_by_account_index::bits) - 1;
void balances_by_account_index::object_inserted( const object& obj )
{
const auto& abo = dynamic_cast< const account_balance_object& >( obj );
while( balances.size() < (abo.owner.instance.value >> bits) + 1 )
{
balances.reserve( (abo.owner.instance.value >> bits) + 1 );
balances.resize( balances.size() + 1 );
balances.back().resize( 1ULL << bits );
}
balances[abo.owner.instance.value >> bits][abo.owner.instance.value & mask][abo.asset_type] = &abo;
}
void balances_by_account_index::object_removed( const object& obj )
{
const auto& abo = dynamic_cast< const account_balance_object& >( obj );
if( balances.size() < (abo.owner.instance.value >> bits) + 1 ) return;
balances[abo.owner.instance.value >> bits][abo.owner.instance.value & mask].erase( abo.asset_type );
}
void balances_by_account_index::about_to_modify( const object& before )
{
ids_being_modified.emplace( before.id );
}
void balances_by_account_index::object_modified( const object& after )
{
FC_ASSERT( ids_being_modified.top() == after.id, "Modification of ID is not supported!");
ids_being_modified.pop();
}
const map< asset_id_type, const account_balance_object* >& balances_by_account_index::get_account_balances( const account_id_type& acct )const
{
static const map< asset_id_type, const account_balance_object* > _empty;
if( balances.size() < (acct.instance.value >> bits) + 1 ) return _empty;
return balances[acct.instance.value >> bits][acct.instance.value & mask];
}
const account_balance_object* balances_by_account_index::get_account_balance( const account_id_type& acct, const asset_id_type& asset )const
{
if( balances.size() < (acct.instance.value >> bits) + 1 ) return nullptr;
const auto& mine = balances[acct.instance.value >> bits][acct.instance.value & mask];
const auto itr = mine.find( asset );
if( mine.end() == itr ) return nullptr;
return itr->second;
}
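
The two-level lookup above splits an account's instance number into a 20-bit inner slot and an outer bucket index, so a balance map can be reached with two vector subscripts instead of a tree search. A small self-contained sketch of the arithmetic; the account number is illustrative.

#include <cassert>
#include <cstdint>

int main()
{
   const uint8_t  bits = 20;                 // as in balances_by_account_index
   const uint64_t mask = (1ULL << bits) - 1; // 0xFFFFF

   uint64_t instance = 1500000;              // e.g. account 1.2.1500000
   uint64_t outer = instance >> bits;        // which inner vector (bucket)
   uint64_t inner = instance & mask;         // slot inside that bucket

   assert(outer == 1 && inner == 451424);
   return 0;
}
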
} } // graphene::chain
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::account_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::account_balance_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::account_statistics_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::pending_dividend_payout_balance_for_holder_object )


@ -1,162 +0,0 @@
#include <graphene/chain/account_role_evaluator.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/rbac_hardfork_visitor.hpp>
namespace graphene
{
namespace chain
{
void_result account_role_create_evaluator::do_evaluate(const account_role_create_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner(d);
rbac_operation_hardfork_visitor arvtor(now);
for (const auto &op_type : op.allowed_operations)
{
arvtor(op_type);
}
for (const auto &acc : op.whitelisted_accounts)
{
acc(d);
}
FC_ASSERT(op.valid_to > now, "valid_to expiry should be in future");
FC_ASSERT((op.valid_to - now) <= fc::seconds(d.get_global_properties().parameters.account_roles_max_lifetime()), "Validity of the account role beyond max expiry");
const auto &ar_idx = d.get_index_type<account_role_index>().indices().get<by_owner>();
auto aro_range = ar_idx.equal_range(op.owner);
FC_ASSERT(std::distance(aro_range.first, aro_range.second) < d.get_global_properties().parameters.account_roles_max_per_account(), "Max account roles that can be created by one owner is reached");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
object_id_type account_role_create_evaluator::do_apply(const account_role_create_operation &op)
{
try
{
database &d = db();
return d.create<account_role_object>([&op](account_role_object &obj) mutable {
obj.owner = op.owner;
obj.name = op.name;
obj.metadata = op.metadata;
obj.allowed_operations = op.allowed_operations;
obj.whitelisted_accounts = op.whitelisted_accounts;
obj.valid_to = op.valid_to;
})
.id;
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result account_role_update_evaluator::do_evaluate(const account_role_update_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner(d);
const account_role_object &aobj = op.account_role_id(d);
FC_ASSERT(aobj.owner == op.owner, "Only owner account can update account role object");
for (const auto &op_type : op.allowed_operations_to_remove)
{
FC_ASSERT(aobj.allowed_operations.find(op_type) != aobj.allowed_operations.end(),
"Cannot remove non existent operation");
}
for (const auto &acc : op.accounts_to_remove)
{
FC_ASSERT(aobj.whitelisted_accounts.find(acc) != aobj.whitelisted_accounts.end(),
"Cannot remove non existent account");
}
rbac_operation_hardfork_visitor arvtor(now);
for (const auto &op_type : op.allowed_operations_to_add)
{
arvtor(op_type);
}
FC_ASSERT((aobj.allowed_operations.size() + op.allowed_operations_to_add.size() - op.allowed_operations_to_remove.size()) > 0, "Allowed operations should be positive");
for (const auto &acc : op.accounts_to_add)
{
acc(d);
}
FC_ASSERT((aobj.whitelisted_accounts.size() + op.accounts_to_add.size() - op.accounts_to_remove.size()) > 0, "Accounts should be positive");
if (op.valid_to)
{
FC_ASSERT(*op.valid_to > now, "valid_to expiry should be in future");
FC_ASSERT((*op.valid_to - now) <= fc::seconds(d.get_global_properties().parameters.account_roles_max_lifetime()), "Validity of the account role beyond max expiry");
}
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result account_role_update_evaluator::do_apply(const account_role_update_operation &op)
{
try
{
database &d = db();
const account_role_object &aobj = op.account_role_id(d);
d.modify(aobj, [&op](account_role_object &obj) {
if (op.name)
obj.name = *op.name;
if (op.metadata)
obj.metadata = *op.metadata;
obj.allowed_operations.insert(op.allowed_operations_to_add.begin(), op.allowed_operations_to_add.end());
obj.whitelisted_accounts.insert(op.accounts_to_add.begin(), op.accounts_to_add.end());
for (const auto &op_type : op.allowed_operations_to_remove)
obj.allowed_operations.erase(op_type);
for (const auto &acc : op.accounts_to_remove)
obj.whitelisted_accounts.erase(acc);
if (op.valid_to)
obj.valid_to = *op.valid_to;
});
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result account_role_delete_evaluator::do_evaluate(const account_role_delete_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner(d);
const account_role_object &aobj = op.account_role_id(d);
FC_ASSERT(aobj.owner == op.owner, "Only owner account can delete account role object");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result account_role_delete_evaluator::do_apply(const account_role_delete_operation &op)
{
try
{
database &d = db();
const account_role_object &aobj = op.account_role_id(d);
d.remove(aobj);
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
} // namespace chain
} // namespace graphene


@ -42,8 +42,6 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
database& d = db();
FC_ASSERT(d.is_asset_creation_allowed(op.symbol), "Asset creation not allowed at current time");
const auto& chain_parameters = d.get_global_properties().parameters;
FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
FC_ASSERT( op.common_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
@ -78,7 +76,7 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
{
auto dotpos = op.symbol.rfind( '.' );
if( dotpos != std::string::npos )
{
auto prefix = op.symbol.substr( 0, dotpos );
auto asset_symbol_itr = asset_indx.find( prefix );
@ -121,7 +119,7 @@ void_result asset_create_evaluator::do_evaluate( const asset_create_operation& o
FC_ASSERT( op.bitasset_opts );
FC_ASSERT( op.precision == op.bitasset_opts->short_backing_asset(d).precision );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (op) ) }
@ -135,36 +133,33 @@ void asset_create_evaluator::pay_fee()
object_id_type asset_create_evaluator::do_apply( const asset_create_operation& op )
{ try {
database& d = db();
// includes changes from bitshares. (https://github.com/bitshares/bitshares-core/issues/429)
bool hf_429 = fee_is_odd && db().head_block_time() > HARDFORK_CORE_429_TIME;
const asset_dynamic_data_object& dyn_asset =
d.create<asset_dynamic_data_object>( [hf_429,this]( asset_dynamic_data_object& a ) {
db().create<asset_dynamic_data_object>( [&]( asset_dynamic_data_object& a ) {
a.current_supply = 0;
a.fee_pool = core_fee_paid - (hf_429 ? 1 : 0);
});
if( fee_is_odd && !hf_429 )
{
const auto& core_dd = d.get_core_asset().dynamic_data( d );
d.modify( core_dd, []( asset_dynamic_data_object& dd ) {
if( fee_is_odd && !hf_429 )
{
const auto& core_dd = db().get<asset_object>( asset_id_type() ).dynamic_data( db() );
db().modify( core_dd, [=]( asset_dynamic_data_object& dd ) {
dd.current_supply++;
});
}
auto next_asset_id = d.get_index_type<asset_index>().get_next_id();
});
}
asset_bitasset_data_id_type bit_asset_id;
if( op.bitasset_opts.valid() )
bit_asset_id = d.create<asset_bitasset_data_object>( [&]( asset_bitasset_data_object& a ) {
bit_asset_id = db().create<asset_bitasset_data_object>( [&]( asset_bitasset_data_object& a ) {
a.options = *op.bitasset_opts;
a.is_prediction_market = op.is_prediction_market;
a.asset_id = next_asset_id;
}).id;
auto next_asset_id = db().get_index_type<asset_index>().get_next_id();
const asset_object& new_asset =
d.create<asset_object>( [&]( asset_object& a ) {
db().create<asset_object>( [&]( asset_object& a ) {
a.issuer = op.issuer;
a.symbol = op.symbol;
a.precision = op.precision;
@ -176,11 +171,11 @@ object_id_type asset_create_evaluator::do_apply( const asset_create_operation& o
a.options.core_exchange_rate.base.asset_id = next_asset_id;
a.dynamic_asset_data_id = dyn_asset.id;
if( op.bitasset_opts.valid() )
a.bitasset_data_id = bit_asset_id;
});
FC_ASSERT( new_asset.id == next_asset_id );
assert( new_asset.id == next_asset_id );
return new_asset.id;
} FC_CAPTURE_AND_RETHROW( (op) ) }
@ -190,8 +185,6 @@ void_result lottery_asset_create_evaluator::do_evaluate( const lottery_asset_cre
database& d = db();
FC_ASSERT(d.is_asset_creation_allowed(op.symbol), "Lottery asset creation not allowed at current time");
const auto& chain_parameters = d.get_global_properties().parameters;
FC_ASSERT( op.common_options.whitelist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
FC_ASSERT( op.common_options.blacklist_authorities.size() <= chain_parameters.maximum_asset_whitelist_authorities );
@ -226,7 +219,7 @@ void_result lottery_asset_create_evaluator::do_evaluate( const lottery_asset_cre
{
auto dotpos = op.symbol.rfind( '.' );
if( dotpos != std::string::npos )
{
auto prefix = op.symbol.substr( 0, dotpos );
auto asset_symbol_itr = asset_indx.find( prefix );
@ -288,36 +281,33 @@ void lottery_asset_create_evaluator::pay_fee()
object_id_type lottery_asset_create_evaluator::do_apply( const lottery_asset_create_operation& op )
{ try {
database& d = db();
// includes changes from bitshares. (https://github.com/bitshares/bitshares-core/issues/429)
bool hf_429 = fee_is_odd && d.head_block_time() > HARDFORK_CORE_429_TIME;
bool hf_429 = fee_is_odd && db().head_block_time() > HARDFORK_CORE_429_TIME;
const asset_dynamic_data_object& dyn_asset =
d.create<asset_dynamic_data_object>( [&]( asset_dynamic_data_object& a ) {
db().create<asset_dynamic_data_object>( [&]( asset_dynamic_data_object& a ) {
a.current_supply = 0;
a.fee_pool = core_fee_paid - (hf_429 ? 1 : 0);
});
if( fee_is_odd && !hf_429 )
{
const auto& core_dd = d.get<asset_object>( asset_id_type() ).dynamic_data( db() );
d.modify( core_dd, [=]( asset_dynamic_data_object& dd ) {
const auto& core_dd = db().get<asset_object>( asset_id_type() ).dynamic_data( db() );
db().modify( core_dd, [=]( asset_dynamic_data_object& dd ) {
dd.current_supply++;
});
}
auto next_asset_id = d.get_index_type<asset_index>().get_next_id();
asset_bitasset_data_id_type bit_asset_id;
if( op.bitasset_opts.valid() )
bit_asset_id = d.create<asset_bitasset_data_object>( [&op,next_asset_id]( asset_bitasset_data_object& a ) {
bit_asset_id = db().create<asset_bitasset_data_object>( [&]( asset_bitasset_data_object& a ) {
a.options = *op.bitasset_opts;
a.is_prediction_market = op.is_prediction_market;
a.asset_id = next_asset_id;
}).id;
auto next_asset_id = db().get_index_type<asset_index>().get_next_id();
const asset_object& new_asset =
d.create<asset_object>( [&op,next_asset_id,&dyn_asset,bit_asset_id,&d]( asset_object& a ) {
db().create<asset_object>( [&]( asset_object& a ) {
a.issuer = op.issuer;
a.symbol = op.symbol;
a.precision = op.precision;
@ -326,7 +316,7 @@ object_id_type lottery_asset_create_evaluator::do_apply( const lottery_asset_cre
a.lottery_options = op.extensions;
//a.lottery_options->balance = asset( 0, a.lottery_options->ticket_price.asset_id );
a.lottery_options->owner = a.id;
d.create<lottery_balance_object>([&a](lottery_balance_object& lbo) {
db().create<lottery_balance_object>([&](lottery_balance_object& lbo) {
lbo.lottery_id = a.id;
});
if( a.options.core_exchange_rate.base.asset_id.instance.value == 0 )
@ -337,7 +327,7 @@ object_id_type lottery_asset_create_evaluator::do_apply( const lottery_asset_cre
if( op.bitasset_opts.valid() )
a.bitasset_data_id = bit_asset_id;
});
FC_ASSERT( new_asset.id == next_asset_id, "Unexpected object database error, object id mismatch" );
assert( new_asset.id == next_asset_id );
return new_asset.id;
} FC_CAPTURE_AND_RETHROW( (op) ) }
@ -364,7 +354,7 @@ void_result asset_issue_evaluator::do_apply( const asset_issue_operation& o )
{ try {
db().adjust_balance( o.issue_to_account, o.asset_to_issue );
db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ){
db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ){
data.current_supply += o.asset_to_issue.amount;
});
@ -396,7 +386,7 @@ void_result asset_reserve_evaluator::do_apply( const asset_reserve_operation& o
{ try {
db().adjust_balance( o.payer, -o.amount_to_reserve );
db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ){
db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ){
data.current_supply -= o.amount_to_reserve.amount;
});
@ -418,7 +408,7 @@ void_result asset_fund_fee_pool_evaluator::do_apply(const asset_fund_fee_pool_op
{ try {
db().adjust_balance(o.from_account, -o.amount);
db().modify( *asset_dyn_data, [&o]( asset_dynamic_data_object& data ) {
db().modify( *asset_dyn_data, [&]( asset_dynamic_data_object& data ) {
data.fee_pool += o.amount;
});
@ -493,21 +483,7 @@ void_result asset_update_evaluator::do_apply(const asset_update_operation& o)
d.cancel_order(*itr);
}
// For market-issued assets, if core change rate changed, update flag in bitasset data
if( asset_to_update->is_market_issued()
&& asset_to_update->options.core_exchange_rate != o.new_options.core_exchange_rate )
{
const auto& bitasset = asset_to_update->bitasset_data(d);
if( !bitasset.asset_cer_updated )
{
d.modify( bitasset, [](asset_bitasset_data_object& b)
{
b.asset_cer_updated = true;
});
}
}
d.modify(*asset_to_update, [&o](asset_object& a) {
d.modify(*asset_to_update, [&](asset_object& a) {
if( o.new_issuer )
a.issuer = *o.new_issuer;
a.options = o.new_options;
@ -579,7 +555,7 @@ void_result asset_update_dividend_evaluator::do_evaluate(const asset_update_divi
auto& params = db().get_global_properties().parameters;
if (o.new_options.payout_interval &&
*o.new_options.payout_interval < params.maintenance_interval)
FC_THROW("New payout interval may not be less than the maintenance interval",
FC_THROW("New payout interval may not be less than the maintenance interval",
("new_payout_interval", o.new_options.payout_interval)("maintenance_interval", params.maintenance_interval));
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
@ -597,6 +573,7 @@ void_result asset_update_dividend_evaluator::do_apply( const asset_update_divide
obj.referrer = op.issuer;
obj.lifetime_referrer = op.issuer(db()).lifetime_referrer;
auto& params = db().get_global_properties().parameters;
obj.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
obj.lifetime_referrer_fee_percentage = GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE;
obj.referrer_rewards_percentage = GRAPHENE_DEFAULT_LIFETIME_REFERRER_PERCENT_OF_FEE;


@ -23,12 +23,11 @@
*/
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/hardfork.hpp>
#include <fc/io/raw.hpp>
#include <fc/uint128.hpp>
#include <cmath>
using namespace graphene::chain;
share_type asset_bitasset_data_object::max_force_settlement_volume(share_type current_supply) const
@ -62,15 +61,12 @@ void asset_bitasset_data_object::update_median_feeds(time_point_sec current_time
if( current_feeds.size() < options.minimum_feeds )
{
//... don't calculate a median, and set a null feed
feed_cer_updated = false; // new median cer is null, won't update asset_object anyway, set to false for better performance
current_feed_publication_time = current_time;
current_feed = price_feed();
return;
}
if( current_feeds.size() == 1 )
{
if( current_feed.core_exchange_rate != current_feeds.front().get().core_exchange_rate )
feed_cer_updated = true;
current_feed = std::move(current_feeds.front());
return;
}
@ -89,8 +85,6 @@ void asset_bitasset_data_object::update_median_feeds(time_point_sec current_time
#undef CALCULATE_MEDIAN_VALUE
// *** End Median Calculations ***
if( current_feed.core_exchange_rate != median_feed.core_exchange_rate )
feed_cer_updated = true;
current_feed = median_feed;
}
@ -187,41 +181,6 @@ vector<account_id_type> asset_object::get_holders( database& db ) const
return holders;
}
vector<uint64_t> asset_object::get_ticket_ids( database& db ) const
{
auto& asset_bal_idx = db.get_index_type< account_balance_index >().indices().get< by_asset_balance >();
vector<uint64_t> ids;
const auto range = asset_bal_idx.equal_range( boost::make_tuple( get_id() ) );
for( const account_balance_object& bal : boost::make_iterator_range( range.first, range.second ) )
{
const auto& stats = bal.owner(db).statistics(db);
const account_transaction_history_object* ath = static_cast<const account_transaction_history_object*>(&stats.most_recent_op(db));
for( uint64_t balance = bal.balance.value; balance > 0;)
{
if(ath != nullptr)
{
const operation_history_object& oho = db.get<operation_history_object>( ath->operation_id );
if( oho.op.which() == operation::tag<ticket_purchase_operation>::value && get_id() == oho.op.get<ticket_purchase_operation>().lottery)
{
uint64_t tickets_count = oho.op.get<ticket_purchase_operation>().tickets_to_buy;
ids.insert(ids.end(), tickets_count, oho.id.instance());
balance -= tickets_count;
assert(balance >= 0);
}
if( ath->next == account_transaction_history_id_type() )
{
ath = nullptr;
break;
}
else ath = db.find(ath->next);
}
}
}
return ids;
}
void asset_object::distribute_benefactors_part( database& db )
{
transaction_evaluation_state eval( &db );
@ -243,7 +202,6 @@ map< account_id_type, vector< uint16_t > > asset_object::distribute_winners_part
transaction_evaluation_state eval( &db );
auto holders = get_holders( db );
vector<uint64_t> ticket_ids = get_ticket_ids(db);
FC_ASSERT( dynamic_data( db ).current_supply == holders.size() );
map<account_id_type, vector<uint16_t> > structurized_participants;
for( account_id_type holder : holders )
@ -266,17 +224,12 @@ map< account_id_type, vector< uint16_t > > asset_object::distribute_winners_part
*t += percents_to_distribute / holders.size();
}
auto sweeps_distribution_percentage = db.get_global_properties().parameters.sweeps_distribution_percentage();
for( size_t c = 0; c < winner_numbers.size(); ++c ) {
for( int c = 0; c < winner_numbers.size(); ++c ) {
auto winner_num = winner_numbers[c];
lottery_reward_operation reward_op;
reward_op.lottery = get_id();
reward_op.is_benefactor_reward = false;
reward_op.winner = holders[winner_num];
if(db.head_block_time() > HARDFORK_5050_1_TIME && ticket_ids.size() > winner_num)
{
const static_variant<uint64_t, void_t> tkt_id = ticket_ids[winner_num];
reward_op.winner_ticket_id = tkt_id;
}
reward_op.win_percentage = tickets[c];
reward_op.amount = asset( jackpot * tickets[c] * ( 1. - sweeps_distribution_percentage / (double)GRAPHENE_100_PERCENT ) / GRAPHENE_100_PERCENT , db.get_balance(id).asset_id );
db.apply_operation(eval, reward_op);
@ -338,11 +291,3 @@ void sweeps_vesting_balance_object::adjust_balance( const asset& delta )
FC_ASSERT( delta.asset_id == asset_id );
balance += delta.amount.value;
}
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::asset_dynamic_data_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::asset_bitasset_data_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::asset_dividend_data_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::total_distributed_dividend_balance_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::asset_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::lottery_balance_object )
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::sweeps_vesting_balance_object )
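Illustrative sketch, not taken from the hunks above: the lottery reward arithmetic kept in distribute_winners_part, worked through with plain integers. This assumes GRAPHENE_100_PERCENT == 10000 (percentages stored in hundredths of a percent, its usual definition); the real code uses share_type/asset rather than int64_t.

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t GRAPHENE_100_PERCENT = 10000;
    int64_t jackpot = 1'000'000;            // total prize pool, in satoshis of the payout asset
    int64_t winner_percentage = 5000;       // this ticket wins 50.00% of the jackpot
    int64_t sweeps_distribution_pct = 200;  // 2.00% retained for sweeps distribution

    // reward = jackpot * win% * (1 - sweeps%), all percentages scaled by 10000
    double reward = jackpot * winner_percentage *
                    (1.0 - sweeps_distribution_pct / (double)GRAPHENE_100_PERCENT) /
                    GRAPHENE_100_PERCENT;

    std::printf("reward = %.0f\n", reward);  // 1,000,000 * 0.50 * 0.98 = 490000
    return 0;
}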

View file

@ -22,7 +22,6 @@
* THE SOFTWARE.
*/
#include <graphene/chain/balance_evaluator.hpp>
#include <graphene/chain/pts_address.hpp>
namespace graphene { namespace chain {

View file

@ -22,6 +22,7 @@
* THE SOFTWARE.
*/
#define DEFAULT_LOGGER "betting"
#include <fc/smart_ref_impl.hpp>
#include <graphene/chain/betting_market_evaluator.hpp>
#include <graphene/chain/betting_market_object.hpp>

View file

@ -541,37 +541,37 @@ void betting_market_group_object::dispatch_new_status(database& db, betting_mark
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect betting_market_group_object to variant to properly reflect "state"
void to_variant(const graphene::chain::betting_market_group_object& betting_market_group_obj, fc::variant& v, uint32_t max_depth)
void to_variant(const graphene::chain::betting_market_group_object& betting_market_group_obj, fc::variant& v)
{
fc::mutable_variant_object o;
o("id", fc::variant(betting_market_group_obj.id, max_depth))
("description", fc::variant(betting_market_group_obj.description, max_depth))
("event_id", fc::variant(betting_market_group_obj.event_id, max_depth))
("rules_id", fc::variant(betting_market_group_obj.rules_id, max_depth))
("asset_id", fc::variant(betting_market_group_obj.asset_id, max_depth))
("total_matched_bets_amount", fc::variant(betting_market_group_obj.total_matched_bets_amount, max_depth))
("never_in_play", fc::variant(betting_market_group_obj.never_in_play, max_depth))
("delay_before_settling", fc::variant(betting_market_group_obj.delay_before_settling, max_depth))
("settling_time", fc::variant(betting_market_group_obj.settling_time, max_depth))
("status", fc::variant(betting_market_group_obj.get_status(), max_depth));
o("id", betting_market_group_obj.id)
("description", betting_market_group_obj.description)
("event_id", betting_market_group_obj.event_id)
("rules_id", betting_market_group_obj.rules_id)
("asset_id", betting_market_group_obj.asset_id)
("total_matched_bets_amount", betting_market_group_obj.total_matched_bets_amount)
("never_in_play", betting_market_group_obj.never_in_play)
("delay_before_settling", betting_market_group_obj.delay_before_settling)
("settling_time", betting_market_group_obj.settling_time)
("status", betting_market_group_obj.get_status());
v = o;
}
// Manually reflect betting_market_group_object to variant to properly reflect "state"
void from_variant(const fc::variant& v, graphene::chain::betting_market_group_object& betting_market_group_obj, uint32_t max_depth)
void from_variant(const fc::variant& v, graphene::chain::betting_market_group_object& betting_market_group_obj)
{
betting_market_group_obj.id = v["id"].as<graphene::chain::betting_market_group_id_type>( max_depth );
betting_market_group_obj.description = v["description"].as<graphene::chain::internationalized_string_type>( max_depth );
betting_market_group_obj.event_id = v["event_id"].as<graphene::chain::event_id_type>( max_depth );
betting_market_group_obj.asset_id = v["asset_id"].as<graphene::chain::asset_id_type>( max_depth );
betting_market_group_obj.total_matched_bets_amount = v["total_matched_bets_amount"].as<graphene::chain::share_type>( max_depth );
betting_market_group_obj.never_in_play = v["never_in_play"].as<bool>( max_depth );
betting_market_group_obj.delay_before_settling = v["delay_before_settling"].as<uint32_t>( max_depth );
betting_market_group_obj.settling_time = v["settling_time"].as<fc::optional<fc::time_point_sec>>( max_depth );
graphene::chain::betting_market_group_status status = v["status"].as<graphene::chain::betting_market_group_status>( max_depth );
betting_market_group_obj.id = v["id"].as<graphene::chain::betting_market_group_id_type>();
betting_market_group_obj.description = v["description"].as<graphene::chain::internationalized_string_type>();
betting_market_group_obj.event_id = v["event_id"].as<graphene::chain::event_id_type>();
betting_market_group_obj.asset_id = v["asset_id"].as<graphene::chain::asset_id_type>();
betting_market_group_obj.total_matched_bets_amount = v["total_matched_bets_amount"].as<graphene::chain::share_type>();
betting_market_group_obj.never_in_play = v["never_in_play"].as<bool>();
betting_market_group_obj.delay_before_settling = v["delay_before_settling"].as<uint32_t>();
betting_market_group_obj.settling_time = v["settling_time"].as<fc::optional<fc::time_point_sec>>();
graphene::chain::betting_market_group_status status = v["status"].as<graphene::chain::betting_market_group_status>();
const_cast<int*>(betting_market_group_obj.my->state_machine.current_state())[0] = (int)status;
}
} //end namespace fc
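Illustrative sketch, not taken from the hunks above: why the betting market objects need hand-written to_variant/from_variant. The "status" field lives inside a state machine rather than as a plain member, so generic reflection cannot round-trip it; one side of the hunk additionally threads a max_depth limit through every nested fc::variant conversion to bound recursion. The map-of-strings stand-in below is hypothetical; the real code uses fc::variant and fc::mutable_variant_object.

#include <cassert>
#include <map>
#include <string>

enum class market_status { unresolved = 0, frozen = 1, settled = 2 };

struct market_object {
    std::string description;
    market_status state = market_status::unresolved;  // normally hidden inside a state machine
    market_status get_status() const { return state; }
};

using variant_object = std::map<std::string, std::string>;

void to_variant(const market_object& m, variant_object& v) {
    v["description"] = m.description;
    v["status"] = std::to_string(static_cast<int>(m.get_status()));  // derived, not a member
}

void from_variant(const variant_object& v, market_object& m) {
    m.description = v.at("description");
    m.state = static_cast<market_status>(std::stoi(v.at("status")));  // restore machine state
}

int main() {
    market_object a{"Team A to win", market_status::frozen};
    variant_object v;
    to_variant(a, v);
    market_object b;
    from_variant(v, b);
    assert(b.get_status() == market_status::frozen);
    return 0;
}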

View file

@ -466,30 +466,31 @@ void betting_market_object::on_canceled_event(database& db)
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect betting_market_object to variant to properly reflect "state"
void to_variant(const graphene::chain::betting_market_object& event_obj, fc::variant& v, uint32_t max_depth)
void to_variant(const graphene::chain::betting_market_object& event_obj, fc::variant& v)
{
fc::mutable_variant_object o;
o("id", fc::variant(event_obj.id, max_depth) )
("group_id", fc::variant(event_obj.group_id, max_depth))
("description", fc::variant(event_obj.description, max_depth))
("payout_condition", fc::variant(event_obj.payout_condition, max_depth))
("resolution", fc::variant(event_obj.resolution, max_depth))
("status", fc::variant(event_obj.get_status(), max_depth));
o("id", event_obj.id)
("group_id", event_obj.group_id)
("description", event_obj.description)
("payout_condition", event_obj.payout_condition)
("resolution", event_obj.resolution)
("status", event_obj.get_status());
v = o;
}
// Manually reflect betting_market_object to variant to properly reflect "state"
void from_variant(const fc::variant& v, graphene::chain::betting_market_object& event_obj, uint32_t max_depth)
void from_variant(const fc::variant& v, graphene::chain::betting_market_object& event_obj)
{
event_obj.id = v["id"].as<graphene::chain::betting_market_id_type>( max_depth );
event_obj.group_id = v["name"].as<graphene::chain::betting_market_group_id_type>( max_depth );
event_obj.description = v["description"].as<graphene::chain::internationalized_string_type>( max_depth );
event_obj.payout_condition = v["payout_condition"].as<graphene::chain::internationalized_string_type>( max_depth );
event_obj.resolution = v["resolution"].as<fc::optional<graphene::chain::betting_market_resolution_type>>( max_depth );
graphene::chain::betting_market_status status = v["status"].as<graphene::chain::betting_market_status>( max_depth );
event_obj.id = v["id"].as<graphene::chain::betting_market_id_type>();
event_obj.group_id = v["name"].as<graphene::chain::betting_market_group_id_type>();
event_obj.description = v["description"].as<graphene::chain::internationalized_string_type>();
event_obj.payout_condition = v["payout_condition"].as<graphene::chain::internationalized_string_type>();
event_obj.resolution = v["resolution"].as<fc::optional<graphene::chain::betting_market_resolution_type>>();
graphene::chain::betting_market_status status = v["status"].as<graphene::chain::betting_market_status>();
const_cast<int*>(event_obj.my->state_machine.current_state())[0] = (int)status;
}
} //end namespace fc

View file

@ -24,6 +24,7 @@
#include <graphene/chain/block_database.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/io/raw.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
@ -44,15 +45,14 @@ void block_database::open( const fc::path& dbdir )
_block_num_to_pos.exceptions(std::ios_base::failbit | std::ios_base::badbit);
_blocks.exceptions(std::ios_base::failbit | std::ios_base::badbit);
_index_filename = dbdir / "index";
if( !fc::exists( _index_filename ) )
if( !fc::exists( dbdir/"index" ) )
{
_block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
_block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
_blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out | std::fstream::trunc);
}
else
{
_block_num_to_pos.open( _index_filename.generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
_block_num_to_pos.open( (dbdir/"index").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
_blocks.open( (dbdir/"blocks").generic_string().c_str(), std::fstream::binary | std::fstream::in | std::fstream::out );
}
} FC_CAPTURE_AND_RETHROW( (dbdir) ) }
@ -76,10 +76,6 @@ void block_database::flush()
void block_database::store( const block_id_type& _id, const signed_block& b )
{
if (true == replay_mode){
return;
}
block_id_type id = _id;
if( id == block_id_type() )
{
@ -103,15 +99,8 @@ void block_database::remove( const block_id_type& id )
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos ){
if ( _block_num_to_pos.tellg() <= index_pos )
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database", ("id", id));
}
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
@ -125,27 +114,20 @@ void block_database::remove( const block_id_type& id )
} FC_CAPTURE_AND_RETHROW( (id) ) }
bool block_database::contains( const block_id_type& id )const
{ try {
{
if( id == block_id_type() )
return false;
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) < index_pos + sizeof(e) )
if ( _block_num_to_pos.tellg() <= index_pos )
return false;
_block_num_to_pos.seekg( index_pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
return e.block_id == id && e.block_size > 0;
} FC_CAPTURE_AND_RETHROW( (id) ) }
}
block_id_type block_database::fetch_block_id( uint32_t block_num )const
{
@ -170,13 +152,7 @@ optional<signed_block> block_database::fetch_optional( const block_id_type& id )
index_entry e;
auto index_pos = sizeof(e)*block_header::num_from_id(id);
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${id} not contained in block database, _block_num_to_pos.tellg failed", ("id", id));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos )
if ( _block_num_to_pos.tellg() <= index_pos )
return {};
_block_num_to_pos.seekg( index_pos );
@ -208,12 +184,7 @@ optional<signed_block> block_database::fetch_by_number( uint32_t block_num )cons
index_entry e;
auto index_pos = sizeof(e)*block_num;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos s_pos = _block_num_to_pos.tellg();
if (-1 == s_pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "Block ${block_num} not contained in block database, _block_num_to_pos.tellg failed", ("block_num", block_num));
}
if ( static_cast<uint32_t>(s_pos) <= index_pos )
if ( _block_num_to_pos.tellg() <= index_pos )
return {};
_block_num_to_pos.seekg( index_pos, _block_num_to_pos.beg );
@ -235,51 +206,34 @@ optional<signed_block> block_database::fetch_by_number( uint32_t block_num )cons
return optional<signed_block>();
}
optional<index_entry> block_database::last_index_entry()const {
optional<signed_block> block_database::last()const
{
try
{
index_entry e;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
std::streampos pos = _block_num_to_pos.tellg();
if (-1 == pos){
FC_THROW_EXCEPTION(fc::key_not_found_exception, "last_index_entry tellg failed");
}
if( static_cast<size_t>(pos) < sizeof(index_entry) )
return optional<index_entry>();
if( _block_num_to_pos.tellp() < sizeof(index_entry) )
return optional<signed_block>();
pos -= pos % sizeof(index_entry);
_blocks.seekg( 0, _block_num_to_pos.end );
const std::streampos blocks_size = _blocks.tellg();
while( pos > 0 )
_block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
uint64_t pos = _block_num_to_pos.tellg();
while( e.block_size == 0 && pos > 0 )
{
pos -= sizeof(index_entry);
_block_num_to_pos.seekg( pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
if( _block_num_to_pos.gcount() == sizeof(e) && e.block_size > 0
&& e.block_pos + static_cast<uint64_t>(e.block_size) <= static_cast<uint64_t>(blocks_size) )
try
{
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
if( _blocks.gcount() == e.block_size )
{
const signed_block block = fc::raw::unpack<signed_block>(data);
if( block.id() == e.block_id )
return e;
}
}
catch (const fc::exception&)
{
}
catch (const std::exception&)
{
}
fc::resize_file( _index_filename, pos );
}
if( e.block_size == 0 )
return optional<signed_block>();
vector<char> data( e.block_size );
_blocks.seekg( e.block_pos );
_blocks.read( data.data(), e.block_size );
auto result = fc::raw::unpack<signed_block>(data);
return result;
}
catch (const fc::exception&)
{
@ -287,26 +241,42 @@ optional<index_entry> block_database::last_index_entry()const {
catch (const std::exception&)
{
}
return optional<index_entry>();
}
optional<signed_block> block_database::last()const
{
optional<index_entry> entry = last_index_entry();
if( entry.valid() ) return fetch_by_number( block_header::num_from_id(entry->block_id) );
return optional<signed_block>();
}
optional<block_id_type> block_database::last_id()const
{
optional<index_entry> entry = last_index_entry();
if( entry.valid() ) return entry->block_id;
try
{
index_entry e;
_block_num_to_pos.seekg( 0, _block_num_to_pos.end );
if( _block_num_to_pos.tellp() < sizeof(index_entry) )
return optional<block_id_type>();
_block_num_to_pos.seekg( -sizeof(index_entry), _block_num_to_pos.end );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
uint64_t pos = _block_num_to_pos.tellg();
while( e.block_size == 0 && pos > 0 )
{
pos -= sizeof(index_entry);
_block_num_to_pos.seekg( pos );
_block_num_to_pos.read( (char*)&e, sizeof(e) );
}
if( e.block_size == 0 )
return optional<block_id_type>();
return e.block_id;
}
catch (const fc::exception&)
{
}
catch (const std::exception&)
{
}
return optional<block_id_type>();
}
void block_database::set_replay_mode(bool mode)
{
replay_mode = mode;
}
} }
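Illustrative sketch, not taken from the hunks above: the fixed-width index layout the block_database code works against. Entry N lives at byte offset N * sizeof(index_entry), and the last()/last_id() logic walks backwards past zero-sized (removed) entries. The in-memory vector below stands in for the on-disk index stream; field names mirror the diff but this is a simplification, not the real implementation.

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct index_entry {
    uint64_t block_pos = 0;   // offset of the block inside the blocks file
    uint32_t block_size = 0;  // 0 means "no block stored at this height"
    uint64_t block_id = 0;    // stand-in for the real block id type
};

std::optional<index_entry> last_index_entry(const std::vector<index_entry>& index) {
    // Scan from the end toward the front until a non-empty entry is found,
    // mirroring the backward seeks in sizeof(index_entry) strides above.
    for (auto it = index.rbegin(); it != index.rend(); ++it)
        if (it->block_size > 0)
            return *it;
    return std::nullopt;
}

int main() {
    std::vector<index_entry> index(5);   // heights 0..4, all empty
    index[3] = {4096, 180, 0xabcd};      // only height 3 has a stored block
    auto last = last_index_entry(index);
    assert(last && last->block_id == 0xabcd);
    return 0;
}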

View file

@ -30,6 +30,8 @@
#include <graphene/chain/protocol/vote.hpp>
#include <graphene/chain/transaction_evaluation_state.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
void_result committee_member_create_evaluator::do_evaluate( const committee_member_create_operation& op )
@ -75,7 +77,15 @@ void_result committee_member_update_evaluator::do_apply( const committee_member_
void_result committee_member_update_global_parameters_evaluator::do_evaluate(const committee_member_update_global_parameters_operation& o)
{ try {
FC_ASSERT(trx_state->_is_proposed_trx);
if( db().head_block_time() < HARDFORK_1000_TIME ) // TODO: remove after hf
FC_ASSERT( !o.new_parameters.extensions.value.min_bet_multiplier.valid()
&& !o.new_parameters.extensions.value.max_bet_multiplier.valid()
&& !o.new_parameters.extensions.value.betting_rake_fee_percentage.valid()
&& !o.new_parameters.extensions.value.permitted_betting_odds_increments.valid()
&& !o.new_parameters.extensions.value.live_betting_delay_time.valid(),
"Parameter extensions are not allowed yet!" );
dgpo = &db().get_global_properties();
if( o.new_parameters.extensions.value.min_bet_multiplier.valid()
&& !o.new_parameters.extensions.value.max_bet_multiplier.valid() )
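Illustrative sketch, not taken from the hunk above: the hardfork-gate pattern visible in this evaluator, where optional parameter extensions must stay unset until the chain has passed an activation time. Types and the hardfork timestamp below are hypothetical stand-ins for graphene's optional extensions and HARDFORK_1000_TIME.

#include <ctime>
#include <optional>
#include <stdexcept>

struct parameter_extensions {
    std::optional<int> min_bet_multiplier;
    std::optional<int> max_bet_multiplier;
};

void check_extensions_allowed(std::time_t head_block_time,
                              std::time_t hardfork_time,
                              const parameter_extensions& ext) {
    // Before the hardfork, any set extension is rejected at evaluation time.
    if (head_block_time < hardfork_time &&
        (ext.min_bet_multiplier.has_value() || ext.max_bet_multiplier.has_value()))
        throw std::runtime_error("Parameter extensions are not allowed yet!");
}

int main() {
    parameter_extensions ext;
    ext.min_bet_multiplier = 101;
    try {
        check_extensions_allowed(/*head*/ 1000, /*hardfork*/ 2000, ext);
    } catch (const std::runtime_error&) {
        return 0;  // expected: an extension set before activation is refused
    }
    return 1;
}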

View file

@ -29,167 +29,155 @@
#include <graphene/chain/fba_accumulator_id.hpp>
#include <graphene/chain/hardfork.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
void_result transfer_to_blind_evaluator::do_evaluate( const transfer_to_blind_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME )
{
const auto& atype = o.amount.asset_id(d);
FC_ASSERT( atype.allow_confidential() );
FC_ASSERT( !atype.is_transfer_restricted() );
FC_ASSERT( !(atype.options.flags & white_list) );
const auto& d = db();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
}
const auto& atype = o.amount.asset_id(db());
FC_ASSERT( atype.allow_confidential() );
FC_ASSERT( !atype.is_transfer_restricted() );
FC_ASSERT( !(atype.options.flags & white_list) );
return void_result();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_to_blind_evaluator::do_apply( const transfer_to_blind_operation& o )
void_result transfer_to_blind_evaluator::do_apply( const transfer_to_blind_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.from, -o.amount);
db().adjust_balance( o.from, -o.amount );
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply += o.amount.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.amount.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
}
return void_result();
const auto& add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply += o.amount.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
for( const auto& out : o.outputs )
{
db().create<blinded_balance_object>( [&]( blinded_balance_object& obj ){
obj.asset_id = o.amount.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_to_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_to_blind);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_transfer_to_blind );
else
generic_evaluator::pay_fee();
}
void_result transfer_from_blind_evaluator::do_evaluate( const transfer_from_blind_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
const auto& d = db();
o.fee.asset_id(d); // verify fee is a legit asset
const auto& bbi = d.get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
FC_ASSERT( itr != cidx.end() );
FC_ASSERT( itr->asset_id == o.fee.asset_id );
FC_ASSERT( itr->owner == in.owner );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result transfer_from_blind_evaluator::do_apply( const transfer_from_blind_operation& o )
void_result transfer_from_blind_evaluator::do_apply( const transfer_from_blind_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee);
db().adjust_balance(o.to, o.amount);
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
FC_ASSERT(itr != cidx.end());
db().remove(*itr);
}
const auto &add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.amount.amount + o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
db().adjust_balance( o.fee_payer(), o.fee );
db().adjust_balance( o.to, o.amount );
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
FC_ASSERT( itr != cidx.end() );
db().remove( *itr );
}
const auto& add = o.amount.asset_id(db()).dynamic_asset_data_id(db()); // verify fee is a legit asset
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply -= o.amount.amount + o.fee.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void transfer_from_blind_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_transfer_from_blind);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_transfer_from_blind );
else
generic_evaluator::pay_fee();
}
void_result blind_transfer_evaluator::do_evaluate( const blind_transfer_operation& o )
{ try {
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
o.fee.asset_id(d); // verify fee is a legit asset
const auto &bbi = d.get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &out : o.outputs) {
for (const auto &a : out.owner.account_auths)
a.first(d); // verify all accounts exist and are valid
}
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
FC_ASSERT(itr->asset_id == o.fee.asset_id);
FC_ASSERT(itr->owner == in.owner);
}
}
return void_result();
const auto& d = db();
o.fee.asset_id(db()); // verify fee is a legit asset
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& out : o.outputs )
{
for( const auto& a : out.owner.account_auths )
a.first(d); // verify all accounts exist and are valid
}
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
GRAPHENE_ASSERT( itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment",in.commitment) );
FC_ASSERT( itr->asset_id == o.fee.asset_id );
FC_ASSERT( itr->owner == in.owner );
}
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void_result blind_transfer_evaluator::do_apply( const blind_transfer_operation& o )
void_result blind_transfer_evaluator::do_apply( const blind_transfer_operation& o )
{ try {
if( db().head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
db().adjust_balance(o.fee_payer(), o.fee); // deposit the fee to the temp account
const auto &bbi = db().get_index_type<blinded_balance_index>();
const auto &cidx = bbi.indices().get<by_commitment>();
for (const auto &in : o.inputs) {
auto itr = cidx.find(in.commitment);
GRAPHENE_ASSERT(itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment", in.commitment));
db().remove(*itr);
}
for (const auto &out : o.outputs) {
db().create<blinded_balance_object>([&](blinded_balance_object &obj) {
obj.asset_id = o.fee.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
const auto &add = o.fee.asset_id(db()).dynamic_asset_data_id(db());
db().modify(add, [&](asset_dynamic_data_object &obj) {
obj.confidential_supply -= o.fee.amount;
FC_ASSERT(obj.confidential_supply >= 0);
});
}
return void_result();
db().adjust_balance( o.fee_payer(), o.fee ); // deposit the fee to the temp account
const auto& bbi = db().get_index_type<blinded_balance_index>();
const auto& cidx = bbi.indices().get<by_commitment>();
for( const auto& in : o.inputs )
{
auto itr = cidx.find( in.commitment );
GRAPHENE_ASSERT( itr != cidx.end(), blind_transfer_unknown_commitment, "", ("commitment",in.commitment) );
db().remove( *itr );
}
for( const auto& out : o.outputs )
{
db().create<blinded_balance_object>( [&]( blinded_balance_object& obj ){
obj.asset_id = o.fee.asset_id;
obj.owner = out.owner;
obj.commitment = out.commitment;
});
}
const auto& add = o.fee.asset_id(db()).dynamic_asset_data_id(db());
db().modify( add, [&]( asset_dynamic_data_object& obj ){
obj.confidential_supply -= o.fee.amount;
FC_ASSERT( obj.confidential_supply >= 0 );
});
return void_result();
} FC_CAPTURE_AND_RETHROW( (o) ) }
void blind_transfer_evaluator::pay_fee()
{
const auto& d = db();
if( d.head_block_time() < HARDFORK_SON_FOR_ETHEREUM_TIME ) {
if (d.head_block_time() >= HARDFORK_563_TIME)
pay_fba_fee(fba_accumulator_id_blind_transfer);
else
generic_evaluator::pay_fee();
}
if( db().head_block_time() >= HARDFORK_563_TIME )
pay_fba_fee( fba_accumulator_id_blind_transfer );
else
generic_evaluator::pay_fee();
}
} } // graphene::chain
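Illustrative sketch, not taken from the hunks above: the commitment bookkeeping that the blind-transfer evaluators preserve. Each blinded output is stored under its commitment; an input must name an existing commitment, and spending removes it. The std::set<std::string> below is a stand-in for the blinded_balance_index (real commitments are elliptic-curve points), and one side of these hunks additionally gates every path on head_block_time < HARDFORK_SON_FOR_ETHEREUM_TIME.

#include <cassert>
#include <set>
#include <stdexcept>
#include <string>

struct blinded_ledger {
    std::set<std::string> commitments;  // stand-in for the by_commitment index

    void add_output(const std::string& commitment) { commitments.insert(commitment); }

    void spend_input(const std::string& commitment) {
        auto it = commitments.find(commitment);
        if (it == commitments.end())
            throw std::runtime_error("blind_transfer_unknown_commitment");
        commitments.erase(it);  // an input can only be spent once
    }
};

int main() {
    blinded_ledger ledger;
    ledger.add_output("C1");
    ledger.spend_input("C1");      // ok
    try {
        ledger.spend_input("C1");  // double spend: commitment no longer exists
    } catch (const std::runtime_error&) {
        assert(ledger.commitments.empty());
        return 0;
    }
    return 1;
}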

View file

@ -1,128 +0,0 @@
#include <graphene/chain/custom_account_authority_evaluator.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/custom_account_authority_object.hpp>
#include <graphene/chain/custom_permission_object.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/rbac_hardfork_visitor.hpp>
namespace graphene
{
namespace chain
{
void_result create_custom_account_authority_evaluator::do_evaluate(const custom_account_authority_create_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
const custom_permission_object &pobj = op.permission_id(d);
FC_ASSERT(pobj.account == op.owner_account, "Only owner account can update account authority object");
FC_ASSERT(op.valid_to > now, "valid_to expiry should be in future");
FC_ASSERT((op.valid_to - op.valid_from) <= fc::seconds(d.get_global_properties().parameters.rbac_max_account_authority_lifetime()), "Validity of the auth beyond max expiry");
rbac_operation_hardfork_visitor rvtor(now);
rvtor(op.operation_type);
const auto& cindex = d.get_index_type<custom_account_authority_index>().indices().get<by_permission_and_op>();
auto count = cindex.count(boost::make_tuple(op.permission_id));
FC_ASSERT(count < d.get_global_properties().parameters.rbac_max_authorities_per_permission(), "Max operations that can be linked to a permission reached");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
object_id_type create_custom_account_authority_evaluator::do_apply(const custom_account_authority_create_operation &op)
{
try
{
database &d = db();
return d.create<custom_account_authority_object>([&op](custom_account_authority_object &obj) mutable {
obj.permission_id = op.permission_id;
obj.operation_type = op.operation_type;
obj.valid_from = op.valid_from;
obj.valid_to = op.valid_to;
})
.id;
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result update_custom_account_authority_evaluator::do_evaluate(const custom_account_authority_update_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
const custom_account_authority_object &aobj = op.auth_id(d);
const custom_permission_object &pobj = aobj.permission_id(d);
FC_ASSERT(pobj.account == op.owner_account, "Only owner account can update account authority object");
auto valid_from = aobj.valid_from;
auto valid_to = aobj.valid_to;
if (op.new_valid_from)
{
valid_from = *op.new_valid_from;
}
if (op.new_valid_to)
{
FC_ASSERT(*op.new_valid_to > now, "New valid_to expiry should be in the future");
valid_to = *op.new_valid_to;
}
FC_ASSERT(valid_from < valid_to, "valid_from should be before valid_to");
FC_ASSERT((valid_to - valid_from) <= fc::seconds(d.get_global_properties().parameters.rbac_max_account_authority_lifetime()), "Validity of the auth beyond max expiry");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
object_id_type update_custom_account_authority_evaluator::do_apply(const custom_account_authority_update_operation &op)
{
try
{
database &d = db();
const custom_account_authority_object &aobj = op.auth_id(d);
d.modify(aobj, [&op](custom_account_authority_object &obj) {
if (op.new_valid_from)
obj.valid_from = *op.new_valid_from;
if (op.new_valid_to)
obj.valid_to = *op.new_valid_to;
});
return op.auth_id;
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result delete_custom_account_authority_evaluator::do_evaluate(const custom_account_authority_delete_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
const custom_account_authority_object &aobj = op.auth_id(d);
const custom_permission_object &pobj = aobj.permission_id(d);
FC_ASSERT(pobj.account == op.owner_account, "Only owner account can delete account authority object");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result delete_custom_account_authority_evaluator::do_apply(const custom_account_authority_delete_operation &op)
{
try
{
database &d = db();
const custom_account_authority_object &aobj = op.auth_id(d);
d.remove(aobj);
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
} // namespace chain
} // namespace graphene
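Illustrative sketch, not taken from the file above: the validity-window rules the account-authority evaluators enforce (expiry in the future, valid_from before valid_to, lifetime capped by a chain parameter). Plain integers replace fc::time_point_sec and rbac_max_account_authority_lifetime; the names below are stand-ins for the real operation fields.

#include <cstdint>
#include <stdexcept>

void validate_authority_window(int64_t now,
                               int64_t valid_from,
                               int64_t valid_to,
                               int64_t max_lifetime_seconds) {
    if (valid_to <= now)
        throw std::invalid_argument("valid_to expiry should be in future");
    if (valid_from >= valid_to)
        throw std::invalid_argument("valid_from should be before valid_to");
    if (valid_to - valid_from > max_lifetime_seconds)
        throw std::invalid_argument("Validity of the auth beyond max expiry");
}

int main() {
    validate_authority_window(/*now*/ 100, /*from*/ 90, /*to*/ 400, /*max*/ 1000);  // ok
    try {
        validate_authority_window(100, 90, 5000, /*max*/ 1000);                     // window too long
    } catch (const std::invalid_argument&) {
        return 0;
    }
    return 1;
}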

View file

@ -1,133 +0,0 @@
#include <graphene/chain/custom_permission_evaluator.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/custom_permission_object.hpp>
#include <graphene/chain/custom_account_authority_object.hpp>
#include <graphene/chain/hardfork.hpp>
namespace graphene
{
namespace chain
{
void_result create_custom_permission_evaluator::do_evaluate(const custom_permission_create_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
for (const auto &account_weight_pair : op.auth.account_auths)
{
account_weight_pair.first(d);
}
const auto &pindex = d.get_index_type<custom_permission_index>().indices().get<by_account_and_permission>();
auto pitr = pindex.find(boost::make_tuple(op.owner_account, op.permission_name));
FC_ASSERT(pitr == pindex.end(), "Permission name already exists for the given account");
auto count = pindex.count(boost::make_tuple(op.owner_account));
FC_ASSERT(count < d.get_global_properties().parameters.rbac_max_permissions_per_account(), "Max permissions per account reached");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
object_id_type create_custom_permission_evaluator::do_apply(const custom_permission_create_operation &op)
{
try
{
database &d = db();
return d.create<custom_permission_object>([&op](custom_permission_object &obj) mutable {
obj.account = op.owner_account;
obj.permission_name = op.permission_name;
obj.auth = op.auth;
})
.id;
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result update_custom_permission_evaluator::do_evaluate(const custom_permission_update_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
const custom_permission_object &pobj = op.permission_id(d);
FC_ASSERT(pobj.account == op.owner_account, "Only owner account can update permission object");
if (op.new_auth)
{
FC_ASSERT(!(*op.new_auth == pobj.auth), "New authority provided is not different from old authority");
for (const auto &account_weight_pair : op.new_auth->account_auths)
{
account_weight_pair.first(d);
}
}
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
object_id_type update_custom_permission_evaluator::do_apply(const custom_permission_update_operation &op)
{
try
{
database &d = db();
const custom_permission_object &pobj = op.permission_id(d);
d.modify(pobj, [&op](custom_permission_object &obj) {
if (op.new_auth)
obj.auth = *op.new_auth;
});
return op.permission_id;
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result delete_custom_permission_evaluator::do_evaluate(const custom_permission_delete_operation &op)
{
try
{
const database &d = db();
auto now = d.head_block_time();
FC_ASSERT(now >= HARDFORK_NFT_TIME, "Not allowed until NFT HF");
op.owner_account(d);
const custom_permission_object &pobj = op.permission_id(d);
FC_ASSERT(pobj.account == op.owner_account, "Only owner account can delete permission object");
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
void_result delete_custom_permission_evaluator::do_apply(const custom_permission_delete_operation &op)
{
try
{
database &d = db();
const custom_permission_object &pobj = op.permission_id(d);
// Remove the account authority objects linked to this permission
const auto& cindex = d.get_index_type<custom_account_authority_index>().indices().get<by_permission_and_op>();
vector<std::reference_wrapper<const custom_account_authority_object>> custom_auths;
auto crange = cindex.equal_range(boost::make_tuple(pobj.id));
// Store the references to the account authorities
for(const custom_account_authority_object& cobj : boost::make_iterator_range(crange.first, crange.second))
{
custom_auths.push_back(cobj);
}
// Now remove the account authorities
for(const auto& cauth : custom_auths)
{
d.remove(cauth);
}
// Now finally remove the permission
d.remove(pobj);
return void_result();
}
FC_CAPTURE_AND_RETHROW((op))
}
} // namespace chain
} // namespace graphene

View file

@ -21,6 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <fc/smart_ref_impl.hpp>
#include "db_balance.cpp"
#include "db_bet.cpp"
#include "db_block.cpp"
@ -30,6 +31,6 @@
#include "db_maint.cpp"
#include "db_management.cpp"
#include "db_market.cpp"
#include "db_notify.cpp"
#include "db_update.cpp"
#include "db_witness_schedule.cpp"
#include "db_notify.cpp"

View file

@ -34,11 +34,11 @@ namespace graphene { namespace chain {
asset database::get_balance(account_id_type owner, asset_id_type asset_id) const
{
auto& index = get_index_type< primary_index< account_balance_index > >().get_secondary_index<balances_by_account_index>();
auto abo = index.get_account_balance( owner, asset_id );
if( !abo )
auto& index = get_index_type<account_balance_index>().indices().get<by_account_asset>();
auto itr = index.find(boost::make_tuple(owner, asset_id));
if( itr == index.end() )
return asset(0, asset_id);
return abo->get_balance();
return itr->get_balance();
}
asset database::get_balance(const account_object& owner, const asset_object& asset_obj) const
@ -65,9 +65,9 @@ void database::adjust_balance(account_id_type account, asset delta )
if( delta.amount == 0 )
return;
auto& index = get_index_type< primary_index< account_balance_index > >().get_secondary_index<balances_by_account_index>();
auto abo = index.get_account_balance( account, delta.asset_id );
if( !abo )
auto& index = get_index_type<account_balance_index>().indices().get<by_account_asset>();
auto itr = index.find(boost::make_tuple(account, delta.asset_id));
if(itr == index.end())
{
FC_ASSERT( delta.amount > 0, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}",
("a",account(*this).name)
@ -77,14 +77,11 @@ void database::adjust_balance(account_id_type account, asset delta )
b.owner = account;
b.asset_type = delta.asset_id;
b.balance = delta.amount.value;
if( b.asset_type == asset_id_type() ) // CORE asset
b.maintenance_flag = true;
});
} else {
if( delta.amount < 0 )
FC_ASSERT( abo->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}",
("a",account(*this).name)("b",to_pretty_string(abo->get_balance()))("r",to_pretty_string(-delta)));
modify(*abo, [delta](account_balance_object& b) {
FC_ASSERT( itr->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account(*this).name)("b",to_pretty_string(itr->get_balance()))("r",to_pretty_string(-delta)));
modify(*itr, [delta](account_balance_object& b) {
b.adjust_balance(delta);
});
}
@ -140,10 +137,8 @@ void database::adjust_sweeps_vesting_balance(account_id_type account, int64_t de
b.balance = delta;
});
} else {
if( delta < 0 ) {
uint64_t delta_uint64 = -delta;
FC_ASSERT( itr->get_balance() >= delta_uint64, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account)("b",itr->get_balance())("r",-delta));
}
if( delta < 0 )
FC_ASSERT( itr->get_balance() >= -delta, "Insufficient Balance: ${a}'s balance of ${b} is less than required ${r}", ("a",account)("b",itr->get_balance())("r",-delta));
modify(*itr, [&delta,&asset_id,this](sweeps_vesting_balance_object& b) {
b.adjust_balance( asset( delta, asset_id ) );
b.last_claim_date = head_block_time();
@ -212,7 +207,7 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b
acct.get_id() == GRAPHENE_TEMP_ACCOUNT )
{
// The blockchain's accounts do not get cashback; it simply goes to the reserve pool.
modify( get_core_dynamic_data(), [amount](asset_dynamic_data_object& d) {
modify(get(asset_id_type()).dynamic_asset_data_id(*this), [amount](asset_dynamic_data_object& d) {
d.current_supply -= amount;
});
return;
@ -227,15 +222,10 @@ void database::deposit_cashback(const account_object& acct, share_type amount, b
if( new_vbid.valid() )
{
modify( acct, [&new_vbid]( account_object& _acct )
modify( acct, [&]( account_object& _acct )
{
_acct.cashback_vb = *new_vbid;
} );
modify( acct.statistics( *this ), []( account_statistics_object& aso )
{
aso.has_cashback_vb = true;
} );
}
return;
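Illustrative sketch, not taken from the hunks above: the adjust_balance control flow in this file, with a std::map replacing the account_balance_index. One side of the hunk resolves balances through a balances_by_account secondary index and the other through the by_account_asset ordered index, but the create-or-modify logic is the same.

#include <cstdint>
#include <map>
#include <stdexcept>
#include <utility>

using account_id = uint32_t;
using asset_id = uint32_t;

struct toy_balances {
    std::map<std::pair<account_id, asset_id>, int64_t> balances;

    void adjust_balance(account_id account, asset_id asset, int64_t delta) {
        if (delta == 0) return;
        auto itr = balances.find({account, asset});
        if (itr == balances.end()) {
            // No balance object yet: only a positive delta may create one.
            if (delta <= 0) throw std::runtime_error("Insufficient Balance");
            balances[{account, asset}] = delta;
        } else {
            if (delta < 0 && itr->second < -delta)
                throw std::runtime_error("Insufficient Balance");
            itr->second += delta;
        }
    }
};

int main() {
    toy_balances db;
    db.adjust_balance(1, 0, 100);
    db.adjust_balance(1, 0, -40);
    return db.balances[{1, 0}] == 60 ? 0 : 1;
}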

View file

@ -303,6 +303,8 @@ void database::settle_betting_market_group(const betting_market_group_object& be
remove(betting_market);
}
const event_object& event = betting_market_group.event_id(*this);
fc_dlog(fc::logger::get("betting"), "removing betting market group ${id}", ("id", betting_market_group.id));
remove(betting_market_group);
@ -535,9 +537,11 @@ int match_bet(database& db, const bet_object& taker_bet, const bet_object& maker
// because we matched at the maker's odds and not the taker's odds, the remaining amount to match
// may not be an even multiple of the taker's odds; round it down.
share_type taker_remaining_factor = unrounded_taker_remaining_amount_to_match / takers_odds_maker_odds_ratio;
share_type taker_remaining_maker_amount_to_match = taker_remaining_factor * takers_odds_maker_odds_ratio;
share_type taker_remaining_bet_amount = taker_remaining_factor * takers_odds_taker_odds_ratio;
taker_refund_amount = taker_bet.amount_to_bet.amount - taker_amount_to_match - taker_remaining_bet_amount;
//idump((taker_remaining_factor)(taker_remaining_maker_amount_to_match)(taker_remaining_bet_amount)(taker_refund_amount));
}
if (taker_refund_amount > share_type())
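Illustrative sketch, not taken from the hunk above: the rounding step in match_bet. After matching at the maker's odds, the taker's leftover stake is trimmed down to an even multiple of the odds ratio and the remainder is refunded. Plain int64_t replaces share_type, and the ratio values below are made up for the example.

#include <cassert>
#include <cstdint>

int main() {
    int64_t takers_odds_taker_odds_ratio = 7;   // taker-side units per matching "factor"
    int64_t takers_odds_maker_odds_ratio = 5;   // maker-side units per matching "factor"
    int64_t unrounded_taker_remaining_amount_to_match = 23;  // maker-side units left

    // Integer division floors, so both remaining amounts become whole multiples.
    int64_t factor = unrounded_taker_remaining_amount_to_match / takers_odds_maker_odds_ratio;  // 4
    int64_t remaining_maker_amount = factor * takers_odds_maker_odds_ratio;                     // 20
    int64_t remaining_bet_amount = factor * takers_odds_taker_odds_ratio;                       // 28

    assert(remaining_maker_amount == 20 && remaining_bet_amount == 28);
    // Whatever part of the original stake is not covered by remaining_bet_amount is refunded.
    return 0;
}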

View file

@ -39,18 +39,16 @@
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <graphene/chain/exceptions.hpp>
#include <graphene/chain/evaluator.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/db/object_database.hpp>
#include <fc/crypto/digest.hpp>
#include <boost/filesystem.hpp>
#include <fc/smart_ref_impl.hpp>
namespace {
struct proposed_operations_digest_accumulator
{
typedef void result_type;
void operator()(const graphene::chain::proposal_create_operation& proposal)
{
for (auto& operation: proposal.proposed_ops)
@ -58,20 +56,20 @@ namespace {
proposed_operations_digests.push_back(fc::digest(operation.op));
}
}
//empty template method is needed for all other operation types
//we can ignore them, we are interested in only proposal_create_operation
template<class T>
void operator()(const T&)
void operator()(const T&)
{}
std::vector<fc::sha256> proposed_operations_digests;
};
std::vector<fc::sha256> gather_proposed_operations_digests(const graphene::chain::transaction& trx)
{
proposed_operations_digest_accumulator digest_accumulator;
for (auto& operation: trx.operations)
{
if( operation.which() != graphene::chain::operation::tag<graphene::chain::betting_market_group_create_operation>::value
@ -80,7 +78,7 @@ namespace {
else
edump( ("Found dup"));
}
return digest_accumulator.proposed_operations_digests;
}
}
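Illustrative sketch, not taken from the hunks above: the duplicated-operations check built on the digest accumulator. Digests of every proposed operation already known (existing proposal objects plus pending transactions) go into a set, and a new transaction is rejected when any of its proposed operations hashes to one already present. std::hash over strings stands in for fc::digest/fc::sha256 and the graphene operation variant.

#include <cstddef>
#include <functional>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

using operation = std::string;     // stand-in for the graphene operation variant
using digest_type = std::size_t;   // stand-in for fc::sha256

digest_type digest(const operation& op) { return std::hash<operation>{}(op); }

void check_for_duplicates(const std::vector<operation>& already_proposed,
                          const std::vector<operation>& new_proposed_ops) {
    std::set<digest_type> existing;
    for (const auto& op : already_proposed)
        existing.insert(digest(op));
    for (const auto& op : new_proposed_ops)
        if (existing.count(digest(op)))
            throw std::runtime_error("duplicated operation detected in a proposal");
}

int main() {
    std::vector<operation> known = {"transfer A->B 10", "update witness W"};
    check_for_duplicates(known, {"transfer B->C 5"});       // ok, a new operation
    try {
        check_for_duplicates(known, {"transfer A->B 10"});  // exact duplicate, rejected
    } catch (const std::runtime_error&) {
        return 0;
    }
    return 1;
}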
@ -150,27 +148,24 @@ std::vector<block_id_type> database::get_block_ids_on_fork(block_id_type head_of
result.emplace_back(branches.first.back()->previous_id());
return result;
}
void database::check_transaction_for_duplicated_operations(const signed_transaction& trx)
void database::check_tansaction_for_duplicated_operations(const signed_transaction& trx)
{
const auto& proposal_index = get_index<proposal_object>();
std::set<fc::sha256> existed_operations_digests;
proposal_index.inspect_all_objects( [&](const object& obj){
const proposal_object& proposal = static_cast<const proposal_object&>(obj);
auto proposed_operations_digests = gather_proposed_operations_digests( proposal.proposed_transaction );
existed_operations_digests.insert( proposed_operations_digests.begin(), proposed_operations_digests.end() );
});
for (auto& pending_transaction: _pending_tx)
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
for (auto &pending_transaction : _pending_tx)
{
auto proposed_operations_digests = gather_proposed_operations_digests(pending_transaction);
existed_operations_digests.insert(proposed_operations_digests.begin(), proposed_operations_digests.end());
}
auto proposed_operations_digests = gather_proposed_operations_digests(pending_transaction);
existed_operations_digests.insert(proposed_operations_digests.begin(), proposed_operations_digests.end());
}
auto proposed_operations_digests = gather_proposed_operations_digests(trx);
for (auto& digest: proposed_operations_digests)
{
@ -190,12 +185,7 @@ bool database::push_block(const signed_block& new_block, uint32_t skip)
bool result;
detail::with_skip_flags( *this, skip, [&]()
{
std::vector<processed_transaction> pending_tx = [this] {
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
return std::move(_pending_tx);
}();
detail::without_pending_transactions( *this, std::move(pending_tx),
detail::without_pending_transactions( *this, std::move(_pending_tx),
[&]()
{
result = _push_block(new_block);
@ -206,94 +196,74 @@ bool database::push_block(const signed_block& new_block, uint32_t skip)
bool database::_push_block(const signed_block& new_block)
{ try {
boost::filesystem::space_info si = boost::filesystem::space(get_data_dir());
FC_ASSERT((si.available) > 104857600, "Rejecting block due to low disk space"); // 104857600 bytes = 100 MB
uint32_t skip = get_node_properties().skip_flags;
const auto now = fc::time_point::now().sec_since_epoch();
if( _fork_db.head() && new_block.timestamp.sec_since_epoch() > now - 86400 )
if( !(skip&skip_fork_db) )
{
/// TODO: if the block is greater than the head block and before the next maitenance interval
// verify that the block signer is in the current set of active witnesses.
shared_ptr<fork_item> prev_block = _fork_db.fetch_block( new_block.previous );
GRAPHENE_ASSERT( prev_block, unlinkable_block_exception, "block does not link to known chain" );
if( prev_block->scheduled_witnesses && !(skip&(skip_witness_schedule_check|skip_witness_signature)) )
verify_signing_witness( new_block, *prev_block );
}
shared_ptr<fork_item> new_head = _fork_db.push_block(new_block);
//If the head block from the longest chain does not build off of the current head, we need to switch forks.
if( new_head->data.previous != head_block_id() )
{
//If the newly pushed block is the same height as head, we get head back in new_head
//Only switch forks if new_head is actually higher than head
if( new_head->data.block_num() > head_block_num() )
shared_ptr<fork_item> new_head = _fork_db.push_block(new_block);
//If the head block from the longest chain does not build off of the current head, we need to switch forks.
if( new_head->data.previous != head_block_id() )
{
wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) );
auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id());
// pop blocks until we hit the forked block
while( head_block_id() != branches.second.back()->data.previous )
//If the newly pushed block is the same height as head, we get head back in new_head
//Only switch forks if new_head is actually higher than head
if( new_head->data.block_num() > head_block_num() )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
wlog( "Switching to fork: ${id}", ("id",new_head->data.id()) );
auto branches = _fork_db.fetch_branch_from(new_head->data.id(), head_block_id());
// pop blocks until we hit the forked block
while( head_block_id() != branches.second.back()->data.previous )
pop_block();
// push all blocks on the new fork
for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
{
ilog( "pushing blocks from fork ${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->data.id()) );
optional<fc::exception> except;
try {
undo_database::session session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
_block_id_to_block.store( (*ritr)->id, (*ritr)->data );
session.commit();
}
catch ( const fc::exception& e ) { except = e; }
if( except )
{
wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) );
// remove the rest of branches.first from the fork_db, those blocks are invalid
while( ritr != branches.first.rend() )
{
_fork_db.remove( (*ritr)->data.id() );
++ritr;
}
_fork_db.set_head( branches.second.front() );
// pop all blocks from the bad fork
while( head_block_id() != branches.second.back()->data.previous )
pop_block();
// restore all blocks from the good fork
for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr )
{
auto session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
_block_id_to_block.store( new_block.id(), (*ritr)->data );
session.commit();
}
throw *except;
}
}
return true;
}
// push all blocks on the new fork
for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr )
{
ilog( "pushing block from fork #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
optional<fc::exception> except;
try {
undo_database::session session = _undo_db.start_undo_session();
apply_block( (*ritr)->data, skip );
update_witnesses( **ritr );
_block_id_to_block.store( (*ritr)->id, (*ritr)->data );
session.commit();
}
catch ( const fc::exception& e ) { except = e; }
if( except )
{
wlog( "exception thrown while switching forks ${e}", ("e",except->to_detail_string() ) );
// remove the rest of branches.first from the fork_db, those blocks are invalid
while( ritr != branches.first.rend() )
{
ilog( "removing block from fork_db #${n} ${id}", ("n",(*ritr)->data.block_num())("id",(*ritr)->id) );
_fork_db.remove( (*ritr)->id );
++ritr;
}
_fork_db.set_head( branches.second.front() );
// pop all blocks from the bad fork
while( head_block_id() != branches.second.back()->data.previous )
{
ilog( "popping block #${n} ${id}", ("n",head_block_num())("id",head_block_id()) );
pop_block();
}
ilog( "Switching back to fork: ${id}", ("id",branches.second.front()->data.id()) );
// restore all blocks from the good fork
for( auto ritr2 = branches.second.rbegin(); ritr2 != branches.second.rend(); ++ritr2 )
{
ilog( "pushing block #${n} ${id}", ("n",(*ritr2)->data.block_num())("id",(*ritr2)->id) );
auto session = _undo_db.start_undo_session();
apply_block( (*ritr2)->data, skip );
_block_id_to_block.store( (*ritr2)->id, (*ritr2)->data );
session.commit();
}
throw *except;
}
}
return true;
else return false;
}
else return false;
}
try {
auto session = _undo_db.start_undo_session();
apply_block(new_block, skip);
if( new_block.timestamp.sec_since_epoch() > now - 86400 )
update_witnesses( *new_head );
_block_id_to_block.store(new_block.id(), new_block);
session.commit();
} catch ( const fc::exception& e ) {
@ -305,73 +275,6 @@ bool database::_push_block(const signed_block& new_block)
return false;
} FC_CAPTURE_AND_RETHROW( (new_block) ) }
void database::verify_signing_witness( const signed_block& new_block, const fork_item& fork_entry )const
{
FC_ASSERT( new_block.timestamp >= fork_entry.next_block_time );
uint32_t slot_num = ( new_block.timestamp - fork_entry.next_block_time ).to_seconds() / block_interval();
const global_property_object& gpo = get_global_properties();
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
{
uint64_t index = ( fork_entry.next_block_aslot + slot_num ) % fork_entry.scheduled_witnesses->size();
const auto& scheduled_witness = (*fork_entry.scheduled_witnesses)[index];
FC_ASSERT( new_block.witness == scheduled_witness.first, "Witness produced block at wrong time",
("block witness",new_block.witness)("scheduled",scheduled_witness)("slot_num",slot_num) );
FC_ASSERT( new_block.validate_signee( scheduled_witness.second ) );
}
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM &&
slot_num != 0 )
{
witness_id_type wid;
const witness_schedule_object& wso = get_witness_schedule_object();
// ask the near scheduler who goes in the given slot
bool slot_is_near = wso.scheduler.get_slot(slot_num, wid);
if(! slot_is_near)
{
// if the near scheduler doesn't know, we have to extend it to
// a far scheduler.
// n.b. instantiating it is slow, but block gaps long enough to
// need it are likely pretty rare.
witness_scheduler_rng far_rng(wso.rng_seed.begin(), GRAPHENE_FAR_SCHEDULE_CTR_IV);
far_future_witness_scheduler far_scheduler =
far_future_witness_scheduler(wso.scheduler, far_rng);
if(!far_scheduler.get_slot(slot_num, wid))
{
// no scheduled witness -- somebody set up us the bomb
// n.b. this code path is impossible, the present
// implementation of far_future_witness_scheduler
// returns true unconditionally
assert( false );
}
}
FC_ASSERT( new_block.witness == wid, "Witness produced block at wrong time",
("block witness",new_block.witness)("scheduled",wid)("slot_num",slot_num) );
FC_ASSERT( new_block.validate_signee( wid(*this).signing_key ) );
}
}
void database::update_witnesses( fork_item& fork_entry )const
{
if( fork_entry.scheduled_witnesses ) return;
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
fork_entry.next_block_aslot = dpo.current_aslot + 1;
fork_entry.next_block_time = get_slot_time( 1 );
const witness_schedule_object& wso = get_witness_schedule_object();
fork_entry.scheduled_witnesses = std::make_shared< vector< pair< witness_id_type, public_key_type > > >();
fork_entry.scheduled_witnesses->reserve( wso.current_shuffled_witnesses.size() );
for( size_t i = 0; i < wso.current_shuffled_witnesses.size(); ++i )
{
const auto& witness = wso.current_shuffled_witnesses[i](*this);
fork_entry.scheduled_witnesses->emplace_back( wso.current_shuffled_witnesses[i], witness.signing_key );
}
}
/**
* Attempts to push the transaction into the pending queue
*
@ -395,43 +298,51 @@ processed_transaction database::_push_transaction( const signed_transaction& trx
{
// If this is the first transaction pushed after applying a block, start a new undo session.
// This allows us to quickly rewind to the clean state of the head block, in case a new block arrives.
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
if (!_pending_tx_session.valid()) {
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_pending_tx_session = _undo_db.start_undo_session();
}
}
if( !_pending_tx_session.valid() )
_pending_tx_session = _undo_db.start_undo_session();
// Create a temporary undo session as a child of _pending_tx_session.
// The temporary session will be discarded by the destructor if
// _apply_transaction fails. If we make it to merge(), we
// apply the changes.
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
auto temp_session = _undo_db.start_undo_session();
auto processed_trx = _apply_transaction(trx);
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
_pending_tx.push_back(processed_trx);
}
auto processed_trx = _apply_transaction( trx );
_pending_tx.push_back(processed_trx);
// notify_changed_objects();
// The transaction applied successfully. Merge its changes into the pending block session.
temp_session.merge();
// notify anyone listening to pending transactions
notify_on_pending_transaction( trx );
on_pending_transaction( trx );
return processed_trx;
}
processed_transaction database::validate_transaction( const signed_transaction& trx )
{
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
auto session = _undo_db.start_undo_session();
return _apply_transaction( trx );
}
class push_proposal_nesting_guard {
public:
push_proposal_nesting_guard( uint32_t& nesting_counter, const database& db )
: orig_value(nesting_counter), counter(nesting_counter)
{
FC_ASSERT( counter < db.get_global_properties().active_witnesses.size() * 2, "Max proposal nesting depth exceeded!" );
counter++;
}
~push_proposal_nesting_guard()
{
if( --counter != orig_value )
elog( "Unexpected proposal nesting count value: ${n} != ${o}", ("n",counter)("o",orig_value) );
}
private:
const uint32_t orig_value;
uint32_t& counter;
};
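For illustration, here is a stand-alone sketch of how an RAII nesting guard like push_proposal_nesting_guard bounds recursion. The nesting_guard type, the fixed limit of 4, and push_nested_proposal are invented for the example; the real guard derives its limit from active_witnesses.size() * 2.

#include <cstdint>
#include <iostream>
#include <stdexcept>

// Invented stand-in for the guard above: bump a shared counter on entry,
// refuse to go deeper than a fixed limit, restore the counter on exit.
class nesting_guard {
public:
   nesting_guard(uint32_t& counter, uint32_t max_depth)
      : _orig(counter), _counter(counter) {
      if (_counter >= max_depth)
         throw std::runtime_error("Max proposal nesting depth exceeded!");
      ++_counter;
   }
   ~nesting_guard() {
      if (--_counter != _orig)
         std::cerr << "unexpected nesting counter value\n";
   }
private:
   const uint32_t _orig;
   uint32_t& _counter;
};

uint32_t depth = 0;

// A proposal whose execution pushes another nested proposal, recursively.
void push_nested_proposal(uint32_t remaining) {
   nesting_guard guard(depth, 4);   // limit of 4 chosen only for the sketch
   if (remaining > 0)
      push_nested_proposal(remaining - 1);
}

int main() {
   push_nested_proposal(3);         // fine: stays within the limit
   try {
      push_nested_proposal(10);     // throws once the limit is hit
   } catch (const std::exception& e) {
      std::cout << e.what() << "\n";
   }
}

Because the constructor throws before incrementing when the limit is reached, a runaway chain of nested proposals fails with a clean exception instead of exhausting the stack.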
processed_transaction database::push_proposal(const proposal_object& proposal)
{ try {
transaction_evaluation_state eval_state(this);
@ -443,12 +354,12 @@ processed_transaction database::push_proposal(const proposal_object& proposal)
size_t old_applied_ops_size = _applied_ops.size();
try {
push_proposal_nesting_guard guard( _push_proposal_nesting_depth, *this );
if( _undo_db.size() >= _undo_db.max_size() )
_undo_db.set_max_size( _undo_db.size() + 1 );
auto session = _undo_db.start_undo_session(true);
for( auto& op : proposal.proposed_transaction.operations )
eval_state.operation_results.emplace_back(apply_operation(eval_state, op));
remove_son_proposal(proposal);
remove(proposal);
session.merge();
} catch ( const fc::exception& e ) {
@ -456,12 +367,7 @@ processed_transaction database::push_proposal(const proposal_object& proposal)
{
for( size_t i=old_applied_ops_size,n=_applied_ops.size(); i<n; i++ )
{
if(_applied_ops[i].valid()) {
ilog("removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])));
}
else{
ilog("Can't remove failed operation from applied_ops (operation is not valid), op_id : ${op_id}", ("op_id", i));
}
ilog( "removing failed operation from applied_ops: ${op}", ("op", *(_applied_ops[i])) );
_applied_ops[i].reset();
}
}
@ -527,52 +433,47 @@ signed_block database::_generate_block(
// the value of the "when" variable is known, which means we need to
// re-apply pending transactions in this method.
//
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
_pending_tx_session = _undo_db.start_undo_session();
}
_pending_tx_session.reset();
_pending_tx_session = _undo_db.start_undo_session();
uint64_t postponed_tx_count = 0;
// pop pending state (reset to head block state)
for( const processed_transaction& tx : _pending_tx )
{
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
for (const processed_transaction &tx : _pending_tx) {
size_t new_total_size = total_block_size + fc::raw::pack_size(tx);
size_t new_total_size = total_block_size + fc::raw::pack_size( tx );
// postpone transaction if it would make block too big
if (new_total_size >= maximum_block_size) {
postponed_tx_count++;
continue;
}
// postpone transaction if it would make block too big
if( new_total_size >= maximum_block_size )
{
postponed_tx_count++;
continue;
}
try {
auto temp_session = _undo_db.start_undo_session();
processed_transaction ptx = _apply_transaction(tx);
temp_session.merge();
try
{
auto temp_session = _undo_db.start_undo_session();
processed_transaction ptx = _apply_transaction( tx );
temp_session.merge();
// We have to recompute pack_size(ptx) because it may be different
// than pack_size(tx) (i.e. if one or more results increased
// their size)
total_block_size += fc::raw::pack_size(ptx);
pending_block.transactions.push_back(ptx);
} catch (const fc::exception &e) {
// Do nothing, transaction will not be re-applied
wlog("Transaction was not processed while generating block due to ${e}", ("e", e));
wlog("The transaction was ${t}", ("t", tx));
}
// We have to recompute pack_size(ptx) because it may be different
// than pack_size(tx) (i.e. if one or more results increased
// their size)
total_block_size += fc::raw::pack_size( ptx );
pending_block.transactions.push_back( ptx );
}
catch ( const fc::exception& e )
{
// Do nothing, transaction will not be re-applied
wlog( "Transaction was not processed while generating block due to ${e}", ("e", e) );
wlog( "The transaction was ${t}", ("t", tx) );
}
}
if( postponed_tx_count > 0 )
{
wlog( "Postponed ${n} transactions due to block size limit", ("n", postponed_tx_count) );
}
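The packing rule in this loop is simple: keep a running total_block_size and postpone any pending transaction that would push it past maximum_block_size. A tiny sketch of that rule follows; all sizes are made-up numbers and plain integers stand in for fc::raw::pack_size().

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Made-up sizes; plain integers replace fc::raw::pack_size(tx).
int main() {
   const std::size_t maximum_block_size = 100;
   std::size_t total_block_size = 20;                 // header plus already-included data
   const std::vector<std::size_t> pending_tx_sizes{30, 50, 40, 10};

   std::size_t included_count = 0;
   uint64_t postponed_tx_count = 0;
   for (std::size_t tx_size : pending_tx_sizes) {
      const std::size_t new_total_size = total_block_size + tx_size;
      if (new_total_size >= maximum_block_size) {     // would make the block too big
         ++postponed_tx_count;                        // leave it for a later block
         continue;
      }
      total_block_size = new_total_size;
      ++included_count;
   }
   std::cout << "included " << included_count << " txs, postponed "
             << postponed_tx_count << "\n";           // included 2, postponed 2
}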
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
}
_pending_tx_session.reset();
// We have temporarily broken the invariant that
// _pending_tx_session is the result of applying _pending_tx, as
@ -584,7 +485,7 @@ signed_block database::_generate_block(
pending_block.timestamp = when;
pending_block.transaction_merkle_root = pending_block.calculate_merkle_root();
pending_block.witness = witness_id;
// Genesis witnesses start with a default initial secret
if( witness_obj.next_secret_hash == secret_hash_type::hash( secret_hash_type() ) ) {
pending_block.previous_secret = secret_hash_type();
@ -594,7 +495,7 @@ signed_block database::_generate_block(
fc::raw::pack( last_enc, witness_obj.previous_secret );
pending_block.previous_secret = last_enc.result();
}
secret_hash_type::encoder next_enc;
fc::raw::pack( next_enc, block_signing_private_key );
fc::raw::pack( next_enc, pending_block.previous_secret );
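The previous_secret / next_secret_hash pair implements a hash-commitment chain: each produced block reveals the preimage of the hash committed earlier and commits to the hash of the next secret. A minimal sketch, assuming a toy hash function (std::hash rather than the chain's secret_hash_type), is:

#include <cassert>
#include <functional>
#include <iostream>
#include <string>

// Toy stand-in for secret_hash_type::hash(); the chain uses a cryptographic
// hash, std::hash<std::string> is only here to keep the sketch self-contained.
static std::size_t toy_hash(const std::string& s) { return std::hash<std::string>{}(s); }

int main() {
   // The witness derives one secret per block it produces (labelled strings
   // here) and publishes only the hash of the *next* secret in each block.
   std::string secret_1 = "secret-for-block-1";
   std::string secret_2 = "secret-for-block-2";

   std::size_t committed_hash = toy_hash(secret_1);   // next_secret_hash published in block N

   // In block N+1 the witness reveals secret_1 and commits to secret_2.
   std::string revealed        = secret_1;            // previous_secret in block N+1
   std::size_t next_commitment = toy_hash(secret_2);  // next_secret_hash in block N+1

   // Validators check the reveal against the earlier commitment, mirroring
   // FC_ASSERT( hash(next_block.previous_secret) == witness.next_secret_hash ).
   assert(toy_hash(revealed) == committed_hash);
   std::cout << "reveal matches commitment; next commitment is " << next_commitment << "\n";
}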
@ -609,7 +510,7 @@ signed_block database::_generate_block(
FC_ASSERT( fc::raw::pack_size(pending_block) <= get_global_properties().parameters.maximum_block_size );
}
push_block( pending_block, skip | skip_transaction_signatures ); // skip authority check when pushing self-generated blocks
push_block( pending_block, skip );
return pending_block;
} FC_CAPTURE_AND_RETHROW( (witness_id) ) }
@ -620,16 +521,13 @@ signed_block database::_generate_block(
*/
void database::pop_block()
{ try {
{
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
_pending_tx_session.reset();
}
_pending_tx_session.reset();
auto head_id = head_block_id();
optional<signed_block> head_block = fetch_block_by_id( head_id );
GRAPHENE_ASSERT( head_block.valid(), pop_empty_chain, "there are no blocks to pop" );
_fork_db.pop_block();
_block_id_to_block.remove( head_id );
pop_undo();
_popped_tx.insert( _popped_tx.begin(), head_block->transactions.begin(), head_block->transactions.end() );
@ -638,8 +536,6 @@ void database::pop_block()
void database::clear_pending()
{ try {
const std::lock_guard<std::mutex> pending_tx_lock{_pending_tx_mutex};
const std::lock_guard<std::mutex> pending_tx_session_lock{_pending_tx_session_mutex};
assert( (_pending_tx.size() == 0) || _pending_tx_session.valid() );
_pending_tx.clear();
_pending_tx_session.reset();
@ -658,7 +554,7 @@ uint32_t database::push_applied_operation( const operation& op )
void database::set_applied_operation_result( uint32_t op_id, const operation_result& result )
{
assert( op_id < _applied_ops.size() );
if( _applied_ops[op_id].valid() )
if( _applied_ops[op_id] )
_applied_ops[op_id]->result = result;
else
{
@ -707,7 +603,7 @@ void database::_apply_block( const signed_block& next_block )
const witness_object& signing_witness = validate_block_header(skip, next_block);
const auto& global_props = get_global_properties();
const auto& dynamic_global_props = get_dynamic_global_properties();
const auto& dynamic_global_props = get<dynamic_global_property_object>(dynamic_global_property_id_type());
bool maint_needed = (dynamic_global_props.next_maintenance_time <= next_block.timestamp);
_current_block_num = next_block_num;
@ -715,8 +611,6 @@ void database::_apply_block( const signed_block& next_block )
_current_op_in_trx = 0;
_current_virtual_op = 0;
_issue_453_affected_assets.clear();
for( const auto& trx : next_block.transactions )
{
/* We do not need to push the undo state for each transaction
@ -731,68 +625,47 @@ void database::_apply_block( const signed_block& next_block )
// For VOPs derived directly from a real op,
// use the real op's (block_num,trx_in_block,op_in_trx), virtual_op starts from 1.
// For VOPs created after processed all transactions,
// trx_in_block = the_block.transactions.size(), virtual_op starts from 0.
// trx_in_block = the_block.transactions.size(), virtual_op starts from 0.
++_current_trx_in_block;
_current_op_in_trx = 0;
_current_virtual_op = 0;
_current_virtual_op = 0;
}
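The coordinate scheme described in the comment above can be made concrete with a small sketch. The op_coordinates struct and the numbers are invented; the point is only how real operations, derived virtual operations, and block-level virtual operations are numbered.

#include <cstdint>
#include <iostream>
#include <vector>

// Invented container for the coordinates discussed above: real ops carry
// virtual_op == 0, VOPs derived from a real op count from 1 and reuse its
// (block_num, trx_in_block, op_in_trx), and VOPs created after all
// transactions use trx_in_block == transactions.size() with virtual_op from 0.
struct op_coordinates {
   uint32_t block_num, trx_in_block, op_in_trx, virtual_op;
};

int main() {
   const uint32_t block_num = 100, trx_count = 2;
   std::vector<op_coordinates> applied;
   for (uint32_t trx = 0; trx < trx_count; ++trx) {
      applied.push_back({block_num, trx, 0, 0});     // the real operation
      applied.push_back({block_num, trx, 0, 1});     // a VOP derived from it
   }
   applied.push_back({block_num, trx_count, 0, 0});  // block-level VOP after all transactions
   for (const auto& c : applied)
      std::cout << c.block_num << '.' << c.trx_in_block << '.'
                << c.op_in_trx << '.' << c.virtual_op << '\n';
}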
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM) {
update_witness_schedule(next_block);
for(const auto& active_sons : global_props.active_sons) {
if(!active_sons.second.empty()) {
update_son_schedule(active_sons.first, next_block);
}
}
}
const uint32_t missed = update_witness_missed_blocks( next_block );
update_global_dynamic_data( next_block, missed );
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM)
update_witness_schedule(next_block);
update_global_dynamic_data(next_block);
update_signing_witness(signing_witness, next_block);
update_last_irreversible_block();
// Are we at the maintenance interval?
if( maint_needed )
perform_chain_maintenance(next_block, global_props);
check_ending_lotteries();
check_ending_nft_lotteries();
create_block_summary(next_block);
place_delayed_bets(); // must happen after update_global_dynamic_data() updates the time
clear_expired_transactions();
clear_expired_proposals();
clear_expired_orders();
update_expired_feeds(); // this will update expired feeds and some core exchange rates
update_core_exchange_rates(); // this will update remaining core exchange rates
update_expired_feeds();
update_withdraw_permissions();
update_tournaments();
update_betting_markets(next_block.timestamp);
finalize_expired_offers();
// n.b., update_maintenance_flag() happens this late
// because get_slot_time() / get_slot_at_time() is needed above
// TODO: figure out if we could collapse this function into
// update_global_dynamic_data() as perhaps these methods only need
// to be called for header validation?
update_maintenance_flag( maint_needed );
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM) {
update_witness_schedule();
for(const auto& active_sidechain_type : active_sidechain_types(dynamic_global_props.time)) {
if(global_props.active_sons.at(active_sidechain_type).size() > 0) {
update_son_schedule(active_sidechain_type);
}
}
}
if (global_props.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
update_witness_schedule();
if( !_node_property_object.debug_updates.empty() )
apply_debug_updates();
// notify observers that the block has been applied
notify_applied_block( next_block ); //emit
applied_block( next_block ); //emit
_applied_ops.clear();
notify_changed_objects();
@ -810,19 +683,6 @@ processed_transaction database::apply_transaction(const signed_transaction& trx,
return result;
}
class undo_size_restorer {
public:
undo_size_restorer( undo_database& db ) : _db( db ), old_max( db.max_size() ) {
_db.set_max_size( old_max * 2 );
}
~undo_size_restorer() {
_db.set_max_size( old_max );
}
private:
undo_database& _db;
size_t old_max;
};
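undo_size_restorer follows the save-and-restore RAII idiom: remember the old limit, double it for the scope, and put the original value back in the destructor even if an exception unwinds the stack. A minimal sketch with invented types:

#include <cassert>
#include <cstddef>

// Invented miniature of the restorer above: remember the old limit, double it
// for the current scope, and put it back when the scope ends, exceptions included.
struct size_limit {
   std::size_t max_size = 0;
};

class size_restorer {
public:
   explicit size_restorer(size_limit& l) : _l(l), _old(l.max_size) { _l.max_size = _old * 2; }
   ~size_restorer() { _l.max_size = _old; }
private:
   size_limit&  _l;
   std::size_t  _old;
};

int main() {
   size_limit undo{1024};
   {
      size_restorer guard(undo);        // headroom for nested undo sessions
      assert(undo.max_size == 2048);
   }
   assert(undo.max_size == 1024);       // restored once the scope ends
}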
processed_transaction database::_apply_transaction(const signed_transaction& trx)
{ try {
uint32_t skip = get_node_properties().skip_flags;
@ -832,28 +692,18 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
auto& trx_idx = get_mutable_index_type<transaction_index>();
const chain_id_type& chain_id = get_chain_id();
transaction_id_type trx_id;
if( !(skip & skip_transaction_dupe_check) )
{
trx_id = trx.id();
FC_ASSERT( trx_idx.indices().get<by_trx_id>().find(trx_id) == trx_idx.indices().get<by_trx_id>().end() );
}
auto trx_id = trx.id();
FC_ASSERT( (skip & skip_transaction_dupe_check) ||
trx_idx.indices().get<by_trx_id>().find(trx_id) == trx_idx.indices().get<by_trx_id>().end() );
transaction_evaluation_state eval_state(this);
const chain_parameters& chain_parameters = get_global_properties().parameters;
eval_state._trx = &trx;
if( !(skip & (skip_transaction_signatures | skip_authority_check) ) )
{
auto get_active = [this]( account_id_type id ) { return &id(*this).active; };
auto get_owner = [this]( account_id_type id ) { return &id(*this).owner; };
auto get_custom = [this]( account_id_type id, const operation& op ) {
return get_account_custom_authorities(id, op);
};
trx.verify_authority( chain_id, get_active, get_owner, get_custom,
true,
get_global_properties().parameters.max_authority_depth );
auto get_active = [&]( account_id_type id ) { return &id(*this).active; };
auto get_owner = [&]( account_id_type id ) { return &id(*this).owner; };
trx.verify_authority( chain_id, get_active, get_owner, get_global_properties().parameters.max_authority_depth );
}
//Skip all manner of expiration and TaPoS checking if we're on block 1; It's impossible that the transaction is
@ -878,7 +728,7 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
//Insert transaction into unique transactions database.
if( !(skip & skip_transaction_dupe_check) )
{
create<transaction_object>([&trx_id,&trx](transaction_object& transaction) {
create<transaction_object>([&](transaction_object& transaction) {
transaction.trx_id = trx_id;
transaction.trx = trx;
});
@ -886,7 +736,6 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
eval_state.operation_results.reserve(trx.operations.size());
const undo_size_restorer undo_guard( _undo_db );
//Finally process the operations
processed_transaction ptrx(trx);
_current_op_in_trx = 0;
@ -900,9 +749,9 @@ processed_transaction database::_apply_transaction(const signed_transaction& trx
ptrx.operation_results = std::move(eval_state.operation_results);
//Make sure the temp account has no non-zero balances
const auto& balances = get_index_type< primary_index< account_balance_index > >().get_secondary_index< balances_by_account_index >().get_account_balances( GRAPHENE_TEMP_ACCOUNT );
for( const auto b : balances )
FC_ASSERT(b.second->balance == 0);
const auto& index = get_index_type<account_balance_index>().indices().get<by_account_asset>();
auto range = index.equal_range( boost::make_tuple( GRAPHENE_TEMP_ACCOUNT ) );
std::for_each(range.first, range.second, [](const account_balance_object& b) { FC_ASSERT(b.balance == 0); });
return ptrx;
} FC_CAPTURE_AND_RETHROW( (trx) ) }
@ -931,7 +780,7 @@ const witness_object& database::validate_block_header( uint32_t skip, const sign
FC_ASSERT( secret_hash_type::hash( next_block.previous_secret ) == witness.next_secret_hash, "",
( "previous_secret", next_block.previous_secret )( "next_secret_hash", witness.next_secret_hash ) );
if( !(skip&skip_witness_signature) )
if( !(skip&skip_witness_signature) )
FC_ASSERT( next_block.validate_signee( witness.signing_key ) );
if( !(skip&skip_witness_schedule_check) )

View file

@ -42,7 +42,7 @@ void database::debug_dump()
const asset_dynamic_data_object& core_asset_data = db.get_core_asset().dynamic_asset_data_id(db);
const auto& balance_index = db.get_index_type<account_balance_index>().indices();
const auto& statistics_index = db.get_index_type<account_stats_index>().indices();
const simple_index<account_statistics_object>& statistics_index = db.get_index_type<simple_index<account_statistics_object>>();
map<asset_id_type,share_type> total_balances;
map<asset_id_type,share_type> total_debts;
share_type core_in_orders;
@ -118,10 +118,10 @@ void debug_apply_update( database& db, const fc::variant_object& vo )
auto it_id = vo.find("id");
FC_ASSERT( it_id != vo.end() );
from_variant( it_id->value(), oid, GRAPHENE_MAX_NESTED_OBJECTS );
from_variant( it_id->value(), oid );
action = ( vo.size() == 1 ) ? db_action_delete : db_action_write;
from_variant( vo["id"], oid, GRAPHENE_MAX_NESTED_OBJECTS );
from_variant( vo["id"], oid );
if( vo.size() == 1 )
action = db_action_delete;
auto it_action = vo.find("_action" );
@ -143,19 +143,25 @@ void debug_apply_update( database& db, const fc::variant_object& vo )
switch( action )
{
case db_action_create:
/*
idx.create( [&]( object& obj )
{
idx.object_from_variant( vo, obj );
} );
*/
FC_ASSERT( false );
break;
case db_action_write:
db.modify( db.get_object( oid ), [&]( object& obj )
{
idx.object_default( obj );
idx.object_from_variant( vo, obj, GRAPHENE_MAX_NESTED_OBJECTS );
idx.object_from_variant( vo, obj );
} );
break;
case db_action_update:
db.modify( db.get_object( oid ), [&]( object& obj )
{
idx.object_from_variant( vo, obj, GRAPHENE_MAX_NESTED_OBJECTS );
idx.object_from_variant( vo, obj );
} );
break;
case db_action_delete:

View file

@ -27,12 +27,8 @@
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/chain_property_object.hpp>
#include <graphene/chain/global_property_object.hpp>
#include <graphene/chain/custom_permission_object.hpp>
#include <graphene/chain/custom_account_authority_object.hpp>
#include <graphene/chain/offer_object.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/son_proposal_object.hpp>
#include <fc/smart_ref_impl.hpp>
#include <ctime>
#include <algorithm>
@ -41,47 +37,42 @@ namespace graphene { namespace chain {
const asset_object& database::get_core_asset() const
{
return *_p_core_asset_obj;
}
const asset_dynamic_data_object& database::get_core_dynamic_data() const
{
return *_p_core_dynamic_data_obj;
return get(asset_id_type());
}
const global_property_object& database::get_global_properties()const
{
return *_p_global_prop_obj;
return get( global_property_id_type() );
}
const chain_property_object& database::get_chain_properties()const
{
return *_p_chain_property_obj;
return get( chain_property_id_type() );
}
const dynamic_global_property_object& database::get_dynamic_global_properties() const
{
return *_p_dyn_global_prop_obj;
return get( dynamic_global_property_id_type() );
}
const fee_schedule& database::current_fee_schedule()const
{
return std::ref( *get_global_properties().parameters.current_fees );
return get_global_properties().parameters.current_fees;
}
time_point_sec database::head_block_time()const
{
return get_dynamic_global_properties().time;
return get( dynamic_global_property_id_type() ).time;
}
uint32_t database::head_block_num()const
{
return get_dynamic_global_properties().head_block_number;
return get( dynamic_global_property_id_type() ).head_block_number;
}
block_id_type database::head_block_id()const
{
return get_dynamic_global_properties().head_block_id;
return get( dynamic_global_property_id_type() ).head_block_id;
}
decltype( chain_parameters::block_interval ) database::block_interval( )const
@ -109,7 +100,7 @@ uint32_t database::last_non_undoable_block_num() const
return head_block_num() - _undo_db.size();
}
std::vector<uint32_t> database::get_seeds( asset_id_type for_asset, uint8_t count_winners ) const
std::vector<uint32_t> database::get_seeds(asset_id_type for_asset, uint8_t count_winners) const
{
FC_ASSERT( count_winners <= 64 );
std::string salted_string = std::string(_random_number_generator._seed) + std::to_string(for_asset.instance.value);
@ -150,250 +141,4 @@ const std::vector<uint32_t> database::get_winner_numbers( asset_id_type for_asse
return result;
}
const account_statistics_object& database::get_account_stats_by_owner( account_id_type owner )const
{
auto& idx = get_index_type<account_stats_index>().indices().get<by_owner>();
auto itr = idx.find( owner );
FC_ASSERT( itr != idx.end(), "Can not find account statistics object for owner ${a}", ("a",owner) );
return *itr;
}
const witness_schedule_object& database::get_witness_schedule_object()const
{
return *_p_witness_schedule_obj;
}
vector<authority> database::get_account_custom_authorities(account_id_type account, const operation& op)const
{
const auto& pindex = get_index_type<custom_permission_index>().indices().get<by_account_and_permission>();
const auto& cindex = get_index_type<custom_account_authority_index>().indices().get<by_permission_and_op>();
auto prange = pindex.equal_range(boost::make_tuple(account));
time_point_sec now = head_block_time();
vector<authority> custom_auths;
for(const custom_permission_object& pobj : boost::make_iterator_range(prange.first, prange.second))
{
auto crange = cindex.equal_range(boost::make_tuple(pobj.id, op.which()));
for(const custom_account_authority_object& cobj : boost::make_iterator_range(crange.first, crange.second))
{
if(now >= cobj.valid_from && now < cobj.valid_to)
{
custom_auths.push_back(pobj.auth);
}
}
}
return custom_auths;
}
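The inner check above only accepts a custom account authority whose validity window contains the current head block time, i.e. valid_from <= now < valid_to. A small sketch of that filter, with invented types and timestamps:

#include <cstdint>
#include <iostream>
#include <vector>

// Invented types and timestamps; the only point is the [valid_from, valid_to)
// window test applied to each candidate authority.
struct custom_authority {
   uint64_t valid_from, valid_to;
   const char* name;
};

int main() {
   const uint64_t now = 1000000;        // stand-in for head_block_time()
   std::vector<custom_authority> candidates{
      { 900000, 1100000, "still-valid"   },
      { 500000,  900000, "expired"       },
      {1100000, 1200000, "not-yet-valid" }};
   for (const auto& c : candidates)
      if (now >= c.valid_from && now < c.valid_to)
         std::cout << c.name << " applies\n";   // only "still-valid" prints
}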
bool database::item_locked(const nft_id_type &item) const
{
const auto &offer_idx = get_index_type<offer_index>();
const auto &oidx = dynamic_cast<const base_primary_index &>(offer_idx);
const auto &market_items = oidx.get_secondary_index<graphene::chain::offer_item_index>();
auto items_itr = market_items._locked_items.find(item);
return (items_itr != market_items._locked_items.end());
}
bool database::account_role_valid(const account_role_object &aro, account_id_type account, optional<int> op_type) const
{
return (aro.valid_to > head_block_time()) &&
(aro.whitelisted_accounts.find(account) != aro.whitelisted_accounts.end()) &&
(!op_type || (aro.allowed_operations.find(*op_type) != aro.allowed_operations.end()));
}
std::set<son_id_type> database::get_sons_being_deregistered()
{
std::set<son_id_type> ret;
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get< by_id >();
for( auto& son_proposal : son_proposal_idx )
{
if(son_proposal.proposal_type == son_proposal_type::son_deregister_proposal)
{
ret.insert(son_proposal.son_id);
}
}
return ret;
}
std::set<son_id_type> database::get_sons_to_be_deregistered()
{
std::set<son_id_type> ret;
const auto& son_idx = get_index_type<son_index>().indices().get< by_id >();
for( auto& son : son_idx )
{
bool need_to_be_deregistered = true;
for(const auto& status : son.statuses)
{
const auto& sidechain = status.first;
if(status.second != son_status::in_maintenance)
need_to_be_deregistered = false;
if(need_to_be_deregistered)
{
auto stats = son.statistics(*this);
// TODO : We need to add a function that returns whether we can deregister a SON
// i.e. with the introduction of PW code we have to decide whether the SON
// is needed for the release of funds from the PW
if(stats.last_active_timestamp.contains(sidechain)) {
if (head_block_time() - stats.last_active_timestamp.at(sidechain) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
need_to_be_deregistered = false;
}
}
}
}
if(need_to_be_deregistered)
{
ret.insert(son.id);
}
}
return ret;
}
std::set<son_id_type> database::get_sons_being_reported_down()
{
std::set<son_id_type> ret;
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get< by_id >();
for( auto& son_proposal : son_proposal_idx )
{
if(son_proposal.proposal_type == son_proposal_type::son_report_down_proposal)
{
ret.insert(son_proposal.son_id);
}
}
return ret;
}
fc::optional<operation> database::create_son_deregister_proposal( son_id_type son_id, account_id_type paying_son )
{
son_deregister_operation son_dereg_op;
son_dereg_op.payer = get_global_properties().parameters.son_account();
son_dereg_op.son_id = son_id;
proposal_create_operation proposal_op;
proposal_op.fee_paying_account = paying_son;
proposal_op.proposed_ops.push_back( op_wrapper( son_dereg_op ) );
uint32_t lifetime = ( get_global_properties().parameters.block_interval * get_global_properties().active_witnesses.size() ) * 3;
proposal_op.expiration_time = time_point_sec( head_block_time().sec_since_epoch() + lifetime );
return proposal_op;
}
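The proposal lifetime in create_son_deregister_proposal above is block_interval * active_witnesses.size() * 3. As a worked example with assumed values (a 3-second block interval and 11 active witnesses), that gives 99 seconds:

#include <cstdint>
#include <iostream>

// Assumed values, not read from the chain: a 3-second block interval and
// 11 active witnesses give a lifetime of 3 * 11 * 3 = 99 seconds.
int main() {
   const uint32_t block_interval   = 3;
   const uint32_t active_witnesses = 11;
   const uint32_t lifetime = block_interval * active_witnesses * 3;
   std::cout << "proposal expires " << lifetime << " seconds after head_block_time()\n";
}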
signed_transaction database::create_signed_transaction( const fc::ecc::private_key& signing_private_key, const operation& op )
{
signed_transaction processed_trx;
auto dyn_props = get_dynamic_global_properties();
processed_trx.set_reference_block( dyn_props.head_block_id );
processed_trx.set_expiration( head_block_time() + get_global_properties().parameters.maximum_time_until_expiration );
processed_trx.operations.push_back( op );
current_fee_schedule().set_fee( processed_trx.operations.back() );
processed_trx.sign( signing_private_key, get_chain_id() );
return processed_trx;
}
bool database::is_son_dereg_valid( son_id_type son_id )
{
const auto& son_idx = get_index_type<son_index>().indices().get< by_id >();
auto son = son_idx.find( son_id );
if(son == son_idx.end())
{
return false;
}
bool status_son_dereg_valid = true;
for (const auto &active_sidechain_type : active_sidechain_types(head_block_time())) {
if(son->statuses.at(active_sidechain_type) != son_status::in_maintenance)
status_son_dereg_valid = false;
if(status_son_dereg_valid)
{
if(son->statistics(*this).last_active_timestamp.contains(active_sidechain_type)) {
if (head_block_time() - son->statistics(*this).last_active_timestamp.at(active_sidechain_type) < fc::seconds(get_global_properties().parameters.son_deregister_time())) {
status_son_dereg_valid = false;
}
}
}
}
return status_son_dereg_valid;
}
bool database::is_son_active( sidechain_type type, son_id_type son_id )
{
const auto& son_idx = get_index_type<son_index>().indices().get< by_id >();
auto son = son_idx.find( son_id );
if(son == son_idx.end()) {
return false;
}
const global_property_object& gpo = get_global_properties();
if(!gpo.active_sons.contains(type)) {
return false;
}
const auto& gpo_as = gpo.active_sons.at(type);
vector<son_id_type> active_son_ids;
active_son_ids.reserve(gpo_as.size());
std::transform(gpo_as.cbegin(), gpo_as.cend(),
std::inserter(active_son_ids, active_son_ids.end()),
[](const son_sidechain_info& swi) {
return swi.son_id;
});
if(active_son_ids.empty()) {
return false;
}
auto it_son = std::find(active_son_ids.begin(), active_son_ids.end(), son_id);
return (it_son != active_son_ids.end());
}
vector<uint64_t> database::get_random_numbers(uint64_t minimum, uint64_t maximum, uint64_t selections, bool duplicates)
{
FC_ASSERT( selections <= 100000 );
if (duplicates == false) {
FC_ASSERT( maximum - minimum >= selections );
}
vector<uint64_t> v;
v.reserve(selections);
if (duplicates) {
for (uint64_t i = 0; i < selections; i++) {
int64_t rnd = get_random_bits(maximum - minimum) + minimum;
v.push_back(rnd);
}
} else {
vector<uint64_t> tmpv;
tmpv.reserve(selections);
for (uint64_t i = minimum; i < maximum; i++) {
tmpv.push_back(i);
}
for (uint64_t i = 0; (i < selections) && (tmpv.size() > 0); i++) {
uint64_t idx = get_random_bits(tmpv.size());
v.push_back(tmpv.at(idx));
tmpv.erase(tmpv.begin() + idx);
}
}
return v;
}
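When duplicates are disallowed, the loop above samples without replacement: it materializes the candidate range, then repeatedly removes a randomly chosen element. A stand-alone sketch of the same idea, using std::mt19937_64 in place of get_random_bits():

#include <cstdint>
#include <iostream>
#include <random>
#include <vector>

// std::mt19937_64 stands in for the chain's deterministic get_random_bits().
int main() {
   const uint64_t minimum = 10, maximum = 20, selections = 5;
   std::mt19937_64 rng(42);

   std::vector<uint64_t> candidates;                       // the whole [minimum, maximum) range
   for (uint64_t i = minimum; i < maximum; ++i)
      candidates.push_back(i);

   std::vector<uint64_t> picked;
   for (uint64_t i = 0; i < selections && !candidates.empty(); ++i) {
      std::uniform_int_distribution<std::size_t> dist(0, candidates.size() - 1);
      const std::size_t idx = dist(rng);
      picked.push_back(candidates[idx]);                   // select ...
      candidates.erase(candidates.begin() + idx);          // ... and remove, so no duplicates
   }
   for (uint64_t v : picked)
      std::cout << v << ' ';
   std::cout << '\n';
}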
bool database::is_asset_creation_allowed(const string &symbol)
{
if (symbol == "BTC")
{
if (head_block_time() < HARDFORK_SON_TIME)
return false;
}
return true;
}
}
}
} }

View file

@ -49,26 +49,13 @@
#include <graphene/chain/tournament_object.hpp>
#include <graphene/chain/match_object.hpp>
#include <graphene/chain/game_object.hpp>
#include <graphene/chain/custom_permission_object.hpp>
#include <graphene/chain/custom_account_authority_object.hpp>
#include <graphene/chain/offer_object.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/random_number_object.hpp>
#include <graphene/chain/nft_object.hpp>
#include <graphene/chain/sport_object.hpp>
#include <graphene/chain/event_group_object.hpp>
#include <graphene/chain/event_object.hpp>
#include <graphene/chain/betting_market_object.hpp>
#include <graphene/chain/global_betting_statistics_object.hpp>
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/son_proposal_object.hpp>
#include <graphene/chain/son_wallet_object.hpp>
#include <graphene/chain/son_wallet_deposit_object.hpp>
#include <graphene/chain/son_wallet_withdraw_object.hpp>
#include <graphene/chain/sidechain_address_object.hpp>
#include <graphene/chain/sidechain_transaction_object.hpp>
#include <graphene/chain/account_evaluator.hpp>
#include <graphene/chain/asset_evaluator.hpp>
@ -90,22 +77,10 @@
#include <graphene/chain/event_evaluator.hpp>
#include <graphene/chain/betting_market_evaluator.hpp>
#include <graphene/chain/tournament_evaluator.hpp>
#include <graphene/chain/custom_permission_evaluator.hpp>
#include <graphene/chain/custom_account_authority_evaluator.hpp>
#include <graphene/chain/offer_evaluator.hpp>
#include <graphene/chain/nft_evaluator.hpp>
#include <graphene/chain/account_role_evaluator.hpp>
#include <graphene/chain/nft_lottery_evaluator.hpp>
#include <graphene/chain/son_evaluator.hpp>
#include <graphene/chain/son_wallet_evaluator.hpp>
#include <graphene/chain/son_wallet_deposit_evaluator.hpp>
#include <graphene/chain/son_wallet_withdraw_evaluator.hpp>
#include <graphene/chain/sidechain_address_evaluator.hpp>
#include <graphene/chain/sidechain_transaction_evaluator.hpp>
#include <graphene/chain/random_number_evaluator.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/smart_ref_impl.hpp>
#include <fc/uint128.hpp>
#include <fc/crypto/digest.hpp>
@ -188,29 +163,12 @@ const uint8_t betting_market_object::type_id;
const uint8_t bet_object::space_id;
const uint8_t bet_object::type_id;
const uint8_t nft_object::space_id;
const uint8_t nft_object::type_id;
const uint8_t betting_market_position_object::space_id;
const uint8_t betting_market_position_object::type_id;
const uint8_t global_betting_statistics_object::space_id;
const uint8_t global_betting_statistics_object::type_id;
const uint8_t offer_object::space_id;
const uint8_t offer_object::type_id;
const uint8_t offer_history_object::space_id;
const uint8_t offer_history_object::type_id;
const uint8_t account_role_object::space_id;
const uint8_t account_role_object::type_id;
const uint8_t nft_lottery_balance_object::space_id;
const uint8_t nft_lottery_balance_object::type_id;
const uint8_t random_number_object::space_id;
const uint8_t random_number_object::type_id;
void database::initialize_evaluators()
{
@ -285,110 +243,23 @@ void database::initialize_evaluators()
register_evaluator<lottery_reward_evaluator>();
register_evaluator<lottery_end_evaluator>();
register_evaluator<sweeps_vesting_claim_evaluator>();
register_evaluator<create_custom_permission_evaluator>();
register_evaluator<update_custom_permission_evaluator>();
register_evaluator<delete_custom_permission_evaluator>();
register_evaluator<create_custom_account_authority_evaluator>();
register_evaluator<update_custom_account_authority_evaluator>();
register_evaluator<delete_custom_account_authority_evaluator>();
register_evaluator<offer_evaluator>();
register_evaluator<bid_evaluator>();
register_evaluator<cancel_offer_evaluator>();
register_evaluator<finalize_offer_evaluator>();
register_evaluator<nft_metadata_create_evaluator>();
register_evaluator<nft_metadata_update_evaluator>();
register_evaluator<nft_mint_evaluator>();
register_evaluator<nft_safe_transfer_from_evaluator>();
register_evaluator<nft_approve_evaluator>();
register_evaluator<nft_set_approval_for_all_evaluator>();
register_evaluator<account_role_create_evaluator>();
register_evaluator<account_role_update_evaluator>();
register_evaluator<account_role_delete_evaluator>();
register_evaluator<nft_lottery_token_purchase_evaluator>();
register_evaluator<nft_lottery_reward_evaluator>();
register_evaluator<nft_lottery_end_evaluator>();
register_evaluator<create_son_evaluator>();
register_evaluator<update_son_evaluator>();
register_evaluator<deregister_son_evaluator>();
register_evaluator<son_heartbeat_evaluator>();
register_evaluator<son_report_down_evaluator>();
register_evaluator<son_maintenance_evaluator>();
register_evaluator<recreate_son_wallet_evaluator>();
register_evaluator<update_son_wallet_evaluator>();
register_evaluator<create_son_wallet_deposit_evaluator>();
register_evaluator<process_son_wallet_deposit_evaluator>();
register_evaluator<create_son_wallet_withdraw_evaluator>();
register_evaluator<process_son_wallet_withdraw_evaluator>();
register_evaluator<add_sidechain_address_evaluator>();
register_evaluator<update_sidechain_address_evaluator>();
register_evaluator<delete_sidechain_address_evaluator>();
register_evaluator<sidechain_transaction_create_evaluator>();
register_evaluator<sidechain_transaction_sign_evaluator>();
register_evaluator<sidechain_transaction_send_evaluator>();
register_evaluator<sidechain_transaction_settle_evaluator>();
register_evaluator<random_number_store_evaluator>();
}
void database::initialize_hardforks()
{
_hardfork_times.emplace_back(HARDFORK_357_TIME);
_hardfork_times.emplace_back(HARDFORK_359_TIME);
_hardfork_times.emplace_back(HARDFORK_385_TIME);
_hardfork_times.emplace_back(HARDFORK_409_TIME);
_hardfork_times.emplace_back(HARDFORK_413_TIME);
_hardfork_times.emplace_back(HARDFORK_415_TIME);
_hardfork_times.emplace_back(HARDFORK_416_TIME);
_hardfork_times.emplace_back(HARDFORK_419_TIME);
_hardfork_times.emplace_back(HARDFORK_436_TIME);
_hardfork_times.emplace_back(HARDFORK_445_TIME);
_hardfork_times.emplace_back(HARDFORK_453_TIME);
_hardfork_times.emplace_back(HARDFORK_480_TIME);
_hardfork_times.emplace_back(HARDFORK_483_TIME);
_hardfork_times.emplace_back(HARDFORK_516_TIME);
_hardfork_times.emplace_back(HARDFORK_533_TIME);
_hardfork_times.emplace_back(HARDFORK_538_TIME);
_hardfork_times.emplace_back(HARDFORK_555_TIME);
_hardfork_times.emplace_back(HARDFORK_563_TIME);
_hardfork_times.emplace_back(HARDFORK_572_TIME);
_hardfork_times.emplace_back(HARDFORK_599_TIME);
_hardfork_times.emplace_back(HARDFORK_607_TIME);
_hardfork_times.emplace_back(HARDFORK_613_TIME);
_hardfork_times.emplace_back(HARDFORK_615_TIME);
_hardfork_times.emplace_back(HARDFORK_999_TIME);
_hardfork_times.emplace_back(HARDFORK_1000_TIME);
_hardfork_times.emplace_back(HARDFORK_1001_TIME);
_hardfork_times.emplace_back(HARDFORK_5050_1_TIME);
_hardfork_times.emplace_back(HARDFORK_CORE_429_TIME);
_hardfork_times.emplace_back(HARDFORK_GPOS_TIME);
_hardfork_times.emplace_back(HARDFORK_NFT_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_FOR_HIVE_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_TIME);
_hardfork_times.emplace_back(HARDFORK_SON2_TIME);
_hardfork_times.emplace_back(HARDFORK_SON_FOR_ETHEREUM_TIME);
_hardfork_times.emplace_back(HARDFORK_SWEEPS_TIME);
std::sort(_hardfork_times.begin(), _hardfork_times.end());
}
void database::initialize_indexes()
{
reset_indexes();
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_undo_db.set_max_size(GRAPHENE_MIN_UNDO_HISTORY);
_undo_db.set_max_size( GRAPHENE_MIN_UNDO_HISTORY );
//Protocol object indexes
add_index< primary_index<asset_index, 13> >(); // 8192 assets per chunk
add_index< primary_index<asset_index> >();
add_index< primary_index<force_settlement_index> >();
auto acnt_index = add_index< primary_index<account_index, 20> >(); // ~1 million accounts per chunk
auto acnt_index = add_index< primary_index<account_index> >();
acnt_index->add_secondary_index<account_member_index>();
acnt_index->add_secondary_index<account_referrer_index>();
add_index< primary_index<committee_member_index, 8> >(); // 256 members per chunk
add_index< primary_index<son_index> >();
add_index< primary_index<witness_index, 10> >(); // 1024 witnesses per chunk
add_index< primary_index<committee_member_index> >();
add_index< primary_index<witness_index> >();
add_index< primary_index<limit_order_index > >();
add_index< primary_index<call_order_index > >();
@ -413,39 +284,19 @@ void database::initialize_indexes()
tournament_details_idx->add_secondary_index<tournament_players_index>();
add_index< primary_index<match_index> >();
add_index< primary_index<game_index> >();
add_index< primary_index<custom_permission_index> >();
add_index< primary_index<custom_account_authority_index> >();
auto offer_idx = add_index< primary_index<offer_index> >();
offer_idx->add_secondary_index<offer_item_index>();
add_index< primary_index<nft_metadata_index > >();
add_index< primary_index<nft_index > >();
add_index< primary_index<account_role_index> >();
add_index< primary_index<son_proposal_index> >();
add_index< primary_index<son_wallet_index> >();
add_index< primary_index<son_wallet_deposit_index> >();
add_index< primary_index<son_wallet_withdraw_index> >();
add_index< primary_index<sidechain_address_index> >();
add_index< primary_index<sidechain_transaction_index> >();
//Implementation object indexes
add_index< primary_index<transaction_index > >();
auto bal_idx = add_index< primary_index<account_balance_index > >();
bal_idx->add_secondary_index<balances_by_account_index>();
add_index< primary_index<asset_bitasset_data_index, 13 > >(); // 8192
add_index< primary_index<account_balance_index > >();
add_index< primary_index<asset_bitasset_data_index > >();
add_index< primary_index<asset_dividend_data_object_index > >();
add_index< primary_index<simple_index<global_property_object >> >();
add_index< primary_index<simple_index<dynamic_global_property_object >> >();
add_index< primary_index<account_stats_index > >();
add_index< primary_index<simple_index<account_statistics_object >> >();
add_index< primary_index<simple_index<asset_dynamic_data_object >> >();
add_index< primary_index<flat_index< block_summary_object >> >();
add_index< primary_index<simple_index<chain_property_object > > >();
add_index< primary_index<simple_index<witness_schedule_object > > >();
add_index< primary_index<simple_index<son_schedule_object > > >();
add_index< primary_index<simple_index<budget_record_object > > >();
add_index< primary_index< special_authority_index > >();
add_index< primary_index< buyback_index > >();
@ -459,10 +310,6 @@ void database::initialize_indexes()
add_index< primary_index<lottery_balance_index > >();
add_index< primary_index<sweeps_vesting_balance_index > >();
add_index< primary_index<offer_history_index > >();
add_index< primary_index<nft_lottery_balance_index > >();
add_index< primary_index<son_stats_index > >();
add_index< primary_index<random_number_index > >();
}
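The numeric template argument passed to primary_index in the removed lines above is a chunk-size exponent, which is what the inline comments (8192 assets, ~1 million accounts, 256 committee members, 1024 witnesses) refer to: the quoted counts are 2^N for argument N. A one-liner confirming the arithmetic:

#include <cstdio>

// The per-chunk counts quoted in the removed comments are 2^N for template
// argument N: 2^8 = 256, 2^10 = 1024, 2^13 = 8192, 2^20 = 1048576.
int main() {
   for (int n : {8, 10, 13, 20})
      std::printf("N = %2d -> %d objects per chunk\n", n, 1 << n);
}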
@ -476,9 +323,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT(genesis_state.initial_active_witnesses <= genesis_state.initial_witness_candidates.size(),
"initial_active_witnesses is larger than the number of candidate witnesses.");
const std::lock_guard<std::mutex> undo_db_lock{_undo_db_mutex};
_undo_db.disable();
struct auth_inhibitor {
auth_inhibitor(database& db) : db(db), old_flags(db.node_properties().skip_flags)
{ db.node_properties().skip_flags |= skip_authority_check; }
@ -507,19 +352,12 @@ void database::init_genesis(const genesis_state_type& genesis_state)
n.owner.weight_threshold = 1;
n.active.weight_threshold = 1;
n.name = "committee-account";
n.statistics = create<account_statistics_object>( [&n](account_statistics_object& s){
s.owner = n.id;
s.name = n.name;
s.core_in_balance = GRAPHENE_MAX_SHARE_SUPPLY;
}).id;
n.statistics = create<account_statistics_object>( [&](account_statistics_object& s){ s.owner = n.id; }).id;
});
FC_ASSERT(committee_account.get_id() == GRAPHENE_COMMITTEE_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "witness-account";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_WITNESS_ACCOUNT;
@ -529,10 +367,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}).get_id() == GRAPHENE_WITNESS_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "relaxed-committee-account";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT;
@ -542,10 +377,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}).get_id() == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "null-account";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT;
@ -555,10 +387,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}).get_id() == GRAPHENE_NULL_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "temp-account";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 0;
a.active.weight_threshold = 0;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_TEMP_ACCOUNT;
@ -568,10 +397,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}).get_id() == GRAPHENE_TEMP_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "proxy-to-self";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT;
@ -581,10 +407,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}).get_id() == GRAPHENE_PROXY_TO_SELF_ACCOUNT);
FC_ASSERT(create<account_object>([this](account_object& a) {
a.name = "default-dividend-distribution";
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_PROXY_TO_SELF_ACCOUNT;
@ -598,12 +421,9 @@ void database::init_genesis(const genesis_state_type& genesis_state)
uint64_t id = get_index<account_object>().get_next_id().instance();
if( id >= genesis_state.immutable_parameters.num_special_accounts )
break;
const account_object& acct = create<account_object>([this,id](account_object& a) {
const account_object& acct = create<account_object>([&](account_object& a) {
a.name = "special-account-" + std::to_string(id);
a.statistics = create<account_statistics_object>([&a](account_statistics_object& s){
s.owner = a.id;
s.name = a.name;
}).id;
a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
a.owner.weight_threshold = 1;
a.active.weight_threshold = 1;
a.registrar = a.lifetime_referrer = a.referrer = account_id_type(id);
@ -617,12 +437,12 @@ void database::init_genesis(const genesis_state_type& genesis_state)
// Create core asset
const asset_dynamic_data_object& dyn_asset =
create<asset_dynamic_data_object>([](asset_dynamic_data_object& a) {
create<asset_dynamic_data_object>([&](asset_dynamic_data_object& a) {
a.current_supply = GRAPHENE_MAX_SHARE_SUPPLY;
});
const asset_dividend_data_object& div_asset =
create<asset_dividend_data_object>([&genesis_state](asset_dividend_data_object& a) {
create<asset_dividend_data_object>([&](asset_dividend_data_object& a) {
a.options.minimum_distribution_interval = 3*24*60*60;
a.options.minimum_fee_percentage = 10*GRAPHENE_1_PERCENT;
a.options.next_payout_time = genesis_state.initial_timestamp + fc::days(1);
@ -631,7 +451,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
});
const asset_object& core_asset =
create<asset_object>( [&genesis_state,&div_asset,&dyn_asset]( asset_object& a ) {
create<asset_object>( [&]( asset_object& a ) {
a.symbol = GRAPHENE_SYMBOL;
a.options.max_supply = genesis_state.max_core_supply;
a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS;
@ -644,12 +464,9 @@ void database::init_genesis(const genesis_state_type& genesis_state)
a.options.core_exchange_rate.quote.asset_id = asset_id_type(0);
a.dynamic_asset_data_id = dyn_asset.id;
a.dividend_data_id = div_asset.id;
});
FC_ASSERT( dyn_asset.id == asset_dynamic_data_id_type() );
FC_ASSERT( asset_id_type(core_asset.id) == asset().asset_id );
FC_ASSERT( get_balance(account_id_type(), asset_id_type()) == asset(dyn_asset.current_supply) );
_p_core_asset_obj = &core_asset;
_p_core_dynamic_data_obj = &dyn_asset;
});
assert( asset_id_type(core_asset.id) == asset().asset_id );
assert( get_balance(account_id_type(), asset_id_type()) == asset(dyn_asset.current_supply) );
#ifdef _DEFAULT_DIVIDEND_ASSET
// Create default dividend asset
@ -682,7 +499,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
a.dynamic_asset_data_id = dyn_asset1.id;
a.dividend_data_id = div_asset1.id;
});
FC_ASSERT( default_asset.id == asset_id_type(1) );
assert( default_asset.id == asset_id_type(1) );
#endif
// Create more special assets
@ -692,10 +509,10 @@ void database::init_genesis(const genesis_state_type& genesis_state)
if( id >= genesis_state.immutable_parameters.num_special_assets )
break;
const asset_dynamic_data_object& dyn_asset =
create<asset_dynamic_data_object>([](asset_dynamic_data_object& a) {
create<asset_dynamic_data_object>([&](asset_dynamic_data_object& a) {
a.current_supply = 0;
});
const asset_object& asset_obj = create<asset_object>( [id,&dyn_asset]( asset_object& a ) {
const asset_object& asset_obj = create<asset_object>( [&]( asset_object& a ) {
a.symbol = "SPECIAL" + std::to_string( id );
a.options.max_supply = 0;
a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS;
@ -715,14 +532,14 @@ void database::init_genesis(const genesis_state_type& genesis_state)
chain_id_type chain_id = genesis_state.compute_chain_id();
// Create global properties
_p_global_prop_obj = & create<global_property_object>([&genesis_state](global_property_object& p) {
create<global_property_object>([&](global_property_object& p) {
p.parameters = genesis_state.initial_parameters;
// Set fees to zero initially, so that genesis initialization needs not pay them
// We'll fix it at the end of the function
p.parameters.current_fees->zero_all_fees();
});
_p_dyn_global_prop_obj = & create<dynamic_global_property_object>([&genesis_state](dynamic_global_property_object& p) {
create<dynamic_global_property_object>([&](dynamic_global_property_object& p) {
p.time = genesis_state.initial_timestamp;
p.dynamic_flags = 0;
p.witness_budget = 0;
@ -735,7 +552,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT( (genesis_state.immutable_parameters.min_witness_count & 1) == 1, "min_witness_count must be odd" );
FC_ASSERT( (genesis_state.immutable_parameters.min_committee_member_count & 1) == 1, "min_committee_member_count must be odd" );
_p_chain_property_obj = & create<chain_property_object>([chain_id,&genesis_state](chain_property_object& p)
create<chain_property_object>([&](chain_property_object& p)
{
p.chain_id = chain_id;
p.immutable_parameters = genesis_state.immutable_parameters;
@ -859,7 +676,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
cop.active = cop.owner;
account_id_type owner_account_id = apply_operation(genesis_eval_state, cop).get<object_id_type>();
modify( owner_account_id(*this).statistics(*this), [&collateral_rec]( account_statistics_object& o ) {
modify( owner_account_id(*this).statistics(*this), [&]( account_statistics_object& o ) {
o.total_core_in_orders = collateral_rec.collateral;
});
@ -922,7 +739,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
vbo.owner = get_account_id(account.name);
vbo.balance = asset(vesting_balance.amount, get_asset_id(vesting_balance.asset_symbol));
if (vesting_balance.policy_type == "linear") {
auto initial_linear_vesting_policy = vesting_balance.policy.as<genesis_state_type::initial_bts_account_type::initial_linear_vesting_policy>( 20 );
auto initial_linear_vesting_policy = vesting_balance.policy.as<genesis_state_type::initial_bts_account_type::initial_linear_vesting_policy>();
linear_vesting_policy new_vesting_policy;
new_vesting_policy.begin_timestamp = initial_linear_vesting_policy.begin_timestamp;
new_vesting_policy.vesting_cliff_seconds = initial_linear_vesting_policy.vesting_cliff_seconds;
@ -930,7 +747,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
new_vesting_policy.begin_balance = initial_linear_vesting_policy.begin_balance;
vbo.policy = new_vesting_policy;
} else if (vesting_balance.policy_type == "cdd") {
auto initial_cdd_vesting_policy = vesting_balance.policy.as<genesis_state_type::initial_bts_account_type::initial_cdd_vesting_policy>( 20 );
auto initial_cdd_vesting_policy = vesting_balance.policy.as<genesis_state_type::initial_bts_account_type::initial_cdd_vesting_policy>();
cdd_vesting_policy new_vesting_policy;
new_vesting_policy.vesting_seconds = initial_cdd_vesting_policy.vesting_seconds;
new_vesting_policy.coin_seconds_earned = initial_cdd_vesting_policy.coin_seconds_earned;
@ -994,6 +811,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
const auto& idx = get_index_type<asset_index>().indices().get<by_symbol>();
auto it = idx.begin();
bool has_imbalanced_assets = false;
while( it != idx.end() )
{
@ -1005,6 +823,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
FC_ASSERT( debt_itr != total_debts.end() );
if( supply_itr->second != debt_itr->second )
{
has_imbalanced_assets = true;
elog( "Genesis for asset ${aname} is not balanced\n"
" Debt is ${debt}\n"
" Supply is ${supply}\n",
@ -1016,6 +835,10 @@ void database::init_genesis(const genesis_state_type& genesis_state)
}
++it;
}
// @romek
#if 0
FC_ASSERT( !has_imbalanced_assets );
#endif
// Save tallied supplies
for( const auto& item : total_supplies )
@ -1069,7 +892,7 @@ void database::init_genesis(const genesis_state_type& genesis_state)
});
// Set active witnesses
modify(get_global_properties(), [&genesis_state](global_property_object& p) {
modify(get_global_properties(), [&](global_property_object& p) {
for( uint32_t i = 1; i <= genesis_state.initial_active_witnesses; ++i )
{
p.active_witnesses.insert(witness_id_type(i));
@ -1077,7 +900,10 @@ void database::init_genesis(const genesis_state_type& genesis_state)
});
// Initialize witness schedule
_p_witness_schedule_obj = & create<witness_schedule_object>([this](witness_schedule_object& _wso)
#ifndef NDEBUG
const witness_schedule_object& wso =
#endif
create<witness_schedule_object>([&](witness_schedule_object& _wso)
{
// for scheduled
memset(_wso.rng_seed.begin(), 0, _wso.rng_seed.size());
@ -1101,78 +927,20 @@ void database::init_genesis(const genesis_state_type& genesis_state)
for( const witness_id_type& wid : get_global_properties().active_witnesses )
_wso.current_shuffled_witnesses.push_back( wid );
});
FC_ASSERT( _p_witness_schedule_obj->id == witness_schedule_id_type() );
// Initialize SON schedules
#ifndef NDEBUG
const son_schedule_object& ssobitcoin =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
// for scheduled
memset(_sso.rng_seed.begin(), 0, _sso.rng_seed.size());
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_bitcoin_sons = get_global_properties().active_sons.at(sidechain_type::bitcoin);
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_bitcoin_sons.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssobitcoin.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::bitcoin)) );
#ifndef NDEBUG
const son_schedule_object& ssoethereum =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
// for scheduled
memset(_sso.rng_seed.begin(), 0, _sso.rng_seed.size());
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_ethereum_sons = get_global_properties().active_sons.at(sidechain_type::ethereum);
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_ethereum_sons.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssoethereum.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::ethereum)) );
#ifndef NDEBUG
const son_schedule_object& ssohive =
#endif
create<son_schedule_object>([&](son_schedule_object& _sso)
{
// for scheduled
memset(_sso.rng_seed.begin(), 0, _sso.rng_seed.size());
witness_scheduler_rng rng(_sso.rng_seed.begin(), GRAPHENE_NEAR_SCHEDULE_CTR_IV);
auto init_hive_sons = get_global_properties().active_sons.at(sidechain_type::hive);
_sso.scheduler = son_scheduler();
_sso.scheduler._min_token_count = std::max(int(init_hive_sons.size()) / 2, 1);
_sso.last_scheduling_block = 0;
_sso.recent_slots_filled = fc::uint128::max_value();
});
assert( ssohive.id == son_schedule_id_type(get_son_schedule_id(sidechain_type::hive)) );
assert( wso.id == witness_schedule_id_type() );
// Enable fees
modify(get_global_properties(), [&genesis_state](global_property_object& p) {
p.parameters.current_fees = genesis_state.initial_parameters.current_fees;
});
// Create witness scheduler
//create<witness_schedule_object>([&]( witness_schedule_object& wso )
//{
// for( const witness_id_type& wid : get_global_properties().active_witnesses )
// wso.current_shuffled_witnesses.push_back( wid );
//});
// Create FBA counters
create<fba_accumulator_object>([&]( fba_accumulator_object& acc )
{

File diff suppressed because it is too large.

View file

@ -24,11 +24,7 @@
#include <graphene/chain/database.hpp>
#include <graphene/chain/chain_property_object.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/chain/special_authority_object.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/nft_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/io/fstream.hpp>
@ -44,7 +40,6 @@ database::database() :
{
initialize_indexes();
initialize_evaluators();
initialize_hardforks();
}
database::~database()
@ -52,84 +47,33 @@ database::~database()
clear_pending();
}
// Right now, we leave undo_db enabled while replaying when the bookie plugin is
// enabled. It depends on new/changed/removed object notifications, and those are
// only fired when the undo_db is enabled.
// So we use this helper object to disable undo_db only when the _slow_replays
// flag does not forbid it.
class auto_undo_enabler
{
const bool _slow_replays;
undo_database& _undo_db;
bool _disabled;
public:
auto_undo_enabler(bool slow_replays, undo_database& undo_db) :
_slow_replays(slow_replays),
_undo_db(undo_db),
_disabled(false)
{
}
~auto_undo_enabler()
{
try{
enable();
} FC_CAPTURE_AND_LOG(("undo_db enabling crash"))
}
void enable()
{
if(!_disabled)
return;
_undo_db.enable();
_disabled = false;
}
void disable()
{
if(_disabled)
return;
if(_slow_replays)
return;
_undo_db.disable();
_disabled = true;
}
};
void database::reindex( fc::path data_dir )
void database::reindex(fc::path data_dir, const genesis_state_type& initial_allocation)
{ try {
ilog( "reindexing blockchain" );
wipe(data_dir, false);
open(data_dir, [&initial_allocation]{return initial_allocation;});
auto start = fc::time_point::now();
auto last_block = _block_id_to_block.last();
if( !last_block ) {
elog( "!no last block" );
edump((last_block));
return;
}
if( last_block->block_num() <= head_block_num()) return;
ilog( "reindexing blockchain" );
auto start = fc::time_point::now();
const auto last_block_num = last_block->block_num();
uint32_t undo_point = last_block_num < 50 ? 0 : last_block_num - 50;
ilog( "Replaying blocks, starting at ${next}...", ("next",head_block_num() + 1) );
auto_undo_enabler undo(_slow_replays, _undo_db);
if( head_block_num() >= undo_point )
ilog( "Replaying blocks..." );
// Right now, we leave undo_db enabled when replaying when the bookie plugin is
// enabled. It depends on new/changed/removed object notifications, and those are
// only fired when the undo_db is enabled
if (!_slow_replays)
_undo_db.disable();
for( uint32_t i = 1; i <= last_block_num; ++i )
{
if( head_block_num() > 0 )
_fork_db.start_block( *fetch_block_by_number( head_block_num() ) );
}
else
{
undo.disable();
}
for( uint32_t i = head_block_num() + 1; i <= last_block_num; ++i )
{
if( i % 1000000 == 0 )
{
ilog( "Writing database to disk at block ${i}", ("i",i) );
flush();
ilog( "Done" );
}
if( i == 1 ||
i % 10000 == 0 )
std::cerr << " " << double(i*100)/last_block_num << "% "<< i << " of " <<last_block_num<<" \n";
fc::optional< signed_block > block = _block_id_to_block.fetch_by_number(i);
if( !block.valid() )
{
@ -150,27 +94,24 @@ void database::reindex( fc::path data_dir )
wlog( "Dropped ${n} blocks from after the gap", ("n", dropped_count) );
break;
}
if( i < undo_point && !_slow_replays)
{
if (_slow_replays)
push_block(*block, skip_fork_db |
skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
else
apply_block(*block, skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
}
else
{
undo.enable();
push_block(*block, skip_witness_signature |
skip_transaction_signatures |
skip_transaction_dupe_check |
skip_tapos_check |
skip_witness_schedule_check |
skip_authority_check);
}
}
undo.enable();
if (!_slow_replays)
_undo_db.enable();
auto end = fc::time_point::now();
ilog( "Done reindexing, elapsed time: ${t} sec", ("t",double((end-start).count())/1000000.0 ) );
} FC_CAPTURE_AND_RETHROW( (data_dir) ) }
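During replay the validation skip flags are OR-ed together into one bitmask, so a block pushed with that mask bypasses signature, TaPoS, and duplicate checks. A hedged sketch of the idea, using made-up flag values rather than graphene's real enum:

#include <cstdint>
#include <iostream>

// Hypothetical flag values; graphene defines its own skip_* enumeration.
enum validation_skip_flags : uint32_t {
   skip_nothing                = 0,
   skip_witness_signature      = 1u << 0,
   skip_transaction_signatures = 1u << 1,
   skip_transaction_dupe_check = 1u << 2,
   skip_tapos_check            = 1u << 3
};

int main() {
   // Combine the checks to bypass during a trusted local replay.
   uint32_t replay_skip = skip_witness_signature
                        | skip_transaction_signatures
                        | skip_transaction_dupe_check
                        | skip_tapos_check;

   // Validation code later tests individual bits:
   bool check_signatures = !(replay_skip & skip_transaction_signatures);
   std::cout << "check signatures during replay? " << std::boolalpha << check_signatures << '\n';
   return 0;
}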
@ -178,9 +119,7 @@ void database::reindex( fc::path data_dir )
void database::wipe(const fc::path& data_dir, bool include_blocks)
{
ilog("Wiping database", ("include_blocks", include_blocks));
if (_opened) {
close(false);
}
close();
object_database::wipe(data_dir);
if( include_blocks )
fc::remove_all( data_dir / "database" );
@ -188,68 +127,33 @@ void database::wipe(const fc::path& data_dir, bool include_blocks)
void database::open(
const fc::path& data_dir,
std::function<genesis_state_type()> genesis_loader,
const std::string& db_version)
std::function<genesis_state_type()> genesis_loader)
{
try
{
bool wipe_object_db = false;
if( !fc::exists( data_dir / "db_version" ) )
wipe_object_db = true;
else
{
std::string version_string;
fc::read_file_contents( data_dir / "db_version", version_string );
wipe_object_db = ( version_string != db_version );
}
if( wipe_object_db ) {
ilog("Wiping object_database due to missing or wrong version");
object_database::wipe( data_dir );
std::ofstream version_file( (data_dir / "db_version").generic_string().c_str(),
std::ios::out | std::ios::binary | std::ios::trunc );
version_file.write( db_version.c_str(), db_version.size() );
version_file.close();
}
object_database::open(data_dir);
_block_id_to_block.open(data_dir / "database" / "block_num_to_block");
if( !find(global_property_id_type()) )
init_genesis(genesis_loader());
else
{
_p_core_asset_obj = &get( asset_id_type() );
_p_core_dynamic_data_obj = &get( asset_dynamic_data_id_type() );
_p_global_prop_obj = &get( global_property_id_type() );
_p_chain_property_obj = &get( chain_property_id_type() );
_p_dyn_global_prop_obj = &get( dynamic_global_property_id_type() );
_p_witness_schedule_obj = &get( witness_schedule_id_type() );
}
fc::optional<block_id_type> last_block = _block_id_to_block.last_id();
fc::optional<signed_block> last_block = _block_id_to_block.last();
if( last_block.valid() )
{
FC_ASSERT( *last_block >= head_block_id(),
"last block ID does not match current chain state",
("last_block->id", last_block)("head_block_id",head_block_num()) );
_block_id_to_block.set_replay_mode(true);
reindex( data_dir );
_block_id_to_block.set_replay_mode(false);
_fork_db.start_block( *last_block );
if( last_block->id() != head_block_id() )
{
FC_ASSERT( head_block_num() == 0, "last block ID does not match current chain state",
("last_block->id", last_block->id())("head_block_num",head_block_num()) );
}
}
_opened = true;
}
FC_CAPTURE_LOG_AND_RETHROW( (data_dir) )
}
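The version check inside open() wipes the object database whenever the stored db_version file is absent or differs from the expected string, then rewrites the file. A simplified, self-contained sketch of that check, using standard C++ file I/O instead of fc::read_file_contents and object_database::wipe:

#include <filesystem>
#include <fstream>
#include <iterator>
#include <string>

// Returns true if the object database should be wiped: the version file is
// missing or records a different version than the one this binary expects.
bool object_db_needs_wipe(const std::filesystem::path& data_dir, const std::string& expected_version) {
   const auto version_file = data_dir / "db_version";
   if (!std::filesystem::exists(version_file))
      return true;
   std::ifstream in(version_file, std::ios::binary);
   std::string stored((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>());
   return stored != expected_version;
}

void record_db_version(const std::filesystem::path& data_dir, const std::string& version) {
   std::ofstream out(data_dir / "db_version", std::ios::binary | std::ios::trunc);
   out << version;   // overwrite with the current version string
}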
void database::close(bool rewind)
{
if (!_opened)
return;
// TODO: Save pending tx's on close()
clear_pending();
@ -263,9 +167,17 @@ void database::close(bool rewind)
while( head_block_num() > cutoff )
{
// elog("pop");
block_id_type popped_block_id = head_block_id();
pop_block();
_fork_db.remove(popped_block_id); // doesn't throw on missing
try
{
_block_id_to_block.remove(popped_block_id);
}
catch (const fc::key_not_found_exception&)
{
}
}
}
catch ( const fc::exception& e )
@ -286,8 +198,6 @@ void database::close(bool rewind)
_block_id_to_block.close();
_fork_db.reset();
_opened = false;
}
void database::force_slow_replays()
@ -299,7 +209,7 @@ void database::force_slow_replays()
void database::check_ending_lotteries()
{
try {
const auto& lotteries_idx = get_index_type<asset_index>().indices().get<active_lotteries>();
const auto& lotteries_idx = get_index_type<asset_index>().indices().get<active_lotteries>();
for( auto checking_asset: lotteries_idx )
{
FC_ASSERT( checking_asset.is_lottery() );
@ -311,24 +221,6 @@ void database::check_ending_lotteries()
} catch( ... ) {}
}
void database::check_ending_nft_lotteries()
{
try {
const auto &nft_lotteries_idx = get_index_type<nft_metadata_index>().indices().get<active_nft_lotteries>();
for (auto checking_token : nft_lotteries_idx)
{
FC_ASSERT(checking_token.is_lottery());
const auto &lottery_options = checking_token.lottery_data->lottery_options;
FC_ASSERT(lottery_options.is_active);
// Check the current supply of lottery tokens
auto current_supply = checking_token.get_token_current_supply(*this);
if ((lottery_options.ending_on_soldout && (current_supply == checking_token.max_supply)) ||
(lottery_options.end_date != time_point_sec() && (lottery_options.end_date <= head_block_time())))
checking_token.end_lottery(*this);
}
} catch( ... ) {}
}
void database::check_lottery_end_by_participants( asset_id_type asset_id )
{
try {

View file

@ -426,16 +426,14 @@ bool database::fill_order(const force_settlement_object& settle, const asset& pa
*
* @return true if a margin call was executed.
*/
bool database::check_call_orders( const asset_object& mia, bool enable_black_swan, bool for_new_limit_order,
const asset_bitasset_data_object* bitasset_ptr )
bool database::check_call_orders(const asset_object& mia, bool enable_black_swan)
{ try {
if( !mia.is_market_issued() ) return false;
const asset_bitasset_data_object& bitasset = ( bitasset_ptr ? *bitasset_ptr : mia.bitasset_data(*this) );
if( check_for_blackswan( mia, enable_black_swan, &bitasset ) )
if( check_for_blackswan( mia, enable_black_swan ) )
return false;
const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this);
if( bitasset.is_prediction_market ) return false;
if( bitasset.current_feed.settlement_price.is_null() ) return false;
@ -466,12 +464,7 @@ bool database::check_call_orders( const asset_object& mia, bool enable_black_swa
bool filled_limit = false;
bool margin_called = false;
auto head_time = head_block_time();
auto head_num = head_block_num();
bool after_hardfork_436 = ( head_time > HARDFORK_436_TIME );
while( !check_for_blackswan( mia, enable_black_swan, &bitasset ) && call_itr != call_end )
while( !check_for_blackswan( mia, enable_black_swan ) && call_itr != call_end )
{
bool filled_call = false;
price match_price;
@ -488,7 +481,7 @@ bool database::check_call_orders( const asset_object& mia, bool enable_black_swa
// would be margin called, but there is no matching order #436
bool feed_protected = ( bitasset.current_feed.settlement_price > ~call_itr->call_price );
if( feed_protected && after_hardfork_436 )
if( feed_protected && (head_block_time() > HARDFORK_436_TIME) )
return margin_called;
// would be margin called, but there is no matching order
@ -513,8 +506,7 @@ bool database::check_call_orders( const asset_object& mia, bool enable_black_swa
if( usd_to_buy * match_price > call_itr->get_collateral() )
{
elog( "black swan detected on asset ${symbol} (${id}) at block ${b}",
("id",mia.id)("symbol",mia.symbol)("b",head_num) );
elog( "black swan detected" );
edump((enable_black_swan));
FC_ASSERT( enable_black_swan );
globally_settle_asset(mia, bitasset.current_feed.settlement_price );

View file

@ -24,7 +24,6 @@
#include <fc/container/flat.hpp>
#include <graphene/chain/database.hpp>
#include <graphene/chain/protocol/authority.hpp>
#include <graphene/chain/protocol/operations.hpp>
#include <graphene/chain/protocol/transaction.hpp>
@ -34,19 +33,6 @@
#include <graphene/chain/confidential_object.hpp>
#include <graphene/chain/market_object.hpp>
#include <graphene/chain/committee_member_object.hpp>
#include <graphene/chain/exceptions.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/chain/proposal_object.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/vesting_balance_object.hpp>
#include <graphene/chain/transaction_object.hpp>
#include <graphene/chain/impacted.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/account_role_object.hpp>
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/sidechain_address_object.hpp>
using namespace fc;
using namespace graphene::chain;
@ -55,13 +41,8 @@ using namespace graphene::chain;
struct get_impacted_account_visitor
{
flat_set<account_id_type>& _impacted;
bool _ignore_custom_op_reqd_auths;
get_impacted_account_visitor( flat_set<account_id_type>& impact, bool ignore_custom_operation_required_auths )
: _impacted( impact ), _ignore_custom_op_reqd_auths( ignore_custom_operation_required_auths )
{}
using result_type = void;
get_impacted_account_visitor( flat_set<account_id_type>& impact ):_impacted(impact) {}
typedef void result_type;
void operator()( const transfer_operation& op )
{
@ -147,7 +128,7 @@ struct get_impacted_account_visitor
{
vector<authority> other;
for( const auto& proposed_op : op.proposed_ops )
operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other, _ignore_custom_op_reqd_auths );
operation_get_required_authorities( proposed_op.op, _impacted, _impacted, other );
for( auto& o : other )
add_authority_accounts( _impacted, o );
}
@ -203,10 +184,27 @@ struct get_impacted_account_visitor
_impacted.insert( op.issuer );
}
//! We don't use these operations
void operator()( const transfer_to_blind_operation& op ){}
void operator()( const blind_transfer_operation& op ){}
void operator()( const transfer_from_blind_operation& op ){}
void operator()( const transfer_to_blind_operation& op )
{
_impacted.insert( op.from );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const blind_transfer_operation& op )
{
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
for( const auto& out : op.outputs )
add_authority_accounts( _impacted, out.owner );
}
void operator()( const transfer_from_blind_operation& op )
{
_impacted.insert( op.to );
for( const auto& in : op.inputs )
add_authority_accounts( _impacted, in.owner );
}
void operator()( const asset_settle_cancel_operation& op )
{
@ -287,146 +285,22 @@ struct get_impacted_account_visitor
void operator()( const sweeps_vesting_claim_operation& op ) {
_impacted.insert( op.account );
}
void operator()( const custom_permission_create_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const custom_permission_update_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const custom_permission_delete_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const custom_account_authority_create_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const custom_account_authority_update_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const custom_account_authority_delete_operation& op ){
_impacted.insert( op.owner_account );
}
void operator()( const nft_metadata_create_operation& op ) {
_impacted.insert( op.owner );
}
void operator()( const nft_metadata_update_operation& op ) {
_impacted.insert( op.owner );
}
void operator()( const nft_mint_operation& op ) {
_impacted.insert( op.owner );
}
void operator()( const nft_safe_transfer_from_operation& op ) {
_impacted.insert( op.from );
_impacted.insert( op.to );
}
void operator()( const nft_approve_operation& op ) {
_impacted.insert( op.operator_ );
_impacted.insert( op.approved );
}
void operator()( const nft_set_approval_for_all_operation& op ) {
_impacted.insert( op.owner );
_impacted.insert( op.operator_ );
}
void operator()( const offer_operation& op ) {
_impacted.insert( op.issuer );
}
void operator()( const bid_operation& op ) {
_impacted.insert( op.bidder );
}
void operator()( const cancel_offer_operation& op ) {
_impacted.insert( op.issuer );
}
void operator()( const finalize_offer_operation& op ) {
_impacted.insert( op.fee_paying_account );
}
void operator()( const account_role_create_operation& op ){
_impacted.insert( op.owner );
}
void operator()( const account_role_update_operation& op ){
_impacted.insert( op.owner );
}
void operator()( const account_role_delete_operation& op ){
_impacted.insert( op.owner );
}
void operator()( const nft_lottery_token_purchase_operation& op ){
_impacted.insert( op.buyer );
}
void operator()( const nft_lottery_reward_operation& op ) {
_impacted.insert( op.winner );
}
void operator()( const nft_lottery_end_operation& op ) {}
void operator()( const son_create_operation& op ) {
_impacted.insert( op.owner_account );
}
void operator()( const son_update_operation& op ) {
_impacted.insert( op.owner_account );
}
void operator()( const son_deregister_operation& op ) {
_impacted.insert( op.payer);
}
void operator()( const son_heartbeat_operation& op ) {
_impacted.insert( op.owner_account );
}
void operator()( const son_report_down_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_maintenance_operation& op ) {
_impacted.insert( op.owner_account );
}
void operator()( const son_wallet_recreate_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_wallet_update_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_wallet_deposit_create_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_wallet_deposit_process_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_wallet_withdraw_create_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const son_wallet_withdraw_process_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_address_add_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_address_update_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_address_delete_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_transaction_create_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_transaction_sign_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_transaction_send_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const sidechain_transaction_settle_operation& op ) {
_impacted.insert( op.payer );
}
void operator()( const random_number_store_operation& op ) {
_impacted.insert( op.account );
}
};
void graphene::chain::operation_get_impacted_accounts( const operation& op, flat_set<account_id_type>& result, bool ignore_custom_operation_required_auths ) {
get_impacted_account_visitor vtor = get_impacted_account_visitor( result, ignore_custom_operation_required_auths );
void operation_get_impacted_accounts( const operation& op, flat_set<account_id_type>& result )
{
get_impacted_account_visitor vtor = get_impacted_account_visitor( result );
op.visit( vtor );
}
void graphene::chain::transaction_get_impacted_accounts( const transaction& tx, flat_set<account_id_type>& result, bool ignore_custom_operation_required_auths ) {
void transaction_get_impacted_accounts( const transaction& tx, flat_set<account_id_type>& result )
{
for( const auto& op : tx.operations )
operation_get_impacted_accounts( op, result, ignore_custom_operation_required_auths );
operation_get_impacted_accounts( op, result );
}
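Each overload of the visitor above records which accounts an operation touches, and op.visit( vtor ) dispatches over graphene's static-variant operation type. A self-contained analogue using std::variant and std::visit, with simplified stand-in operation types rather than the real operation list:

#include <cstdint>
#include <set>
#include <variant>
#include <vector>

using account_id = uint64_t;

struct transfer_op { account_id from; account_id to; };
struct settle_op   { account_id account; };
using operation = std::variant<transfer_op, settle_op>;

// Analogue of get_impacted_account_visitor: one overload per operation type.
struct impacted_visitor {
   std::set<account_id>& impacted;
   void operator()(const transfer_op& op) const { impacted.insert(op.from); impacted.insert(op.to); }
   void operator()(const settle_op& op)   const { impacted.insert(op.account); }
};

std::set<account_id> impacted_accounts(const std::vector<operation>& ops) {
   std::set<account_id> result;
   for (const auto& op : ops)
      std::visit(impacted_visitor{result}, op);   // like op.visit( vtor ) in the diff
   return result;
}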
void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accounts, bool ignore_custom_operation_required_auths ) {
void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accounts )
{
if( obj->id.space() == protocol_ids )
{
switch( (object_type)obj->id.type() )
@ -473,14 +347,12 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
} case proposal_object_type:{
const auto& aobj = dynamic_cast<const proposal_object*>(obj);
assert( aobj != nullptr );
transaction_get_impacted_accounts( aobj->proposed_transaction, accounts,
ignore_custom_operation_required_auths);
transaction_get_impacted_accounts( aobj->proposed_transaction, accounts );
break;
} case operation_history_object_type:{
const auto& aobj = dynamic_cast<const operation_history_object*>(obj);
assert( aobj != nullptr );
operation_get_impacted_accounts( aobj->op, accounts,
ignore_custom_operation_required_auths);
operation_get_impacted_accounts( aobj->op, accounts );
break;
} case withdraw_permission_object_type:{
const auto& aobj = dynamic_cast<const withdraw_permission_object*>(obj);
@ -501,33 +373,6 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
} case balance_object_type:{
/** these are free from any accounts */
break;
} case account_role_type:{
const auto& aobj = dynamic_cast<const account_role_object*>(obj);
assert( aobj != nullptr );
accounts.insert( aobj->owner );
accounts.insert( aobj->whitelisted_accounts.begin(), aobj->whitelisted_accounts.end() );
break;
} case son_object_type:{
const auto& aobj = dynamic_cast<const son_object*>(obj);
assert( aobj != nullptr );
accounts.insert( aobj->son_account );
break;
} case son_wallet_object_type:{
break;
} case son_wallet_deposit_object_type:{
break;
} case son_wallet_withdraw_object_type:{
break;
} case sidechain_address_object_type:{
const auto& aobj = dynamic_cast<const sidechain_address_object*>(obj);
assert( aobj != nullptr );
accounts.insert( aobj->sidechain_address_account );
break;
} case sidechain_transaction_object_type:{
break;
}
default: {
break;
}
}
}
@ -558,8 +403,7 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
} case impl_transaction_object_type:{
const auto& aobj = dynamic_cast<const transaction_object*>(obj);
assert( aobj != nullptr );
transaction_get_impacted_accounts( aobj->trx, accounts,
ignore_custom_operation_required_auths);
transaction_get_impacted_accounts( aobj->trx, accounts );
break;
} case impl_blinded_balance_object_type:{
const auto& aobj = dynamic_cast<const blinded_balance_object*>(obj);
@ -583,26 +427,12 @@ void get_relevant_accounts( const object* obj, flat_set<account_id_type>& accoun
break;
case impl_fba_accumulator_object_type:
break;
case impl_nft_lottery_balance_object_type:
break;
default:
break;
}
}
} // end get_relevant_accounts( const object* obj, flat_set<account_id_type>& accounts )
namespace graphene { namespace chain {
void database::notify_applied_block( const signed_block& block )
{
GRAPHENE_TRY_NOTIFY( applied_block, block )
}
void database::notify_on_pending_transaction( const signed_transaction& tx )
{
GRAPHENE_TRY_NOTIFY( on_pending_transaction, tx )
}
void database::notify_changed_objects()
{ try {
if( _undo_db.enabled() )
@ -619,10 +449,10 @@ void database::notify_changed_objects()
new_ids.push_back(item);
auto obj = find_object(item);
if(obj != nullptr)
get_relevant_accounts(obj, new_accounts_impacted, true);
get_relevant_accounts(obj, new_accounts_impacted);
}
GRAPHENE_TRY_NOTIFY( new_objects, new_ids, new_accounts_impacted)
new_objects(new_ids, new_accounts_impacted);
}
// Changed
@ -633,10 +463,10 @@ void database::notify_changed_objects()
for( const auto& item : head_undo.old_values )
{
changed_ids.push_back(item.first);
get_relevant_accounts(item.second.get(), changed_accounts_impacted, true);
get_relevant_accounts(item.second.get(), changed_accounts_impacted);
}
GRAPHENE_TRY_NOTIFY( changed_objects, changed_ids, changed_accounts_impacted)
changed_objects(changed_ids, changed_accounts_impacted);
}
// Removed
@ -650,10 +480,10 @@ void database::notify_changed_objects()
removed_ids.emplace_back( item.first );
auto obj = item.second.get();
removed.emplace_back( obj );
get_relevant_accounts(obj, removed_accounts_impacted, true);
get_relevant_accounts(obj, removed_accounts_impacted);
}
GRAPHENE_TRY_NOTIFY( removed_objects, removed_ids, removed, removed_accounts_impacted)
removed_objects(removed_ids, removed, removed_accounts_impacted);
}
}
} FC_CAPTURE_AND_LOG( (0) ) }

View file

@ -26,18 +26,16 @@
#include <graphene/chain/db_with.hpp>
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/betting_market_object.hpp>
#include <graphene/chain/game_object.hpp>
#include <graphene/chain/global_property_object.hpp>
#include <graphene/chain/hardfork.hpp>
#include <graphene/chain/market_object.hpp>
#include <graphene/chain/offer_object.hpp>
#include <graphene/chain/proposal_object.hpp>
#include <graphene/chain/son_proposal_object.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <graphene/chain/transaction_object.hpp>
#include <graphene/chain/withdraw_permission_object.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/chain/tournament_object.hpp>
#include <graphene/chain/game_object.hpp>
#include <graphene/chain/betting_market_object.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
@ -45,12 +43,43 @@
namespace graphene { namespace chain {
void database::update_global_dynamic_data( const signed_block& b, const uint32_t missed_blocks )
void database::update_global_dynamic_data( const signed_block& b )
{
const dynamic_global_property_object& _dgp = get_dynamic_global_properties();
const dynamic_global_property_object& _dgp = dynamic_global_property_id_type(0)(*this);
const global_property_object& gpo = get_global_properties();
uint32_t missed_blocks = get_slot_at_time( b.timestamp );
//#define DIRTY_TRICK // a problem with missed_blocks can occur when "maintenance_interval" is set to a few minutes
#ifdef DIRTY_TRICK
if (missed_blocks != 0) {
#else
assert( missed_blocks != 0 );
#endif
// bad if-condition, this code needs to execute for both shuffled and rng algorithms
// if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
// {
missed_blocks--;
for( uint32_t i = 0; i < missed_blocks; ++i ) {
const auto& witness_missed = get_scheduled_witness( i+1 )(*this);
if( witness_missed.id != b.witness ) {
/*
const auto& witness_account = witness_missed.witness_account(*this);
if( (fc::time_point::now() - b.timestamp) < fc::seconds(30) )
wlog( "Witness ${name} missed block ${n} around ${t}", ("name",witness_account.name)("n",b.block_num())("t",b.timestamp) );
*/
modify( witness_missed, [&]( witness_object& w ) {
w.total_missed++;
});
}
}
// }
#ifdef DIRTY_TRICK
}
#endif
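A worked example of the missed-block arithmetic above, assuming get_slot_at_time returns how many block slots lie between the head block and the incoming block's timestamp: if the new block lands in slot 3, missed_blocks starts at 3, is decremented to 2 for the slot that was actually produced, and the witnesses scheduled for the two skipped slots each have total_missed incremented. A minimal sketch of that counting, with a hypothetical slot function standing in for get_slot_at_time:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for get_slot_at_time(): slots elapsed since the head block.
uint32_t slots_since_head(uint32_t head_time, uint32_t block_time, uint32_t block_interval) {
   return (block_time - head_time) / block_interval;
}

int main() {
   const uint32_t block_interval = 3;              // seconds per slot (illustrative)
   const uint32_t head_time = 1000;                // head block timestamp
   const uint32_t incoming  = 1009;                // new block arrives 3 slots later

   uint32_t missed = slots_since_head(head_time, incoming, block_interval);  // 3
   missed--;                                       // the slot that was produced doesn't count
   std::vector<uint32_t> missed_slots;
   for (uint32_t i = 0; i < missed; ++i)
      missed_slots.push_back(i + 1);               // slots 1 and 2 were skipped
   std::cout << "witnesses charged with a miss: " << missed_slots.size() << '\n';  // prints 2
   return 0;
}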
// dynamic global properties updating
modify( _dgp, [&b,this,missed_blocks]( dynamic_global_property_object& dgp ){
modify( _dgp, [&]( dynamic_global_property_object& dgp ){
secret_hash_type::encoder enc;
fc::raw::pack( enc, dgp.random );
fc::raw::pack( enc, b.previous_secret );
@ -58,10 +87,9 @@ void database::update_global_dynamic_data( const signed_block& b, const uint32_t
_random_number_generator = fc::hash_ctr_rng<secret_hash_type, 20>(dgp.random.data());
const uint32_t block_num = b.block_num();
if( BOOST_UNLIKELY( block_num == 1 ) )
if( BOOST_UNLIKELY( b.block_num() == 1 ) )
dgp.recently_missed_count = 0;
else if( _checkpoints.size() && _checkpoints.rbegin()->first >= block_num )
else if( _checkpoints.size() && _checkpoints.rbegin()->first >= b.block_num() )
dgp.recently_missed_count = 0;
else if( missed_blocks )
dgp.recently_missed_count += GRAPHENE_RECENTLY_MISSED_COUNT_INCREMENT*missed_blocks;
@ -70,7 +98,7 @@ void database::update_global_dynamic_data( const signed_block& b, const uint32_t
else if( dgp.recently_missed_count > 0 )
dgp.recently_missed_count--;
dgp.head_block_number = block_num;
dgp.head_block_number = b.block_num();
dgp.head_block_id = b.id();
dgp.time = b.timestamp;
dgp.current_witness = b.witness;
@ -122,7 +150,6 @@ void database::update_last_irreversible_block()
const global_property_object& gpo = get_global_properties();
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
// TODO for better performance, move this to db_maint, because only need to do it once per maintenance interval
vector< const witness_object* > wit_objs;
wit_objs.reserve( gpo.active_witnesses.size() );
for( const witness_id_type& wid : gpo.active_witnesses )
@ -226,7 +253,6 @@ void database::clear_expired_proposals()
elog("Failed to apply proposed transaction on its expiration. Deleting it.\n${proposal}\n${error}",
("proposal", proposal)("error", e.to_detail_string()));
}
remove_son_proposal(proposal);
remove(proposal);
}
}
@ -240,12 +266,11 @@ void database::clear_expired_proposals()
*
* A black swan occurs if MAX(HB,SP) <= LC
*/
bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan,
const asset_bitasset_data_object* bitasset_ptr )
bool database::check_for_blackswan( const asset_object& mia, bool enable_black_swan )
{
if( !mia.is_market_issued() ) return false;
const asset_bitasset_data_object& bitasset = ( bitasset_ptr ? *bitasset_ptr : mia.bitasset_data(*this) );
const asset_bitasset_data_object& bitasset = mia.bitasset_data(*this);
if( bitasset.has_settlement() ) return true; // already force settled
auto settle_price = bitasset.current_feed.settlement_price;
if( settle_price.is_null() ) return false; // no feed
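The doc comment states the trigger condition as MAX(HB, SP) <= LC, where HB is the highest bid, SP the settlement price, and LC the price of the least collateralized position. A self-contained numeric illustration of that predicate, using plain doubles rather than graphene's price type:

#include <algorithm>
#include <iostream>

// Sketch of the documented trigger: a black swan occurs when
// max(highest_bid, settlement_price) <= least_collateralized price.
bool black_swan_triggered(double highest_bid, double settlement_price, double least_collateralized) {
   return std::max(highest_bid, settlement_price) <= least_collateralized;
}

int main() {
   std::cout << std::boolalpha
             << black_swan_triggered(0.95, 1.00, 0.90) << '\n'   // false: no black swan
             << black_swan_triggered(0.80, 0.85, 0.90) << '\n';  // true: black swan condition met
   return 0;
}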
@ -470,84 +495,32 @@ void database::clear_expired_orders()
void database::update_expired_feeds()
{
const auto head_time = head_block_time();
bool after_hardfork_615 = ( head_time >= HARDFORK_615_TIME );
const auto& idx = get_index_type<asset_bitasset_data_index>().indices().get<by_feed_expiration>();
auto itr = idx.begin();
while( itr != idx.end() && itr->feed_is_expired( head_time ) )
auto& asset_idx = get_index_type<asset_index>().indices().get<by_type>();
auto itr = asset_idx.lower_bound( true /** market issued */ );
while( itr != asset_idx.end() )
{
const asset_bitasset_data_object& b = *itr;
++itr; // don't always process begin(), because old code skipped updating some assets before hardfork 615
bool update_cer = false; // for better performance, to only update bitasset once, also check CER in this function
const asset_object* asset_ptr = nullptr;
// update feeds, check margin calls
if( after_hardfork_615 || b.feed_is_expired_before_hardfork_615( head_time ) )
const asset_object& a = *itr;
++itr;
assert( a.is_market_issued() );
const asset_bitasset_data_object& b = a.bitasset_data(*this);
bool feed_is_expired;
if( head_block_time() < HARDFORK_615_TIME )
feed_is_expired = b.feed_is_expired_before_hardfork_615( head_block_time() );
else
feed_is_expired = b.feed_is_expired( head_block_time() );
if( feed_is_expired )
{
auto old_median_feed = b.current_feed;
modify( b, [head_time,&update_cer]( asset_bitasset_data_object& abdo )
{
abdo.update_median_feeds( head_time );
if( abdo.need_to_update_cer() )
{
update_cer = true;
abdo.asset_cer_updated = false;
abdo.feed_cer_updated = false;
}
modify(b, [this](asset_bitasset_data_object& a) {
a.update_median_feeds(head_block_time());
});
if( !b.current_feed.settlement_price.is_null() && !( b.current_feed == old_median_feed ) ) // `==` check is safe here
{
asset_ptr = &b.asset_id( *this );
check_call_orders( *asset_ptr, true, false, &b );
}
check_call_orders(b.current_feed.settlement_price.base.asset_id(*this));
}
// update CER
if( update_cer )
{
if( !asset_ptr )
asset_ptr = &b.asset_id( *this );
if( asset_ptr->options.core_exchange_rate != b.current_feed.core_exchange_rate )
{
modify( *asset_ptr, [&b]( asset_object& ao )
{
ao.options.core_exchange_rate = b.current_feed.core_exchange_rate;
});
}
}
} // for each asset whose feed is expired
// process assets affected by bitshares-core issue 453 before hard fork 615
if( !after_hardfork_615 )
{
for( asset_id_type a : _issue_453_affected_assets )
{
check_call_orders( a(*this) );
}
}
}
void database::update_core_exchange_rates()
{
const auto& idx = get_index_type<asset_bitasset_data_index>().indices().get<by_cer_update>();
if( idx.begin() != idx.end() )
{
for( auto itr = idx.rbegin(); itr->need_to_update_cer(); itr = idx.rbegin() )
{
const asset_bitasset_data_object& b = *itr;
const asset_object& a = b.asset_id( *this );
if( a.options.core_exchange_rate != b.current_feed.core_exchange_rate )
{
modify( a, [&b]( asset_object& ao )
{
ao.options.core_exchange_rate = b.current_feed.core_exchange_rate;
});
}
modify( b, []( asset_bitasset_data_object& abdo )
{
abdo.asset_cer_updated = false;
abdo.feed_cer_updated = false;
if( !b.current_feed.core_exchange_rate.is_null() &&
a.options.core_exchange_rate != b.current_feed.core_exchange_rate )
modify(a, [&b](asset_object& a) {
a.options.core_exchange_rate = b.current_feed.core_exchange_rate;
});
}
}
}
@ -707,75 +680,4 @@ void database::update_betting_markets(fc::time_point_sec current_block_time)
remove_completed_events();
}
void database::finalize_expired_offers(){
try {
detail::with_skip_flags( *this,
get_node_properties().skip_flags | skip_authority_check, [&](){
transaction_evaluation_state cancel_context(this);
//Cancel expired limit orders
auto& limit_index = get_index_type<offer_index>().indices().get<by_expiration_date>();
auto itr = limit_index.begin();
while( itr != limit_index.end() && itr->offer_expiration_date <= head_block_time() )
{
const offer_object& offer = *itr;
++itr;
finalize_offer_operation finalize;
finalize.fee_paying_account = offer.issuer;
finalize.offer_id = offer.id;
finalize.fee = asset( 0, asset_id_type() );
finalize.result = offer.bidder ? result_type::Expired : result_type::ExpiredNoBid;
cancel_context.skip_fee_schedule_check = true;
apply_operation(cancel_context, finalize);
}
});
} FC_CAPTURE_AND_RETHROW()}
void database::remove_son_proposal( const proposal_object& proposal )
{ try {
if( proposal.proposed_transaction.operations.size() == 1 &&
( proposal.proposed_transaction.operations.back().which() == operation::tag<son_deregister_operation>::value ||
proposal.proposed_transaction.operations.back().which() == operation::tag<son_report_down_operation>::value) )
{
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get<by_proposal>();
auto son_proposal_itr = son_proposal_idx.find( proposal.id );
if( son_proposal_itr == son_proposal_idx.end() ) {
return;
}
remove( *son_proposal_itr );
}
} FC_CAPTURE_AND_RETHROW( (proposal) ) }
void database::remove_inactive_son_down_proposals( const vector<son_id_type>& son_ids_to_remove )
{
const auto& son_proposal_idx = get_index_type<son_proposal_index>().indices().get< by_id >();
std::vector<proposal_id_type> proposals_to_remove;
for( auto& son_proposal : son_proposal_idx )
{
if(son_proposal.proposal_type == son_proposal_type::son_report_down_proposal)
{
auto it = std::find(son_ids_to_remove.begin(), son_ids_to_remove.end(), son_proposal.son_id);
if (it != son_ids_to_remove.end())
{
ilog( "Removing inactive proposal ${p} for son ${s}", ("p", son_proposal.proposal_id) ("s",son_proposal.son_id));
proposals_to_remove.push_back(son_proposal.proposal_id);
}
}
}
for( auto& proposal_id : proposals_to_remove )
{
const auto& proposal_obj = proposal_id(*this);
remove_son_proposal(proposal_obj);
remove(proposal_obj);
}
}
void database::remove_inactive_son_proposals( const vector<son_id_type>& son_ids_to_remove )
{
remove_inactive_son_down_proposals( son_ids_to_remove );
}
} }

View file

@ -26,8 +26,6 @@
#include <graphene/chain/global_property_object.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/chain/son_object.hpp>
#include <graphene/chain/son_info.hpp>
namespace graphene { namespace chain {
@ -40,14 +38,14 @@ witness_id_type database::get_scheduled_witness( uint32_t slot_num )const
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
{
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = witness_schedule_id_type()(*this);
uint64_t current_aslot = dpo.current_aslot + slot_num;
return wso.current_shuffled_witnesses[ current_aslot % wso.current_shuffled_witnesses.size() ];
}
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM &&
slot_num != 0 )
{
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = witness_schedule_id_type()(*this);
// ask the near scheduler who goes in the given slot
bool slot_is_near = wso.scheduler.get_slot(slot_num-1, wid);
if(! slot_is_near)
@ -74,58 +72,6 @@ witness_id_type database::get_scheduled_witness( uint32_t slot_num )const
return wid;
}
unsigned_int database::get_son_schedule_id( sidechain_type type )const
{
static const map<sidechain_type, unsigned_int> schedule_map = {
{ sidechain_type::bitcoin, 0 },
{ sidechain_type::ethereum, 1 },
{ sidechain_type::hive, 2 }
};
return schedule_map.at(type);
}
son_id_type database::get_scheduled_son( sidechain_type type, uint32_t slot_num )const
{
son_id_type sid;
const global_property_object& gpo = get_global_properties();
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SHUFFLED_ALGORITHM)
{
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
const son_schedule_object& sso = son_schedule_id_type(get_son_schedule_id(type))(*this);
uint64_t current_aslot = dpo.current_aslot + slot_num;
return sso.current_shuffled_sons[ current_aslot % sso.current_shuffled_sons.size() ];
}
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM &&
slot_num != 0 )
{
const son_schedule_object& sso = son_schedule_id_type(get_son_schedule_id(type))(*this);
// ask the near scheduler who goes in the given slot
bool slot_is_near = sso.scheduler.get_slot(slot_num-1, sid);
if(! slot_is_near)
{
// if the near scheduler doesn't know, we have to extend it to
// a far scheduler.
// n.b. instantiating it is slow, but block gaps long enough to
// need it are likely pretty rare.
witness_scheduler_rng far_rng(sso.rng_seed.begin(), GRAPHENE_FAR_SCHEDULE_CTR_IV);
far_future_son_scheduler far_scheduler =
far_future_son_scheduler(sso.scheduler, far_rng);
if(!far_scheduler.get_slot(slot_num-1, sid))
{
// no scheduled son -- somebody set up us the bomb
// n.b. this code path is impossible, the present
// implementation of far_future_son_scheduler
// returns true unconditionally
assert( false );
}
}
}
return sid;
}
fc::time_point_sec database::get_slot_time(uint32_t slot_num)const
{
if( slot_num == 0 )
@ -167,7 +113,7 @@ uint32_t database::get_slot_at_time(fc::time_point_sec when)const
void database::update_witness_schedule()
{
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = witness_schedule_id_type()(*this);
const global_property_object& gpo = get_global_properties();
if( head_block_num() % gpo.active_witnesses.size() == 0 )
@ -200,47 +146,9 @@ void database::update_witness_schedule()
}
}
void database::update_son_schedule(sidechain_type type)
{
const global_property_object& gpo = get_global_properties();
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
if( gpo.active_sons.at(type).size() != 0 &&
head_block_num() % gpo.active_sons.at(type).size() == 0)
{
modify( sidechain_sso, [&]( son_schedule_object& _sso )
{
_sso.current_shuffled_sons.clear();
_sso.current_shuffled_sons.reserve( gpo.active_sons.at(type).size() );
for ( const auto &w : gpo.active_sons.at(type) ) {
_sso.current_shuffled_sons.push_back(w.son_id);
}
auto now_hi = uint64_t(head_block_time().sec_since_epoch()) << 32;
for (uint32_t i = 0; i < _sso.current_shuffled_sons.size(); ++i)
{
/// High performance random generator
/// http://xorshift.di.unimi.it/
uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
k ^= (k >> 12);
k ^= (k << 25);
k ^= (k >> 27);
k *= 2685821657736338717ULL;
uint32_t jmax = _sso.current_shuffled_sons.size() - i;
uint32_t j = i + k % jmax;
std::swap(_sso.current_shuffled_sons[i],
_sso.current_shuffled_sons[j]);
}
});
}
}
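The shuffle above seeds a xorshift-style mixer (the xorshift.di.unimi.it family linked in the comment) with the block time and performs an in-place Fisher-Yates pass over the schedule. A standalone sketch of the same mixing and swap logic over a plain vector:

#include <cstdint>
#include <iostream>
#include <vector>

// Deterministic Fisher-Yates shuffle driven by the same xorshift-style mixer
// used above (constant 2685821657736338717ULL), seeded from a timestamp.
void shuffle_like_schedule(std::vector<uint32_t>& ids, uint64_t now_sec) {
   const uint64_t now_hi = now_sec << 32;
   for (uint32_t i = 0; i < ids.size(); ++i) {
      uint64_t k = now_hi + uint64_t(i) * 2685821657736338717ULL;
      k ^= (k >> 12);
      k ^= (k << 25);
      k ^= (k >> 27);
      k *= 2685821657736338717ULL;
      uint32_t jmax = ids.size() - i;          // pick from the not-yet-placed tail
      uint32_t j = i + k % jmax;
      std::swap(ids[i], ids[j]);
   }
}

int main() {
   std::vector<uint32_t> sons{1, 2, 3, 4, 5};
   shuffle_like_schedule(sons, 1700000000);     // same seed gives the same order every run
   for (auto id : sons) std::cout << id << ' ';
   std::cout << '\n';
   return 0;
}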
vector<witness_id_type> database::get_near_witness_schedule()const
{
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = witness_schedule_id_type()(*this);
vector<witness_id_type> result;
result.reserve(wso.scheduler.size());
@ -257,7 +165,7 @@ void database::update_witness_schedule(const signed_block& next_block)
{
auto start = fc::time_point::now();
const global_property_object& gpo = get_global_properties();
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = get(witness_schedule_id_type());
uint32_t schedule_needs_filled = gpo.active_witnesses.size();
uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
@ -318,90 +226,6 @@ void database::update_witness_schedule(const signed_block& next_block)
idump( ( double(total_time/1000000.0)/calls) );
}
void database::update_son_schedule(sidechain_type type, const signed_block& next_block)
{
auto start = fc::time_point::now();
#ifndef NDEBUG
const son_schedule_object& sso = get(son_schedule_id_type());
#endif
const global_property_object& gpo = get_global_properties();
const uint32_t schedule_needs_filled = gpo.active_sons.at(type).size();
const uint32_t schedule_slot = get_slot_at_time(next_block.timestamp);
// We shouldn't be able to generate _pending_block with timestamp
// in the past, and incoming blocks from the network with timestamp
// in the past shouldn't be able to make it this far without
// triggering FC_ASSERT elsewhere
assert( schedule_slot > 0 );
const dynamic_global_property_object& dpo = get_dynamic_global_properties();
assert( dpo.random.data_size() == witness_scheduler_rng::seed_length );
assert( witness_scheduler_rng::seed_length == sso.rng_seed.size() );
const son_schedule_object& sidechain_sso = get(son_schedule_id_type(get_son_schedule_id(type)));
son_id_type first_son;
bool slot_is_near = sidechain_sso.scheduler.get_slot( schedule_slot-1, first_son );
son_id_type son_id;
modify(sidechain_sso, [&](son_schedule_object& _sso)
{
_sso.slots_since_genesis += schedule_slot;
witness_scheduler_rng rng(_sso.rng_seed.data, _sso.slots_since_genesis);
_sso.scheduler._min_token_count = std::max(int(gpo.active_sons.at(type).size()) / 2, 1);
if( slot_is_near )
{
uint32_t drain = schedule_slot;
while( drain > 0 )
{
if( _sso.scheduler.size() == 0 )
break;
_sso.scheduler.consume_schedule();
--drain;
}
}
else
{
_sso.scheduler.reset_schedule( first_son );
}
while( !_sso.scheduler.get_slot(schedule_needs_filled, son_id) )
{
if( _sso.scheduler.produce_schedule(rng) & emit_turn )
memcpy(_sso.rng_seed.begin(), dpo.random.data(), dpo.random.data_size());
}
_sso.last_scheduling_block = next_block.block_num();
_sso.recent_slots_filled = (
(_sso.recent_slots_filled << 1)
+ 1) << (schedule_slot - 1);
});
auto end = fc::time_point::now();
static uint64_t total_time = 0;
static uint64_t calls = 0;
total_time += (end - start).count();
if( ++calls % 1000 == 0 )
idump( ( double(total_time/1000000.0)/calls) );
}
uint32_t database::update_witness_missed_blocks( const signed_block& b )
{
uint32_t missed_blocks = get_slot_at_time( b.timestamp );
FC_ASSERT( missed_blocks != 0, "Trying to push double-produced block onto current block?!" );
missed_blocks--;
const auto& witnesses = witness_schedule_id_type()(*this).current_shuffled_witnesses;
if( missed_blocks < witnesses.size() )
for( uint32_t i = 0; i < missed_blocks; ++i ) {
const auto& witness_missed = get_scheduled_witness( i+1 )(*this);
modify( witness_missed, []( witness_object& w ) {
w.total_missed++;
});
}
return missed_blocks;
}
uint32_t database::witness_participation_rate()const
{
const global_property_object& gpo = get_global_properties();
@ -412,7 +236,7 @@ uint32_t database::witness_participation_rate()const
}
if (gpo.parameters.witness_schedule_algorithm == GRAPHENE_WITNESS_SCHEDULED_ALGORITHM)
{
const witness_schedule_object& wso = get_witness_schedule_object();
const witness_schedule_object& wso = get(witness_schedule_id_type());
return uint64_t(GRAPHENE_100_PERCENT) * wso.recent_slots_filled.popcount() / 128;
}
return 0;

View file

@ -47,7 +47,7 @@ namespace graphene { namespace chain {
};
} }
FC_REFLECT_ENUM(graphene::chain::event_state,
FC_REFLECT_ENUM(graphene::chain::event_state,
(upcoming)
(frozen_upcoming)
(in_progress)
@ -61,12 +61,12 @@ namespace graphene { namespace chain {
namespace msm = boost::msm;
namespace mpl = boost::mpl;
namespace
namespace
{
// Events -- most events happen when the witnesses publish an event_update operation with a new
// status, so if they publish an event with the status set to `frozen`, we'll generate a `frozen_event`
struct upcoming_event
struct upcoming_event
{
database& db;
upcoming_event(database& db) : db(db) {}
@ -76,12 +76,12 @@ namespace graphene { namespace chain {
database& db;
in_progress_event(database& db) : db(db) {}
};
struct frozen_event
struct frozen_event
{
database& db;
frozen_event(database& db) : db(db) {}
};
struct finished_event
struct finished_event
{
database& db;
finished_event(database& db) : db(db) {}
@ -104,7 +104,7 @@ namespace graphene { namespace chain {
betting_market_group_resolved_event(database& db, betting_market_group_id_type resolved_group, bool was_canceled) : db(db), resolved_group(resolved_group), was_canceled(was_canceled) {}
};
// event triggered when a betting market group is closed. When we get this,
// event triggered when a betting market group is closed. When we get this,
// if all child betting market groups are closed, transition to finished
struct betting_market_group_closed_event
{
@ -127,7 +127,7 @@ namespace graphene { namespace chain {
void on_entry(const upcoming_event& event, event_state_machine_& fsm) {
dlog("event ${id} -> upcoming", ("id", fsm.event_obj->id));
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(fsm.event_obj->id)))
try
{
@ -147,7 +147,7 @@ namespace graphene { namespace chain {
void on_entry(const in_progress_event& event, event_state_machine_& fsm) {
dlog("event ${id} -> in_progress", ("id", fsm.event_obj->id));
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(fsm.event_obj->id)))
try
{
@ -203,7 +203,7 @@ namespace graphene { namespace chain {
void freeze_betting_market_groups(const frozen_event& event) {
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
{
try
@ -222,7 +222,7 @@ namespace graphene { namespace chain {
void close_all_betting_market_groups(const finished_event& event) {
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
{
try
@ -241,7 +241,7 @@ namespace graphene { namespace chain {
void cancel_all_betting_market_groups(const canceled_event& event) {
auto& betting_market_group_index = event.db.template get_index_type<betting_market_group_object_index>().indices().template get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
event.db.modify(betting_market_group, [&event](betting_market_group_object& betting_market_group_obj) {
betting_market_group_obj.on_canceled_event(event.db, true);
@ -252,15 +252,15 @@ namespace graphene { namespace chain {
bool all_betting_market_groups_are_closed(const betting_market_group_closed_event& event)
{
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
if (betting_market_group.id != event.closed_group)
{
betting_market_group_status status = betting_market_group.get_status();
if (status != betting_market_group_status::closed &&
status != betting_market_group_status::graded &&
status != betting_market_group_status::re_grading &&
status != betting_market_group_status::settled &&
if (status != betting_market_group_status::closed &&
status != betting_market_group_status::graded &&
status != betting_market_group_status::re_grading &&
status != betting_market_group_status::settled &&
status != betting_market_group_status::canceled)
return false;
}
@ -276,7 +276,7 @@ namespace graphene { namespace chain {
if (event_obj->at_least_one_betting_market_group_settled)
return false;
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id)))
if (betting_market_group.id != event.resolved_group)
if (betting_market_group.get_status() != betting_market_group_status::canceled)
@ -290,7 +290,7 @@ namespace graphene { namespace chain {
event_obj->at_least_one_betting_market_group_settled = true;
auto& betting_market_group_index = event.db.get_index_type<betting_market_group_object_index>().indices().get<by_event_id>();
for (const betting_market_group_object& betting_market_group :
for (const betting_market_group_object& betting_market_group :
boost::make_iterator_range(betting_market_group_index.equal_range(event_obj->id))) {
if (betting_market_group.id != event.resolved_group) {
betting_market_group_status status = betting_market_group.get_status();
@ -344,6 +344,7 @@ namespace graphene { namespace chain {
{
FC_THROW_EXCEPTION(graphene::chain::no_transition, "No transition");
}
template <class Fsm>
void no_transition(canceled_event const& e, Fsm&, int state)
{
@ -371,7 +372,7 @@ namespace graphene { namespace chain {
{
}
event_object::event_object(const event_object& rhs) :
event_object::event_object(const event_object& rhs) :
graphene::db::abstract_object<event_object>(rhs),
name(rhs.name),
season(rhs.season),
@ -407,7 +408,7 @@ namespace graphene { namespace chain {
}
namespace {
bool verify_event_status_constants()
{
unsigned error_count = 0;
@ -442,19 +443,19 @@ namespace graphene { namespace chain {
dlog("Event status constants are correct");
else
wlog("There were ${count} errors in the event status constants", ("count", error_count));
return error_count == 0;
}
} // end anonymous namespace
event_status event_object::get_status() const
{
static bool state_constants_are_correct = verify_event_status_constants();
(void)&state_constants_are_correct;
event_state state = (event_state)my->state_machine.current_state()[0];
ddump((state));
switch (state)
{
case event_state::upcoming:
@ -522,8 +523,8 @@ namespace graphene { namespace chain {
my->state_machine.process_event(betting_market_group_closed_event(db, closed_group));
}
// These are the only statuses that can be explicitly set by witness operations. The missing
// status, 'settled', is automatically set when all of the betting market groups have
// These are the only statuses that can be explicitly set by witness operations. The missing
// status, 'settled', is automatically set when all of the betting market groups have
// settled/canceled
void event_object::dispatch_new_status(database& db, event_status new_status)
{
@ -532,16 +533,16 @@ namespace graphene { namespace chain {
on_upcoming_event(db);
break;
case event_status::in_progress: // by witnesses when the event starts
on_in_progress_event(db);
on_in_progress_event(db);
break;
case event_status::frozen: // by witnesses when the event needs to be frozen
on_frozen_event(db);
on_frozen_event(db);
break;
case event_status::finished: // by witnesses when the event is complete
on_finished_event(db);
on_finished_event(db);
break;
case event_status::canceled: // by witnesses to cancel the event
on_canceled_event(db);
on_canceled_event(db);
break;
default:
FC_THROW("Status ${new_status} cannot be explicitly set", ("new_status", new_status));
@ -550,32 +551,32 @@ namespace graphene { namespace chain {
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect event_object to variant to properly reflect "state"
void to_variant(const graphene::chain::event_object& event_obj, fc::variant& v, uint32_t max_depth)
void to_variant(const graphene::chain::event_object& event_obj, fc::variant& v)
{
fc::mutable_variant_object o;
o("id", fc::variant(event_obj.id, max_depth))
("name", fc::variant(event_obj.name, max_depth))
("season", fc::variant(event_obj.season, max_depth))
("start_time", fc::variant(event_obj.start_time, max_depth))
("event_group_id", fc::variant(event_obj.event_group_id, max_depth))
("scores", fc::variant(event_obj.scores, max_depth))
("status", fc::variant(event_obj.get_status(), max_depth));
o("id", event_obj.id)
("name", event_obj.name)
("season", event_obj.season)
("start_time", event_obj.start_time)
("event_group_id", event_obj.event_group_id)
("scores", event_obj.scores)
("status", event_obj.get_status());
v = o;
}
// Manually reflect event_object to variant to properly reflect "state"
void from_variant(const fc::variant& v, graphene::chain::event_object& event_obj, uint32_t max_depth)
void from_variant(const fc::variant& v, graphene::chain::event_object& event_obj)
{
event_obj.id = v["id"].as<graphene::chain::event_id_type>( max_depth );
event_obj.name = v["name"].as<graphene::chain::internationalized_string_type>( max_depth );
event_obj.season = v["season"].as<graphene::chain::internationalized_string_type>( max_depth );
event_obj.start_time = v["start_time"].as<optional<time_point_sec> >( max_depth );
event_obj.event_group_id = v["event_group_id"].as<graphene::chain::event_group_id_type>( max_depth );
event_obj.scores = v["scores"].as<std::vector<std::string>>( max_depth );
graphene::chain::event_status status = v["status"].as<graphene::chain::event_status>( max_depth );
event_obj.id = v["id"].as<graphene::chain::event_id_type>();
event_obj.name = v["name"].as<graphene::chain::internationalized_string_type>();
event_obj.season = v["season"].as<graphene::chain::internationalized_string_type>();
event_obj.start_time = v["start_time"].as<optional<time_point_sec> >();
event_obj.event_group_id = v["event_group_id"].as<graphene::chain::event_group_id_type>();
event_obj.scores = v["scores"].as<std::vector<std::string>>();
graphene::chain::event_status status = v["status"].as<graphene::chain::event_status>();
const_cast<int*>(event_obj.my->state_machine.current_state())[0] = (int)status;
}
} //end namespace fc
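The pair above hand-rolls the variant conversion so the derived "status" field, computed from the state machine, is serialized alongside the stored members. A simplified, self-contained sketch of the same idea, with a hypothetical struct and a string map standing in for fc::variant:

#include <iostream>
#include <map>
#include <string>

// Hypothetical example type: `state` is runtime-only, but we want it serialized.
struct toy_event {
   std::string name;
   int state = 0;                      // e.g. 0 = upcoming, 1 = finished
   std::string status() const { return state == 0 ? "upcoming" : "finished"; }
};

// Stand-in for fc::variant: a flat string map.
using toy_variant = std::map<std::string, std::string>;

// Manual to_variant: emit stored fields plus the computed "status".
toy_variant to_variant(const toy_event& e) {
   return { {"name", e.name}, {"status", e.status()} };
}

// Manual from_variant: map "status" back onto the internal state.
toy_event from_variant(const toy_variant& v) {
   toy_event e;
   e.name  = v.at("name");
   e.state = (v.at("status") == "finished") ? 1 : 0;
   return e;
}

int main() {
   toy_event e{"final", 1};
   auto v = to_variant(e);
   std::cout << v["status"] << '\n';                     // prints "finished"
   std::cout << from_variant(v).state << '\n';           // prints 1
   return 0;
}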

View file

@ -24,6 +24,7 @@
#include <graphene/chain/fork_database.hpp>
#include <graphene/chain/exceptions.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/smart_ref_impl.hpp>
namespace graphene { namespace chain {
fork_database::fork_database()

View file

@ -547,35 +547,35 @@ namespace graphene { namespace chain {
} } // graphene::chain
namespace fc {
namespace fc {
// Manually reflect game_object to variant to properly reflect "state"
void to_variant(const graphene::chain::game_object& game_obj, fc::variant& v, uint32_t max_depth)
void to_variant(const graphene::chain::game_object& game_obj, fc::variant& v)
{
fc_elog(fc::logger::get("tournament"), "In game_obj to_variant");
elog("In game_obj to_variant");
fc::mutable_variant_object o;
o("id", fc::variant(game_obj.id, max_depth ))
("match_id", fc::variant(game_obj.match_id, max_depth ))
("players", fc::variant(game_obj.players, max_depth ))
("winners", fc::variant(game_obj.winners, max_depth ))
("game_details", fc::variant(game_obj.game_details, max_depth ))
("next_timeout", fc::variant(game_obj.next_timeout, max_depth ))
("state", fc::variant(game_obj.get_state(), max_depth ));
o("id", game_obj.id)
("match_id", game_obj.match_id)
("players", game_obj.players)
("winners", game_obj.winners)
("game_details", game_obj.game_details)
("next_timeout", game_obj.next_timeout)
("state", game_obj.get_state());
v = o;
}
// Manually reflect game_object to variant to properly reflect "state"
void from_variant(const fc::variant& v, graphene::chain::game_object& game_obj, uint32_t max_depth)
void from_variant(const fc::variant& v, graphene::chain::game_object& game_obj)
{
fc_elog(fc::logger::get("tournament"), "In game_obj from_variant");
game_obj.id = v["id"].as<graphene::chain::game_id_type>( max_depth );
game_obj.match_id = v["match_id"].as<graphene::chain::match_id_type>( max_depth );
game_obj.players = v["players"].as<std::vector<graphene::chain::account_id_type> >( max_depth );
game_obj.winners = v["winners"].as<flat_set<graphene::chain::account_id_type> >( max_depth );
game_obj.game_details = v["game_details"].as<graphene::chain::game_specific_details>( max_depth );
game_obj.next_timeout = v["next_timeout"].as<fc::optional<time_point_sec> >( max_depth );
graphene::chain::game_state state = v["state"].as<graphene::chain::game_state>( max_depth );
game_obj.id = v["id"].as<graphene::chain::game_id_type>();
game_obj.match_id = v["match_id"].as<graphene::chain::match_id_type>();
game_obj.players = v["players"].as<std::vector<graphene::chain::account_id_type> >();
game_obj.winners = v["winners"].as<flat_set<graphene::chain::account_id_type> >();
game_obj.game_details = v["game_details"].as<graphene::chain::game_specific_details>();
game_obj.next_timeout = v["next_timeout"].as<fc::optional<time_point_sec> >();
graphene::chain::game_state state = v["state"].as<graphene::chain::game_state>();
const_cast<int*>(game_obj.my->state_machine.current_state())[0] = (int)state;
}
} //end namespace fc

View file

@ -25,6 +25,7 @@
#include <graphene/chain/genesis_state.hpp>
// these are required to serialize a genesis_state
#include <fc/smart_ref_impl.hpp> // required for gcc in release mode
#include <graphene/chain/protocol/fee_schedule.hpp>
namespace graphene { namespace chain {
@ -35,72 +36,3 @@ chain_id_type genesis_state_type::compute_chain_id() const
}
} } // graphene::chain
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_account_type, BOOST_PP_SEQ_NIL, (name)(owner_key)(active_key)(is_lifetime_member))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_asset_type, BOOST_PP_SEQ_NIL,
(symbol)(issuer_name)(description)(precision)(max_supply)(accumulated_fees)(is_bitasset)(collateral_records))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_asset_type::initial_collateral_position, BOOST_PP_SEQ_NIL,
(owner)(collateral)(debt))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_balance_type, BOOST_PP_SEQ_NIL,
(owner)(asset_symbol)(amount))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_vesting_balance_type, BOOST_PP_SEQ_NIL,
(owner)(asset_symbol)(amount)(begin_timestamp)(vesting_cliff_seconds)(vesting_duration_seconds)(begin_balance))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_witness_type, BOOST_PP_SEQ_NIL, (owner_name)(block_signing_key))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_committee_member_type, BOOST_PP_SEQ_NIL, (owner_name))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_worker_type, BOOST_PP_SEQ_NIL, (owner_name)(daily_pay))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_bts_account_type::initial_authority, BOOST_PP_SEQ_NIL,
(weight_threshold)
(account_auths)
(key_auths)
(address_auths))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_bts_account_type::initial_cdd_vesting_policy, BOOST_PP_SEQ_NIL,
(vesting_seconds)
(coin_seconds_earned)
(start_claim)
(coin_seconds_earned_last_update))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_bts_account_type::initial_linear_vesting_policy, BOOST_PP_SEQ_NIL,
(begin_timestamp)
(vesting_cliff_seconds)
(vesting_duration_seconds)
(begin_balance))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_bts_account_type::initial_vesting_balance, BOOST_PP_SEQ_NIL,
(asset_symbol)
(amount)
(policy_type)
(policy))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type::initial_bts_account_type, BOOST_PP_SEQ_NIL,
(name)
(owner_authority)
(active_authority)
(core_balance)
(vesting_balances))
FC_REFLECT_DERIVED_NO_TYPENAME(graphene::chain::genesis_state_type, BOOST_PP_SEQ_NIL,
(initial_timestamp)(max_core_supply)(initial_parameters)(initial_bts_accounts)(initial_accounts)(initial_assets)(initial_balances)
(initial_vesting_balances)(initial_active_witnesses)(initial_witness_candidates)
(initial_committee_candidates)(initial_worker_candidates)
(initial_chain_id)
(immutable_parameters))
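// GRAPHENE_EXTERNAL_SERIALIZATION presumably emits the explicit fc serialization template
// instantiations (raw pack/unpack plus to_variant/from_variant) for each reflected type;
// the empty /*not extern*/ first argument selects definitions here rather than extern declarations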
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_account_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_asset_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_asset_type::initial_collateral_position)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_balance_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_vesting_balance_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_witness_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_committee_member_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_worker_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_bts_account_type::initial_authority)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_bts_account_type::initial_cdd_vesting_policy)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_bts_account_type::initial_linear_vesting_policy)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_bts_account_type::initial_vesting_balance)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type::initial_bts_account_type)
GRAPHENE_EXTERNAL_SERIALIZATION( /*not extern*/, graphene::chain::genesis_state_type)

View file

@ -103,16 +103,11 @@ fc::variant_object get_config()
result[ "GRAPHENE_DEFAULT_WITNESS_PAY_VESTING_SECONDS" ] = GRAPHENE_DEFAULT_WITNESS_PAY_VESTING_SECONDS;
result[ "GRAPHENE_DEFAULT_WORKER_BUDGET_PER_DAY" ] = GRAPHENE_DEFAULT_WORKER_BUDGET_PER_DAY;
result[ "GRAPHENE_MAX_INTEREST_APR" ] = GRAPHENE_MAX_INTEREST_APR;
result[ "GRAPHENE_COMMITTEE_ACCOUNT" ] = fc::variant(GRAPHENE_COMMITTEE_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_WITNESS_ACCOUNT" ] = fc::variant(GRAPHENE_WITNESS_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_RELAXED_COMMITTEE_ACCOUNT" ] = fc::variant(GRAPHENE_RELAXED_COMMITTEE_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_NULL_ACCOUNT" ] = fc::variant(GRAPHENE_NULL_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_TEMP_ACCOUNT" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_PROXY_TO_SELF_ACCOUNT" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_RAKE_FEE_ACCOUNT_ID" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_NULL_WITNESS" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_DEFAULT_RAKE_FEE_PERCENTAGE" ] = fc::variant(GRAPHENE_TEMP_ACCOUNT, GRAPHENE_MAX_NESTED_OBJECTS);
result[ "GRAPHENE_COMMITTEE_ACCOUNT" ] = GRAPHENE_COMMITTEE_ACCOUNT;
result[ "GRAPHENE_WITNESS_ACCOUNT" ] = GRAPHENE_WITNESS_ACCOUNT;
result[ "GRAPHENE_RELAXED_COMMITTEE_ACCOUNT" ] = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT;
result[ "GRAPHENE_NULL_ACCOUNT" ] = GRAPHENE_NULL_ACCOUNT;
result[ "GRAPHENE_TEMP_ACCOUNT" ] = GRAPHENE_TEMP_ACCOUNT;
return result;
}
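A brief usage sketch of the simplified get_config() above; the header path and namespace are assumptions, since neither is shown in this diff:

#include <graphene/chain/get_config.hpp> // assumed header location
#include <iostream>

int main() {
   fc::variant_object cfg = graphene::chain::get_config(); // namespace assumed
   // object-id constants such as GRAPHENE_COMMITTEE_ACCOUNT serialize as id strings, e.g. "1.2.0"
   std::cout << cfg["GRAPHENE_COMMITTEE_ACCOUNT"].as_string() << std::endl;
   return 0;
}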

View file

@ -1,7 +1,3 @@
#ifndef HARDFORK_1000_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_1000_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_1000_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_1000_TIME (fc::time_point_sec( 1540000000 ))
#endif
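For reference, each raw epoch constant introduced in these hardfork headers matches one of the ISO timestamps it replaces; a minimal standalone sketch, assuming only fc's <fc/time.hpp> (the same API used above), that sanity-checks a few of the conversions:

#include <fc/time.hpp>
#include <cassert>

int main() {
   // 1540000000 == 2018-10-20T01:46:40 UTC (HARDFORK_1000 / 1001 / 999)
   assert(fc::time_point_sec::from_iso_string("2018-10-20T01:46:40") == fc::time_point_sec(1540000000));
   // 1444416300 == 2015-10-09T18:45:00 UTC (HARDFORK_357 / 359)
   assert(fc::time_point_sec::from_iso_string("2015-10-09T18:45:00") == fc::time_point_sec(1444416300));
   // 1456250400 == 2016-02-23T18:00:00 UTC (HARDFORK_516 / 533 / 538 / 555 / 563 / 572)
   assert(fc::time_point_sec::from_iso_string("2016-02-23T18:00:00") == fc::time_point_sec(1456250400));
   return 0;
}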

View file

@ -1,8 +1,4 @@
// added delete sport and delete event group operations
#ifndef HARDFORK_1001_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_1001_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_1001_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_1001_TIME (fc::time_point_sec( 1540000000 ))
#endif

View file

@ -0,0 +1,4 @@
// Approve Proposal Enabling
#ifndef HARDFORK_1003_TIME
#define HARDFORK_1003_TIME (fc::time_point_sec( 1566988401 ))
#endif

View file

@ -1,8 +1,4 @@
// #357 Disallow publishing certain malformed price feeds
#ifndef HARDFORK_357_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_357_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#else
#define HARDFORK_357_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#endif
#define HARDFORK_357_TIME (fc::time_point_sec( 1444416300 ))
#endif

View file

@ -1,8 +1,4 @@
// #359 Allow digits in asset name
#ifndef HARDFORK_359_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_359_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#else
#define HARDFORK_359_TIME (fc::time_point_sec::from_iso_string("2015-10-09T18:45:00"))
#endif
#define HARDFORK_359_TIME (fc::time_point_sec( 1444416300 ))
#endif

View file

@ -1,8 +1,4 @@
// #385 October 23 enforce PARENT.CHILD and allow short names
#ifndef HARDFORK_385_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_385_TIME (fc::time_point_sec::from_iso_string("2015-10-23T00:00:00"))
#else
#define HARDFORK_385_TIME (fc::time_point_sec::from_iso_string("2015-10-23T00:00:00"))
#endif
#define HARDFORK_385_TIME (fc::time_point_sec( 1445558400 ))
#endif

View file

@ -1,8 +1,4 @@
// #409 Allow creation of sub-assets
#ifndef HARDFORK_409_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_409_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_409_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_409_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #413 Add operation to claim asset fees
#ifndef HARDFORK_413_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_413_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_413_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_413_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #415 Default accept policy for asset with no whitelist authorities
#ifndef HARDFORK_415_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_415_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_415_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_415_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #416 enforce_white_list is inconsistently applied
#ifndef HARDFORK_416_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_416_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_416_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_416_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #419 Account can pay fees in blacklisted asset
#ifndef HARDFORK_419_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_419_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#else
#define HARDFORK_419_TIME (fc::time_point_sec::from_iso_string("2015-11-04T16:00:00"))
#endif
#define HARDFORK_419_TIME (fc::time_point_sec( 1446652800 ))
#endif

View file

@ -1,8 +1,4 @@
// #436 Prevent margin call from being triggered unless feed < call price
#ifndef HARDFORK_436_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_436_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_436_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_436_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #445 Refund create order fees on cancel
#ifndef HARDFORK_445_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_445_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_445_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_445_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #453 Hardfork to retroactively correct referral percentages
#ifndef HARDFORK_453_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_453_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#else
#define HARDFORK_453_TIME (fc::time_point_sec::from_iso_string("2015-12-16T18:00:00"))
#endif
#define HARDFORK_453_TIME (fc::time_point_sec( 1450288800 ))
#endif

View file

@ -1,8 +1,4 @@
// #480 Fix non-BTS MIA core_exchange_rate check
#ifndef HARDFORK_480_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_480_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#else
#define HARDFORK_480_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#endif
#define HARDFORK_480_TIME (fc::time_point_sec( 1450378800 ))
#endif

View file

@ -1,8 +1,4 @@
// #483 Operation history numbering change
#ifndef HARDFORK_483_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_483_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#else
#define HARDFORK_483_TIME (fc::time_point_sec::from_iso_string("2015-12-17T19:00:00"))
#endif
#define HARDFORK_483_TIME (fc::time_point_sec( 1450378800 ))
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_5050_1_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_5050_1_TIME (fc::time_point_sec::from_iso_string("2020-04-15T20:00:00"))
#else
#define HARDFORK_5050_1_TIME (fc::time_point_sec::from_iso_string("2020-04-22T20:00:00"))
#endif
#endif

View file

@ -1,8 +1,4 @@
// #516 Special authorities
#ifndef HARDFORK_516_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_516_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_516_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_516_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #533 Improve vote counting implementation
#ifndef HARDFORK_533_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_533_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_533_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_533_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #538 Buyback accounts
#ifndef HARDFORK_538_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_538_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_538_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_538_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #555 Buyback accounts
#ifndef HARDFORK_555_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_555_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_555_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_555_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #563 Stealth fee routing
#ifndef HARDFORK_563_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_563_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_563_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_563_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #572 Allow asset to update permission flags when no supply exists
#ifndef HARDFORK_572_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_572_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#else
#define HARDFORK_572_TIME (fc::time_point_sec::from_iso_string("2016-02-23T18:00:00"))
#endif
#define HARDFORK_572_TIME (fc::time_point_sec( 1456250400 ))
#endif

View file

@ -1,8 +1,4 @@
// #599 Unpacking of extension is incorrect
#ifndef HARDFORK_599_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_599_TIME (fc::time_point_sec::from_iso_string("2016-04-04T17:00:00"))
#else
#define HARDFORK_599_TIME (fc::time_point_sec::from_iso_string("2016-04-04T17:00:00"))
#endif
#define HARDFORK_599_TIME (fc::time_point_sec( 1459789200 ))
#endif

View file

@ -1,8 +1,4 @@
// #607 Disable negative voting on workers
#ifndef HARDFORK_607_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_607_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_607_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_607_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// #613 Deprecate annual membership
#ifndef HARDFORK_613_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_613_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_613_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_613_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// #615 Fix price feed expiration check, so websocket server will never spam too much data
#ifndef HARDFORK_615_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_615_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#else
#define HARDFORK_615_TIME (fc::time_point_sec::from_iso_string("2016-03-23T17:00:00"))
#endif
#define HARDFORK_615_TIME (fc::time_point_sec( 1458752400 ))
#endif

View file

@ -1,8 +1,4 @@
// Placeholder HF for affiliate reward system
#ifndef HARDFORK_999_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_999_TIME (fc::time_point_sec::from_iso_string("2018-10-20T01:46:40"))
#else
#define HARDFORK_999_TIME (fc::time_point_sec::from_iso_string("2019-02-18T12:00:00"))
#endif
#define HARDFORK_999_TIME (fc::time_point_sec( 1540000000 ))
#endif

View file

@ -1,8 +1,4 @@
// bitshares-core #429 rounding issue when creating assets
#ifndef HARDFORK_CORE_429_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_CORE_429_TIME (fc::time_point_sec::from_iso_string("2019-08-26T02:00:00"))
#else
#define HARDFORK_CORE_429_TIME (fc::time_point_sec::from_iso_string("2019-09-13T02:00:00"))
#endif
#define HARDFORK_CORE_429_TIME (fc::time_point_sec( 1566784800 ))
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_GPOS_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_GPOS_TIME (fc::time_point_sec::from_iso_string("2020-01-06T01:00:00"))
#else
#define HARDFORK_GPOS_TIME (fc::time_point_sec::from_iso_string("2020-02-17T22:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_HOTFIX_2024_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#else
#define HARDFORK_HOTFIX_2024_TIME (fc::time_point_sec::from_iso_string("2023-12-20T00:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_NFT_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_NFT_TIME (fc::time_point_sec::from_iso_string("2020-08-15T00:00:00"))
#else
#define HARDFORK_NFT_TIME (fc::time_point_sec::from_iso_string("2020-12-21T00:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SIDECHAIN_DELETE_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SIDECHAIN_DELETE_TIME (fc::time_point_sec::from_iso_string("2022-11-16T02:00:00"))
#else
#define HARDFORK_SIDECHAIN_DELETE_TIME (fc::time_point_sec::from_iso_string("2022-11-16T02:00:00"))
#endif
#endif

View file

@ -1,7 +0,0 @@
#ifndef HARDFORK_SON_TIME
#ifdef BUILD_PEERPLAYS_TESTNET
#define HARDFORK_SON_TIME (fc::time_point_sec::from_iso_string("2020-10-28T00:00:00"))
#else
#define HARDFORK_SON_TIME (fc::time_point_sec::from_iso_string("2020-12-21T00:00:00"))
#endif
#endif

Some files were not shown because too many files have changed in this diff.