From 72fb86154e736c8eff0d21f370c64dbac8db18a3 Mon Sep 17 00:00:00 2001
From: oxarbitrage
Date: Sat, 11 Nov 2017 10:32:53 -0300
Subject: [PATCH] Merge pull request #444 from oxarbitrage/elasticsearch

Elasticsearch plugin
---
 libraries/app/application.cpp              |  10 +
 .../chain/operation_history_object.hpp     |  10 +
 libraries/plugins/CMakeLists.txt           |   1 +
 .../plugins/elasticsearch/CMakeLists.txt   |  23 ++
 .../elasticsearch/elasticsearch_plugin.cpp | 388 ++++++++++++++++++
 .../elasticsearch/elasticsearch_plugin.hpp | 159 +++++++
 programs/witness_node/CMakeLists.txt       |   2 +-
 programs/witness_node/main.cpp             |   2 +
 8 files changed, 594 insertions(+), 1 deletion(-)
 create mode 100644 libraries/plugins/elasticsearch/CMakeLists.txt
 create mode 100644 libraries/plugins/elasticsearch/elasticsearch_plugin.cpp
 create mode 100644 libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp

diff --git a/libraries/app/application.cpp b/libraries/app/application.cpp
index 0f0c0690..a71a4f52 100644
--- a/libraries/app/application.cpp
+++ b/libraries/app/application.cpp
@@ -983,8 +983,18 @@ void application::initialize(const fc::path& data_dir, const boost::program_opti
       wanted.push_back("account_history");
       wanted.push_back("market_history");
    }
+   int es_ah_conflict_counter = 0;
    for (auto& it : wanted)
    {
+      if(it == "account_history")
+         ++es_ah_conflict_counter;
+      if(it == "elasticsearch")
+         ++es_ah_conflict_counter;
+
+      if(es_ah_conflict_counter > 1) {
+         elog("Can't start program with elasticsearch and account_history plugin at the same time");
+         std::exit(EXIT_FAILURE);
+      }
       if (!it.empty()) enable_plugin(it);
    }
 }
diff --git a/libraries/chain/include/graphene/chain/operation_history_object.hpp b/libraries/chain/include/graphene/chain/operation_history_object.hpp
index d8b90b58..fa27e45c 100644
--- a/libraries/chain/include/graphene/chain/operation_history_object.hpp
+++ b/libraries/chain/include/graphene/chain/operation_history_object.hpp
@@ -103,6 +103,16 @@ namespace graphene { namespace chain {
 struct by_seq;
 struct by_op;
 struct by_opid;
+
+typedef multi_index_container<
+   operation_history_object,
+   indexed_by<
+      ordered_unique< tag<by_id>, member< object, object_id_type, &object::id > >
+   >
+> operation_history_multi_index_type;
+
+typedef generic_index<operation_history_object, operation_history_multi_index_type> operation_history_index;
+
 typedef multi_index_container<
    account_transaction_history_object,
    indexed_by<
diff --git a/libraries/plugins/CMakeLists.txt b/libraries/plugins/CMakeLists.txt
index fb944627..6a89fd36 100644
--- a/libraries/plugins/CMakeLists.txt
+++ b/libraries/plugins/CMakeLists.txt
@@ -2,6 +2,7 @@ add_subdirectory( witness )
 add_subdirectory( account_history )
 add_subdirectory( accounts_list )
 add_subdirectory( affiliate_stats )
+add_subdirectory( elasticsearch )
 add_subdirectory( market_history )
 add_subdirectory( delayed_node )
 add_subdirectory( bookie )
diff --git a/libraries/plugins/elasticsearch/CMakeLists.txt b/libraries/plugins/elasticsearch/CMakeLists.txt
new file mode 100644
index 00000000..f4815576
--- /dev/null
+++ b/libraries/plugins/elasticsearch/CMakeLists.txt
@@ -0,0 +1,23 @@
+file(GLOB HEADERS "include/graphene/elasticsearch/*.hpp")
+
+add_library( graphene_elasticsearch
+             elasticsearch_plugin.cpp
+           )
+
+target_link_libraries( graphene_elasticsearch graphene_chain graphene_app curl )
+target_include_directories( graphene_elasticsearch
+                            PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" )
+
+if(MSVC)
+  set_source_files_properties(elasticsearch_plugin.cpp PROPERTIES COMPILE_FLAGS "/bigobj" )
+endif(MSVC)
+
+install( TARGETS
+   graphene_elasticsearch
+
+   RUNTIME DESTINATION bin
+   LIBRARY DESTINATION lib
+   ARCHIVE DESTINATION lib
+)
+INSTALL( FILES ${HEADERS} DESTINATION "include/graphene/elasticsearch" )
+
diff --git a/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp
new file mode 100644
index 00000000..b63802db
--- /dev/null
+++ b/libraries/plugins/elasticsearch/elasticsearch_plugin.cpp
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2017 Cryptonomex, Inc., and contributors.
+ *
+ * The MIT License
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <graphene/elasticsearch/elasticsearch_plugin.hpp>
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace graphene { namespace elasticsearch {
+
+namespace detail
+{
+
+class elasticsearch_plugin_impl
+{
+   public:
+      elasticsearch_plugin_impl(elasticsearch_plugin& _plugin)
+         : _self( _plugin )
+      { curl = curl_easy_init(); }
+      virtual ~elasticsearch_plugin_impl();
+
+      void update_account_histories( const signed_block& b );
+
+      graphene::chain::database& database()
+      {
+         return _self.database();
+      }
+
+      elasticsearch_plugin& _self;
+      primary_index< operation_history_index >* _oho_index;
+
+      std::string _elasticsearch_node_url = "http://localhost:9200/";
+      uint32_t _elasticsearch_bulk_replay = 10000;
+      uint32_t _elasticsearch_bulk_sync = 100;
+      bool _elasticsearch_logs = true;
+      bool _elasticsearch_visitor = false;
+      CURL *curl; // curl handler
+      vector<std::string> bulk; // vector of op lines
+   private:
+      void add_elasticsearch( const account_id_type account_id, const optional<operation_history_object>& oho, const signed_block& b );
+      void createBulkLine(account_transaction_history_object ath, operation_history_struct os, int op_type, block_struct bs, visitor_struct vs);
+      void sendBulk(std::string _elasticsearch_node_url, bool _elasticsearch_logs);
+
+};
+
+elasticsearch_plugin_impl::~elasticsearch_plugin_impl()
+{
+   return;
+}
+
+void elasticsearch_plugin_impl::update_account_histories( const signed_block& b )
+{
+   graphene::chain::database& db = database();
+   const vector<optional< operation_history_object > >& hist = db.get_applied_operations();
+   for( const optional< operation_history_object >& o_op : hist ) {
+      optional<operation_history_object> oho;
+
+      auto create_oho = [&]() {
+         return optional<operation_history_object>(
+               db.create<operation_history_object>([&](operation_history_object &h) {
+                  if (o_op.valid())
+                  {
+                     h.op = o_op->op;
+                     h.result = o_op->result;
+                     h.block_num = o_op->block_num;
+                     h.trx_in_block = o_op->trx_in_block;
+                     h.op_in_trx = o_op->op_in_trx;
+                     h.virtual_op = o_op->virtual_op;
+                  }
+               }));
+      };
+
+      if( !o_op.valid() ) {
+         _oho_index->use_next_id();
+         continue;
+      }
+      oho = create_oho();
+
+      const operation_history_object& op = *o_op;
+
+      // get the set of accounts this operation applies to
+      flat_set<account_id_type> impacted;
+      vector<authority> other;
+      operation_get_required_authorities( op.op, impacted, impacted, other ); // fee_payer is added here
+
+      if( op.op.which() == operation::tag< account_create_operation >::value )
+         impacted.insert( op.result.get<object_id_type>() );
+      else
+         graphene::app::operation_get_impacted_accounts( op.op, impacted );
+
+      for( auto& a : other )
+         for( auto& item : a.account_auths )
+            impacted.insert( item.first );
+
+      for( auto& account_id : impacted )
+      {
+         add_elasticsearch( account_id, oho, b );
+      }
+   }
+}
+
+void elasticsearch_plugin_impl::add_elasticsearch( const account_id_type account_id, const optional<operation_history_object>& oho, const signed_block& b)
+{
+   graphene::chain::database& db = database();
+   const auto &stats_obj = account_id(db).statistics(db);
+
+   // add new entry
+   const auto &ath = db.create<account_transaction_history_object>([&](account_transaction_history_object &obj) {
+      obj.operation_id = oho->id;
+      obj.account = account_id;
+      obj.sequence = stats_obj.total_ops + 1;
+      obj.next = stats_obj.most_recent_op;
+   });
+
+   // keep stats growing as no op will be removed
+   db.modify(stats_obj, [&](account_statistics_object &obj) {
+      obj.most_recent_op = ath.id;
+      obj.total_ops = ath.sequence;
+   });
+
+   // operation_type
+   int op_type = -1;
+   if (!oho->id.is_null())
+      op_type = oho->op.which();
+
+   // operation history data
+   operation_history_struct os;
+   os.trx_in_block = oho->trx_in_block;
+   os.op_in_trx = oho->op_in_trx;
+   os.operation_result = fc::json::to_string(oho->result);
+   os.virtual_op = oho->virtual_op;
+   os.op = fc::json::to_string(oho->op);
+
+   // visitor data
+   visitor_struct vs;
+   if(_elasticsearch_visitor) {
+      operation_visitor o_v;
+      oho->op.visit(o_v);
+
+      vs.fee_data.asset = o_v.fee_asset;
+      vs.fee_data.amount = o_v.fee_amount;
+      vs.transfer_data.asset = o_v.transfer_asset_id;
+      vs.transfer_data.amount = o_v.transfer_amount;
+      vs.transfer_data.from = o_v.transfer_from;
+      vs.transfer_data.to = o_v.transfer_to;
+   }
+
+   // block data
+   std::string trx_id = "";
+   if(!b.transactions.empty() && oho->trx_in_block < b.transactions.size()) {
+      trx_id = b.transactions[oho->trx_in_block].id().str();
+   }
+   block_struct bs;
+   bs.block_num = b.block_num();
+   bs.block_time = b.timestamp;
+   bs.trx_id = trx_id;
+
+   // check if we are in replay or in sync and change number of bulk documents accordingly
+   uint32_t limit_documents = 0;
+   if((fc::time_point::now() - b.timestamp) < fc::seconds(30))
+      limit_documents = _elasticsearch_bulk_sync;
+   else
+      limit_documents = _elasticsearch_bulk_replay;
+
+   createBulkLine(ath, os, op_type, bs, vs); // we have everything, creating bulk line
+
+   if (curl && bulk.size() >= limit_documents) { // we are in bulk time, ready to add data to elasticsearch
+      sendBulk(_elasticsearch_node_url, _elasticsearch_logs);
+   }
+
+   // remove everything except current object from ath
+   const auto &his_idx = db.get_index_type<account_transaction_history_index>();
+   const auto &by_seq_idx = his_idx.indices().get<by_seq>();
+   auto itr = by_seq_idx.lower_bound(boost::make_tuple(account_id, 0));
+   if (itr != by_seq_idx.end() && itr->account == account_id && itr->id != ath.id) {
+      // if found, remove the entry
+      const auto remove_op_id = itr->operation_id;
+      const auto itr_remove = itr;
+      ++itr;
+      db.remove( *itr_remove );
+      // modify previous node's next pointer
+      // this should always be true, but just have a check here
+      if( itr != by_seq_idx.end() && itr->account == account_id )
+      {
+         db.modify( *itr, [&]( account_transaction_history_object& obj ){
+            obj.next = account_transaction_history_id_type();
+         });
+      }
+      // do the same on oho
+      const auto &by_opid_idx = his_idx.indices().get<by_opid>();
+      if (by_opid_idx.find(remove_op_id) == by_opid_idx.end()) {
+         db.remove(remove_op_id(db));
+      }
+   }
+}
+
+void elasticsearch_plugin_impl::createBulkLine(account_transaction_history_object ath, operation_history_struct os, int op_type, block_struct bs, visitor_struct vs)
+{
+   bulk_struct bulks;
+   bulks.account_history = ath;
+   bulks.operation_history = os;
+   bulks.operation_type = op_type;
+   bulks.block_data = bs;
+   bulks.additional_data = vs;
+
+   std::string alltogether = fc::json::to_string(bulks);
+
+   auto block_date = bulks.block_data.block_time.to_iso_string();
+   std::vector<std::string> parts;
+   boost::split(parts, block_date, boost::is_any_of("-"));
+   std::string index_name = "graphene-" + parts[0] + "-" + parts[1];
+
+   // bulk header before each line, op_type = create to avoid dups, index id will be ath id(2.9.X).
+   std::string _id = fc::json::to_string(ath.id);
+   bulk.push_back("{ \"index\" : { \"_index\" : \""+index_name+"\", \"_type\" : \"data\", \"op_type\" : \"create\", \"_id\" : "+_id+" } }"); // header
+   bulk.push_back(alltogether);
+}
+
+void elasticsearch_plugin_impl::sendBulk(std::string _elasticsearch_node_url, bool _elasticsearch_logs)
+{
+
+   // curl buffers to read
+   std::string readBuffer;
+   std::string readBuffer_logs;
+
+   std::string bulking = "";
+
+   bulking = boost::algorithm::join(bulk, "\n");
+   bulking = bulking + "\n";
+   bulk.clear();
+
+   //wlog((bulking));
+
+   struct curl_slist *headers = NULL;
+   headers = curl_slist_append(headers, "Content-Type: application/json");
+   std::string url = _elasticsearch_node_url + "_bulk";
+   curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
+   curl_easy_setopt(curl, CURLOPT_POST, true);
+   curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+   curl_easy_setopt(curl, CURLOPT_POSTFIELDS, bulking.c_str());
+   curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
+   curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *)&readBuffer);
+   curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcrp/0.1");
+   //curl_easy_setopt(curl, CURLOPT_VERBOSE, true);
+   curl_easy_perform(curl);
+
+   long http_code = 0;
+   curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &http_code);
+   if(http_code == 200) {
+      // all good, do nothing
+   }
+   else if(http_code == 429) {
+      // repeat request?
+   }
+   else {
+      // exit everything ?
+   }
+
+   if(_elasticsearch_logs) {
+      auto logs = readBuffer;
+      // do logs
+      std::string url_logs = _elasticsearch_node_url + "logs/data/";
+      curl_easy_setopt(curl, CURLOPT_URL, url_logs.c_str());
+      curl_easy_setopt(curl, CURLOPT_POST, true);
+      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+      curl_easy_setopt(curl, CURLOPT_POSTFIELDS, logs.c_str());
+      curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, WriteCallback);
+      curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void *) &readBuffer_logs);
+      curl_easy_setopt(curl, CURLOPT_USERAGENT, "libcrp/0.1");
+      //curl_easy_setopt(curl, CURLOPT_VERBOSE, true);
+      //ilog("log here curl: ${output}", ("output", readBuffer_logs));
+      curl_easy_perform(curl);
+
+      http_code = 0;
+      curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &http_code);
+      if(http_code == 200) {
+         // all good, do nothing
+      }
+      else if(http_code == 429) {
+         // repeat request?
+      }
+      else {
+         // exit everything ?
+      }
+   }
+}
+
+} // end namespace detail
+
+elasticsearch_plugin::elasticsearch_plugin() :
+   my( new detail::elasticsearch_plugin_impl(*this) )
+{
+}
+
+elasticsearch_plugin::~elasticsearch_plugin()
+{
+}
+
+std::string elasticsearch_plugin::plugin_name()const
+{
+   return "elasticsearch";
+}
+std::string elasticsearch_plugin::plugin_description()const
+{
+   return "Stores account history data in elasticsearch database(EXPERIMENTAL).";
+}
+
+void elasticsearch_plugin::plugin_set_program_options(
+   boost::program_options::options_description& cli,
+   boost::program_options::options_description& cfg
+   )
+{
+   cli.add_options()
+         ("elasticsearch-node-url", boost::program_options::value<std::string>(), "Elastic Search database node url")
+         ("elasticsearch-bulk-replay", boost::program_options::value<uint32_t>(), "Number of bulk documents to index on replay(5000)")
+         ("elasticsearch-bulk-sync", boost::program_options::value<uint32_t>(), "Number of bulk documents to index on a synchronized chain(10)")
+         ("elasticsearch-logs", boost::program_options::value<bool>(), "Log bulk events to database")
+         ("elasticsearch-visitor", boost::program_options::value<bool>(), "Use visitor to index additional data(slows down the replay)")
+         ;
+   cfg.add(cli);
+}
+
+void elasticsearch_plugin::plugin_initialize(const boost::program_options::variables_map& options)
+{
+   database().applied_block.connect( [&]( const signed_block& b){ my->update_account_histories(b); } );
+   my->_oho_index = database().add_index< primary_index< operation_history_index > >();
+   database().add_index< primary_index< account_transaction_history_index > >();
+
+   if (options.count("elasticsearch-node-url")) {
+      my->_elasticsearch_node_url = options["elasticsearch-node-url"].as<std::string>();
+   }
+   if (options.count("elasticsearch-bulk-replay")) {
+      my->_elasticsearch_bulk_replay = options["elasticsearch-bulk-replay"].as<uint32_t>();
+   }
+   if (options.count("elasticsearch-bulk-sync")) {
+      my->_elasticsearch_bulk_sync = options["elasticsearch-bulk-sync"].as<uint32_t>();
+   }
+   if (options.count("elasticsearch-logs")) {
+      my->_elasticsearch_logs = options["elasticsearch-logs"].as<bool>();
+   }
+   if (options.count("elasticsearch-visitor")) {
+      my->_elasticsearch_visitor = options["elasticsearch-visitor"].as<bool>();
+   }
+}
+
+void elasticsearch_plugin::plugin_startup()
+{
+}
+
+} }
diff --git a/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp
new file mode 100644
index 00000000..cc51c247
--- /dev/null
+++ b/libraries/plugins/elasticsearch/include/graphene/elasticsearch/elasticsearch_plugin.hpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017 Cryptonomex, Inc., and contributors.
+ *
+ * The MIT License
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#pragma once
+
+#include
+#include
+#include
+
+namespace graphene { namespace elasticsearch {
+   using namespace chain;
+   //using namespace graphene::db;
+   //using boost::multi_index_container;
+   //using namespace boost::multi_index;
+
+//
+// Plugins should #define their SPACE_ID's so plugins with
+// conflicting SPACE_ID assignments can be compiled into the
+// same binary (by simply re-assigning some of the conflicting #defined
+// SPACE_ID's in a build script).
+//
+// Assignment of SPACE_ID's cannot be done at run-time because
+// various template automagic depends on them being known at compile
+// time.
+//
+#ifndef ELASTICSEARCH_SPACE_ID
+#define ELASTICSEARCH_SPACE_ID 6
+#endif
+
+static size_t WriteCallback(void *contents, size_t size, size_t nmemb, void *userp)
+{
+   ((std::string*)userp)->append((char*)contents, size * nmemb);
+   return size * nmemb;
+}
+
+namespace detail
+{
+   class elasticsearch_plugin_impl;
+}
+
+class elasticsearch_plugin : public graphene::app::plugin
+{
+   public:
+      elasticsearch_plugin();
+      virtual ~elasticsearch_plugin();
+
+      std::string plugin_name()const override;
+      std::string plugin_description()const override;
+      virtual void plugin_set_program_options(
+         boost::program_options::options_description& cli,
+         boost::program_options::options_description& cfg) override;
+      virtual void plugin_initialize(const boost::program_options::variables_map& options) override;
+      virtual void plugin_startup() override;
+
+      friend class detail::elasticsearch_plugin_impl;
+      std::unique_ptr<detail::elasticsearch_plugin_impl> my;
+};
+
+
+struct operation_visitor
+{
+   typedef void result_type;
+
+   share_type fee_amount;
+   asset_id_type fee_asset;
+
+   asset_id_type transfer_asset_id;
+   share_type transfer_amount;
+   account_id_type transfer_from;
+   account_id_type transfer_to;
+
+   void operator()( const graphene::chain::transfer_operation& o )
+   {
+      fee_asset = o.fee.asset_id;
+      fee_amount = o.fee.amount;
+
+      transfer_asset_id = o.amount.asset_id;
+      transfer_amount = o.amount.amount;
+      transfer_from = o.from;
+      transfer_to = o.to;
+   }
+   template <typename T>
+   void operator()( const T& o )
+   {
+      fee_asset = o.fee.asset_id;
+      fee_amount = o.fee.amount;
+   }
+};
+
+struct operation_history_struct {
+   int trx_in_block;
+   int op_in_trx;
+   std::string operation_result;
+   int virtual_op;
+   std::string op;
+};
+
+struct block_struct {
+   int block_num;
+   fc::time_point_sec block_time;
+   std::string trx_id;
+};
+
+struct fee_struct {
+   asset_id_type asset;
+   share_type amount;
+};
+
+struct transfer_struct {
+   asset_id_type asset;
+   share_type amount;
+   account_id_type from;
+   account_id_type to;
+};
+
+struct visitor_struct {
+   fee_struct fee_data;
+   transfer_struct transfer_data;
+};
+
+struct bulk_struct {
+   account_transaction_history_object account_history;
+   operation_history_struct operation_history;
+   int operation_type;
+   block_struct block_data;
+   visitor_struct additional_data;
+};
+
+
+} } //graphene::elasticsearch
+
+FC_REFLECT( graphene::elasticsearch::operation_history_struct, (trx_in_block)(op_in_trx)(operation_result)(virtual_op)(op) )
+FC_REFLECT( graphene::elasticsearch::block_struct, (block_num)(block_time)(trx_id) )
+FC_REFLECT( graphene::elasticsearch::fee_struct, (asset)(amount) )
+FC_REFLECT( graphene::elasticsearch::transfer_struct, (asset)(amount)(from)(to) )
+FC_REFLECT( graphene::elasticsearch::visitor_struct, (fee_data)(transfer_data) )
+FC_REFLECT( graphene::elasticsearch::bulk_struct, (account_history)(operation_history)(operation_type)(block_data)(additional_data) )
+
+
diff --git a/programs/witness_node/CMakeLists.txt b/programs/witness_node/CMakeLists.txt
index e43172a0..26c790ec 100644
--- a/programs/witness_node/CMakeLists.txt
+++ b/programs/witness_node/CMakeLists.txt
@@ -11,7 +11,7 @@ endif()
 
 # We have to link against graphene_debug_witness because deficiency in our API infrastructure doesn't allow plugins to be fully abstracted #246
 target_link_libraries( witness_node
-                       PRIVATE graphene_app graphene_account_history graphene_affiliate_stats graphene_market_history graphene_witness graphene_chain graphene_debug_witness graphene_bookie graphene_egenesis_full fc peerplays_sidechain ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} )
+                       PRIVATE graphene_app graphene_account_history graphene_affiliate_stats graphene_elasticsearch graphene_market_history graphene_witness graphene_chain graphene_debug_witness graphene_bookie graphene_egenesis_full fc peerplays_sidechain ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} )
 
 # also add dependencies to graphene_generate_genesis graphene_generate_uia_sharedrop_genesis if you want those plugins
 install( TARGETS
diff --git a/programs/witness_node/main.cpp b/programs/witness_node/main.cpp
index 6675ee87..f6259ddb 100644
--- a/programs/witness_node/main.cpp
+++ b/programs/witness_node/main.cpp
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <graphene/elasticsearch/elasticsearch_plugin.hpp>
 #include
 //#include
 //#include
@@ -85,6 +86,7 @@ int main(int argc, char** argv) {
       auto witness_plug = node->register_plugin();
      auto history_plug = node->register_plugin();
+      auto elasticsearch_plug = node->register_plugin<elasticsearch::elasticsearch_plugin>();
      auto market_history_plug = node->register_plugin();
      //auto generate_genesis_plug = node->register_plugin();
      //auto generate_uia_sharedrop_genesis_plug = node->register_plugin();
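
Note on the bulk request the plugin builds (not part of the patch itself): createBulkLine() pushes an action/metadata line followed by a JSON document line for each indexed operation, and sendBulk() joins them with newlines and POSTs the result to the node's _bulk endpoint. The standalone sketch below illustrates that flow with libcurl under stated assumptions: the node URL (http://localhost:9200/), index name (graphene-2017-11), _id (2.9.0) and the sample document fields are made-up placeholders for illustration, not values taken from the patch.

// Minimal sketch of an Elasticsearch bulk POST in the style of sendBulk().
#include <curl/curl.h>
#include <iostream>
#include <string>
#include <vector>

// Append the HTTP response body into the std::string passed via CURLOPT_WRITEDATA.
static size_t write_cb(void* contents, size_t size, size_t nmemb, void* userp)
{
   static_cast<std::string*>(userp)->append(static_cast<char*>(contents), size * nmemb);
   return size * nmemb;
}

int main()
{
   // One action line plus one document line per indexed operation,
   // mirroring the header/payload pairs pushed into `bulk` by createBulkLine().
   std::vector<std::string> bulk;
   bulk.push_back(R"({ "index" : { "_index" : "graphene-2017-11", "_type" : "data", "_id" : "2.9.0" } })");
   bulk.push_back(R"({ "operation_type" : 0, "block_data" : { "block_num" : 1 } })");

   std::string body;
   for (const auto& line : bulk)
      body += line + "\n"; // the bulk API expects newline-delimited JSON with a trailing newline

   CURL* curl = curl_easy_init();
   if (!curl)
      return 1;

   struct curl_slist* headers = nullptr;
   headers = curl_slist_append(headers, "Content-Type: application/json");

   std::string response;
   curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:9200/_bulk");
   curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
   curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body.c_str()); // implies an HTTP POST
   curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_cb);
   curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response);

   CURLcode res = curl_easy_perform(curl);
   if (res == CURLE_OK)
      std::cout << response << std::endl; // per-item results from the node

   curl_slist_free_all(headers);
   curl_easy_cleanup(curl);
   return res == CURLE_OK ? 0 : 1;
}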