diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml index b0f1868..0827b80 100644 --- a/.github/workflows/c-cpp.yml +++ b/.github/workflows/c-cpp.yml @@ -2,6 +2,7 @@ name: C/C++ CI on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/.github/workflows/clang-tidy.yml b/.github/workflows/clang-tidy.yml index bcaa5da..69a94c4 100644 --- a/.github/workflows/clang-tidy.yml +++ b/.github/workflows/clang-tidy.yml @@ -2,6 +2,7 @@ name: clang-tidy on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index f4df4b2..dac766f 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -13,6 +13,7 @@ name: "CodeQL" on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/.github/workflows/cppcheck.yml b/.github/workflows/cppcheck.yml index f6c4df1..d7002e0 100644 --- a/.github/workflows/cppcheck.yml +++ b/.github/workflows/cppcheck.yml @@ -2,6 +2,7 @@ name: cppcheck on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/.github/workflows/msvc-analysis.yml b/.github/workflows/msvc-analysis.yml index 5c7c51c..b6a058c 100644 --- a/.github/workflows/msvc-analysis.yml +++ b/.github/workflows/msvc-analysis.yml @@ -10,6 +10,7 @@ name: Microsoft C++ Code Analysis on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/.github/workflows/source-snapshot.yml b/.github/workflows/source-snapshot.yml index 9114982..e8c3dd2 100644 --- a/.github/workflows/source-snapshot.yml +++ b/.github/workflows/source-snapshot.yml @@ -1,6 +1,10 @@ name: source-snapshot -on: [push, pull_request] +on: + push: + branches: ["master"] + + pull_request: jobs: source-snapshot: diff --git a/.github/workflows/test-sync.yml b/.github/workflows/test-sync.yml index 9ec2832..9ed1e0d 100644 --- 
a/.github/workflows/test-sync.yml +++ b/.github/workflows/test-sync.yml @@ -2,6 +2,7 @@ name: Sync test on: push: + branches: ["master"] paths-ignore: - 'docker-compose/**' - 'docs/**' diff --git a/CMakeLists.txt b/CMakeLists.txt index ee04baa..658d73b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,7 +17,9 @@ option(WITH_RANDOMX "Include the RandomX library in the build. If this is turned option(WITH_LTO "Use link-time compiler optimization (if linking fails for you, run cmake with -DWITH_LTO=OFF)" ON) option(WITH_UPNP "Include UPnP support. If this is turned off, p2pool will not be able to configure port forwarding on UPnP-enabled routers." ON) option(WITH_GRPC "Include gRPC support. If this is turned off, p2pool will not be able to merge mine with Tari." ON) -option(WITH_TLS "Include TLS support. If this is turned off, p2pool will not support Stratum TLS connections." ON) +option(WITH_TLS "Include TLS support. If this is turned off, p2pool will not support Stratum TLS connections, and lack many other security features. It's recommended to keep it ON!" ON) + +option(WITH_MERGE_MINING_DONATION "Merge mine donations to the author. This doesn't affect your hashrate or payouts in any way - only unused merge mining capacity will be utilised. If you merge mine yourself, your settings will take priority." 
ON) option(DEV_TEST_SYNC "[Developer only] Sync test, stop p2pool after sync is complete" OFF) option(DEV_WITH_TSAN "[Developer only] Compile with thread sanitizer" OFF) @@ -56,6 +58,10 @@ if (WITH_UPNP) set(LIBS ${LIBS} libminiupnpc-static) endif() +if (WITH_MERGE_MINING_DONATION) + add_definitions(-DWITH_MERGE_MINING_DONATION) +endif() + if (DEV_TEST_SYNC) add_definitions(-DDEV_TEST_SYNC) endif() diff --git a/README.md b/README.md index 0b0f3ea..d00c1ad 100644 --- a/README.md +++ b/README.md @@ -349,3 +349,13 @@ If you'd like to support further development of Monero P2Pool, you're welcome to ``` 44MnN1f3Eto8DZYUWuE5XZNUtE3vcRzt2j6PzqWpPau34e6Cf4fAxt6X2MBmrm6F9YMEiMNjN6W4Shn4pLcfNAja621jwyg ``` + +Starting from v4.6, P2Pool will also have the ability to merge mine donations for the author. + +Note that this will not affect your hashrate or payouts in any way. P2Pool will always (100% of the time) keep mining to your configured Monero wallet address, but it might also use the available merge mining capacity to donate to the author. + +If you merge mine yourself, your settings will take priority. + +The merge mining donation runs without a donation server - there is no "phoning home" to a server, everything is done through the P2Pool network in a decentralized way. + +To opt out of this, build P2Pool with `-DWITH_MERGE_MINING_DONATION=OFF` in the cmake command line. Due to the architecture of the decentralized P2Pool network, your node will still relay donation messages to other nodes even if you opted out (but it will not process them). 
diff --git a/cppcheck/run.cmd b/cppcheck/run.cmd index 870a905..34a87de 100644 --- a/cppcheck/run.cmd +++ b/cppcheck/run.cmd @@ -1,5 +1,5 @@ @echo off -"..\cppcheck-main\bin\cppcheck.exe" ../src/*.cpp -D__cppcheck__ -DSIZE_MAX=UINT64_MAX -DRAPIDJSON_ENDIAN=RAPIDJSON_LITTLEENDIAN -D_WIN32=1 -D_WIN64=1 -DWIN32=1 -D_WINDOWS=1 -DNDEBUG=1 -DWITH_GRPC=1 -DPROTOBUF_ENABLE_DEBUG_LOGGING_MAY_LEAK_PII=0 -DWITH_RANDOMX=1 -DWITH_UPNP=1 -DCURL_STATICLIB=1 -DWIN32_LEAN_AND_MEAN=1 -D_WIN32_WINNT=0x0600 -D_DISABLE_VECTOR_ANNOTATION=1 -D_DISABLE_STRING_ANNOTATION=1 -DZMQ_STATIC=1 -DHAVE_BITSCANREVERSE64=1 -DRAPIDJSON_PARSE_DEFAULT_FLAGS=kParseTrailingCommasFlag -DMINIUPNP_STATICLIB=1 -DCARES_STATICLIB=1 -DCMAKE_INTDIR="Release" -D__SSE2__=1 -D_MSC_VER=1929 --platform=win64 --std=c++17 --enable=all --inconclusive --inline-suppr --template="{file}:{line}:{id}{inconclusive: INCONCLUSIVE} {message}" --includes-file=includes.txt --suppressions-list=suppressions.txt --output-file=errors_full.txt --max-ctu-depth=3 --check-level=exhaustive --checkers-report=checkers_report.txt +"..\cppcheck-main\bin\cppcheck.exe" ../src/*.cpp -D__cppcheck__ -DSIZE_MAX=UINT64_MAX -DRAPIDJSON_ENDIAN=RAPIDJSON_LITTLEENDIAN -D_WIN32=1 -D_WIN64=1 -DWIN32=1 -D_WINDOWS=1 -DNDEBUG=1 -DWITH_GRPC=1 -DPROTOBUF_ENABLE_DEBUG_LOGGING_MAY_LEAK_PII=0 -DWITH_RANDOMX=1 -DWITH_UPNP=1 -DWITH_TLS=1 -DWITH_MERGE_MINING_DONATION=1 -DCURL_STATICLIB=1 -DWIN32_LEAN_AND_MEAN=1 -D_WIN32_WINNT=0x0600 -D_DISABLE_VECTOR_ANNOTATION=1 -D_DISABLE_STRING_ANNOTATION=1 -DZMQ_STATIC=1 -DHAVE_BITSCANREVERSE64=1 -DRAPIDJSON_PARSE_DEFAULT_FLAGS=kParseTrailingCommasFlag -DMINIUPNP_STATICLIB=1 -DCARES_STATICLIB=1 -DCMAKE_INTDIR="Release" -D__SSE2__=1 -D_MSC_VER=1929 --platform=win64 --std=c++17 --enable=all --inconclusive --inline-suppr --template="{file}:{line}:{id}{inconclusive: INCONCLUSIVE} {message}" --includes-file=includes.txt --suppressions-list=suppressions.txt --output-file=errors_full.txt --max-ctu-depth=3 --check-level=exhaustive 
--checkers-report=checkers_report.txt findstr /V /C:"external\src" errors_full.txt > errors_filtered0.txt findstr /V /C:":checkersReport" errors_filtered0.txt > errors_filtered.txt diff --git a/external/src/crypto/sha256.c b/external/src/crypto/sha256.c index a334210..d625251 100644 --- a/external/src/crypto/sha256.c +++ b/external/src/crypto/sha256.c @@ -21,6 +21,14 @@ #include /****************************** MACROS ******************************/ +typedef struct { + uint8_t data[64]; + uint32_t datalen; + uint32_t padding; + uint64_t bitlen; + uint32_t state[8]; +} SHA256_CTX; + #define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b)))) #define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b)))) @@ -85,7 +93,7 @@ static void sha256_transform(SHA256_CTX *ctx, const uint8_t* data) ctx->state[7] += h; } -void sha256_init(SHA256_CTX *ctx) +static void sha256_init(SHA256_CTX *ctx) { ctx->datalen = 0; ctx->bitlen = 0; @@ -99,7 +107,7 @@ void sha256_init(SHA256_CTX *ctx) ctx->state[7] = 0x5be0cd19; } -void sha256_update(SHA256_CTX *ctx, const uint8_t* data, uint32_t len) +static void sha256_update(SHA256_CTX *ctx, const uint8_t* data, uint32_t len) { uint32_t i; @@ -114,7 +122,7 @@ void sha256_update(SHA256_CTX *ctx, const uint8_t* data, uint32_t len) } } -void sha256_final(SHA256_CTX *ctx, uint8_t* hash) +static void sha256_final(SHA256_CTX *ctx, uint8_t* hash) { uint32_t i; diff --git a/external/src/crypto/sha256.h b/external/src/crypto/sha256.h index 00fb1e4..c68c952 100644 --- a/external/src/crypto/sha256.h +++ b/external/src/crypto/sha256.h @@ -16,22 +16,6 @@ extern "C" { /*************************** HEADER FILES ***************************/ #include -/****************************** MACROS ******************************/ -#define SHA256_BLOCK_SIZE 32 // SHA256 outputs a 32 byte digest - -typedef struct { - uint8_t data[64]; - uint32_t datalen; - uint32_t padding; - uint64_t bitlen; - uint32_t state[8]; -} SHA256_CTX; - -/*********************** FUNCTION DECLARATIONS 
**********************/ -void sha256_init(SHA256_CTX *ctx); -void sha256_update(SHA256_CTX *ctx, const uint8_t* data, uint32_t len); -void sha256_final(SHA256_CTX *ctx, uint8_t* hash); - void sha256(const void* data, uint32_t len, uint8_t* hash); #ifdef __cplusplus diff --git a/src/block_template.cpp b/src/block_template.cpp index c22b657..115efec 100644 --- a/src/block_template.cpp +++ b/src/block_template.cpp @@ -612,6 +612,20 @@ void BlockTemplate::update(const MinerData& data, const Mempool& mempool, const m_poolBlockTemplate->m_auxChains = data.aux_chains; m_poolBlockTemplate->m_auxNonce = data.aux_nonce; + m_poolBlockTemplate->m_mergeMiningExtra.clear(); + + for (const AuxChainData& c : data.aux_chains) { + std::vector v; + v.reserve(HASH_SIZE + 16); + + v.assign(c.data.h, c.data.h + HASH_SIZE); + + writeVarint(c.difficulty.lo, v); + writeVarint(c.difficulty.hi, v); + + m_poolBlockTemplate->m_mergeMiningExtra.emplace(c.unique_id, std::move(v)); + } + init_merge_mining_merkle_proof(); const std::vector sidechain_data = m_poolBlockTemplate->serialize_sidechain_data(); diff --git a/src/merge_mining_client.h b/src/merge_mining_client.h index 8988d6c..1f6f07b 100644 --- a/src/merge_mining_client.h +++ b/src/merge_mining_client.h @@ -21,6 +21,7 @@ namespace p2pool { class p2pool; class BlockTemplate; +struct PoolBlock; class IMergeMiningClient { @@ -38,7 +39,8 @@ public: virtual ~IMergeMiningClient() {} [[nodiscard]] virtual bool get_params(ChainParameters& out_params) const = 0; - virtual void submit_solution(const BlockTemplate* block_tpl, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) = 0; + virtual void on_external_block(const PoolBlock& block) = 0; + virtual void submit_solution(const std::vector& coinbase_merkle_proof, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& 
merkle_proof, uint32_t merkle_proof_path) = 0; virtual void print_status() const = 0; }; diff --git a/src/merge_mining_client_json_rpc.cpp b/src/merge_mining_client_json_rpc.cpp index bf8630a..2f602b8 100644 --- a/src/merge_mining_client_json_rpc.cpp +++ b/src/merge_mining_client_json_rpc.cpp @@ -287,7 +287,7 @@ bool MergeMiningClientJSON_RPC::parse_merge_mining_get_aux_block(const char* dat return true; } -void MergeMiningClientJSON_RPC::submit_solution(const BlockTemplate* /*block_tpl*/, const uint8_t (&/*hashing_blob*/)[128], size_t /*nonce_offset*/, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) +void MergeMiningClientJSON_RPC::submit_solution(const std::vector& /*coinbase_merkle_proof*/, const uint8_t (&/*hashing_blob*/)[128], size_t /*nonce_offset*/, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) { ReadLock lock(m_lock); diff --git a/src/merge_mining_client_json_rpc.h b/src/merge_mining_client_json_rpc.h index b3a6286..c1e63e9 100644 --- a/src/merge_mining_client_json_rpc.h +++ b/src/merge_mining_client_json_rpc.h @@ -22,6 +22,7 @@ namespace p2pool { class p2pool; +struct PoolBlock; class MergeMiningClientJSON_RPC : public IMergeMiningClient { @@ -30,7 +31,8 @@ public: ~MergeMiningClientJSON_RPC() override; bool get_params(ChainParameters& out_params) const override; - void submit_solution(const BlockTemplate* block_tpl, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) override; + void on_external_block(const PoolBlock& /*block*/) override {} + void submit_solution(const std::vector& coinbase_merkle_proof, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) override; void print_status() const override; diff 
--git a/src/merge_mining_client_tari.cpp b/src/merge_mining_client_tari.cpp index 1627578..a9033d2 100644 --- a/src/merge_mining_client_tari.cpp +++ b/src/merge_mining_client_tari.cpp @@ -22,6 +22,9 @@ #include "params.h" #include "block_template.h" #include "keccak.h" +#include "pool_block.h" +#include "merkle.h" +#include "side_chain.h" LOG_CATEGORY(MergeMiningClientTari) @@ -79,7 +82,14 @@ MergeMiningClientTari::MergeMiningClientTari(p2pool* pool, std::string host, con log::Stream s(buf); s << "127.0.0.1:" << m_server->external_listen_port(); - m_TariNode = new BaseNode::Stub(grpc::CreateChannel(buf, grpc::InsecureChannelCredentials())); + grpc::ChannelArguments cArgs; + + cArgs.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, 1000); + + cArgs.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, 1000); + cArgs.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 10000); + + m_TariNode = new BaseNode::Stub(grpc::CreateCustomChannel(buf, grpc::InsecureChannelCredentials(), cArgs)); uv_mutex_init_checked(&m_workerLock); uv_cond_init_checked(&m_workerCond); @@ -127,7 +137,148 @@ bool MergeMiningClientTari::get_params(ChainParameters& out_params) const return true; } -void MergeMiningClientTari::submit_solution(const BlockTemplate* block_tpl, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) +void MergeMiningClientTari::on_external_block(const PoolBlock& block) +{ + // Sanity check + if (block.m_transactions.empty() || (block.m_hashingBlob.size() > 128)) { + return; + } + + // Don't continue if our aux chain is not there + if (block.m_mergeMiningExtra.find(m_chainParams.aux_id) == block.m_mergeMiningExtra.end()) { + return; + } + + std::vector aux_ids; + std::vector aux_chains; + + // All aux chains in this block + the P2Pool sidechain + aux_ids.reserve(block.m_mergeMiningExtra.size() + 1); + + // All aux chains in this block + 
aux_chains.reserve(block.m_mergeMiningExtra.size()); + + for (const auto& i : block.m_mergeMiningExtra) { + const std::vector& v = i.second; + + const uint8_t* p = v.data(); + const uint8_t* e = v.data() + v.size(); + + if (p + HASH_SIZE > e) { + return; + } + + hash data; + memcpy(data.h, p, HASH_SIZE); + p += HASH_SIZE; + + difficulty_type diff; + p = readVarint(p, e, diff.lo); + if (!p) { + return; + } + + p = readVarint(p, e, diff.hi); + if (!p) { + return; + } + + // If it's our aux chain, check that it's the same job and that there is enough PoW + if (i.first == m_chainParams.aux_id) { + if ((data != m_chainParams.aux_hash) || (diff != m_chainParams.aux_diff)) { + LOGWARN(3, "External aux job solution found, but it's stale"); + return; + } + if (!diff.check_pow(block.m_powHash)) { + LOGINFO(3, "External aux job solution found, but it doesn't have enough PoW"); + return; + } + } + + aux_ids.emplace_back(i.first); + aux_chains.emplace_back(i.first, data, diff); + } + + aux_ids.emplace_back(m_pool->side_chain().consensus_hash()); + + LOGINFO(3, log::LightGreen() << "External aux job solution found. 
Processing it!"); + + // coinbase_merkle_proof + + std::vector> tree; + + merkle_hash_full_tree(block.m_transactions, tree); + + std::vector proof; + uint32_t path; + + if (!get_merkle_proof(tree, block.m_transactions[0], proof, path)) { + LOGWARN(3, "on_external_block: get_merkle_proof failed for coinbase transaction"); + return; + } + + std::vector coinbase_merkle_proof; + for (const hash& h : proof) { + coinbase_merkle_proof.insert(coinbase_merkle_proof.end(), h.h, h.h + HASH_SIZE); + } + + // hashing_blob + + uint8_t hashing_blob[128] = {}; + memcpy(hashing_blob, block.m_hashingBlob.data(), block.m_hashingBlob.size()); + + // nonce_offset and blob + + size_t header_size = 0; + const std::vector blob = block.serialize_mainchain_data(&header_size); + + if (header_size <= NONCE_SIZE) { + LOGWARN(3, "on_external_block: invalid header_size"); + return; + } + + const uint32_t nonce_offset = static_cast(header_size - NONCE_SIZE); + + // aux_merkle_proof, aux_merkle_proof_path + + std::vector aux_merkle_proof; + uint32_t aux_merkle_proof_path = 0; + + const hash sidechain_id = block.m_sidechainId; + const uint32_t n_aux_chains = static_cast(block.m_mergeMiningExtra.size() + 1); + + std::vector hashes(n_aux_chains); + + uint32_t aux_nonce; + if (!find_aux_nonce(aux_ids, aux_nonce, 1000)) { + LOGWARN(3, "on_external_block: failed to find aux_nonce"); + return; + } + + for (const AuxChainData& aux_data : aux_chains) { + const uint32_t aux_slot = get_aux_slot(aux_data.unique_id, aux_nonce, n_aux_chains); + hashes[aux_slot] = aux_data.data; + } + + const uint32_t aux_slot = get_aux_slot(m_pool->side_chain().consensus_hash(), aux_nonce, n_aux_chains); + hashes[aux_slot] = sidechain_id; + + merkle_hash_full_tree(hashes, tree); + + if (tree.empty() || tree.back().empty() || (tree.back().front() != block.m_merkleRoot)) { + LOGWARN(3, "on_external_block: merkle root didn't match"); + return; + } + + if (!get_merkle_proof(tree, m_chainParams.aux_hash, aux_merkle_proof, 
aux_merkle_proof_path)) { + LOGWARN(3, "on_external_block: get_merkle_proof failed for the aux hash"); + return; + } + + submit_solution(coinbase_merkle_proof, hashing_blob, nonce_offset, block.m_seed, blob, aux_merkle_proof, aux_merkle_proof_path); +} + +void MergeMiningClientTari::submit_solution(const std::vector& coinbase_merkle_proof, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) { Block block; { @@ -164,8 +315,6 @@ void MergeMiningClientTari::submit_solution(const BlockTemplate* block_tpl, cons data.append(reinterpret_cast(hashing_blob + nonce_offset + sizeof(uint32_t)), HASH_SIZE); // Coinbase transaction's Merkle proof - const std::vector coinbase_merkle_proof = block_tpl->get_coinbase_merkle_proof(); - // Number of hashes in the proof (varint, but an O(logN) proof will never get bigger than 127) data.append(1, static_cast(coinbase_merkle_proof.size() / HASH_SIZE)); @@ -247,7 +396,35 @@ void MergeMiningClientTari::submit_solution(const BlockTemplate* block_tpl, cons uv_work_t req; MergeMiningClientTari* client; Block block; - } *work = new Work{ {}, this, std::move(block) }; + + FORCEINLINE Work(MergeMiningClientTari* c, Block&& b) : req{}, client(c), block(std::move(b)) {} + + void process() const + { + grpc::ClientContext ctx; + SubmitBlockResponse response; + + const grpc::Status status = client->m_TariNode->SubmitBlock(&ctx, block, &response); + + if (!status.ok()) { + LOGWARN(4, "SubmitBlock failed: " << status.error_message()); + if (!status.error_details().empty()) { + LOGWARN(4, "SubmitBlock failed: " << status.error_details()); + } + } + else { + const std::string& h = response.block_hash(); + LOGINFO(0, log::LightGreen() << "Mined Tari block " << log::hex_buf(h.data(), h.size()) << " at height " << block.header().height()); + } + } + } *work = new Work(this, std::move(block)); + + if (!is_main_thread()) { + LOGINFO(5, "Running 
SubmitBlock in the current thread because uv_default_loop can only be used in the main thread"); + work->process(); + delete work; + return; + } work->req.data = work; @@ -255,23 +432,7 @@ void MergeMiningClientTari::submit_solution(const BlockTemplate* block_tpl, cons [](uv_work_t* req) { BACKGROUND_JOB_START(MergeMiningClientTari::submit_solution); - - grpc::ClientContext ctx; - SubmitBlockResponse response; - - const Work* w = reinterpret_cast(req->data); - const grpc::Status status = w->client->m_TariNode->SubmitBlock(&ctx, w->block, &response); - - if (!status.ok()) { - LOGWARN(5, "SubmitBlock failed: " << status.error_message()); - if (!status.error_details().empty()) { - LOGWARN(5, "SubmitBlock failed: " << status.error_details()); - } - } - else { - const std::string& h = response.block_hash(); - LOGINFO(0, log::LightGreen() << "Mined Tari block " << log::hex_buf(h.data(), h.size()) << " at height " << w->block.header().height()); - } + reinterpret_cast(req->data)->process(); }, [](uv_work_t* req, int /*status*/) { @@ -366,9 +527,9 @@ void MergeMiningClientTari::run() const grpc::Status status = m_TariNode->GetNewBlockTemplateWithCoinbases(&ctx, request, &response); if (!status.ok()) { - LOGWARN(5, "GetNewBlockTemplateWithCoinbases failed: " << status.error_message()); + LOGWARN(4, "GetNewBlockTemplateWithCoinbases failed: " << status.error_message()); if (!status.error_details().empty()) { - LOGWARN(5, "GetNewBlockTemplateWithCoinbases failed: " << status.error_details()); + LOGWARN(4, "GetNewBlockTemplateWithCoinbases failed: " << status.error_details()); } } else { @@ -414,7 +575,7 @@ void MergeMiningClientTari::run() m_tariBlock = response.block(); - LOGINFO(5, "Tari block template: height = " << job_params.height + LOGINFO(4, "Tari aux block template: height = " << job_params.height << ", diff = " << job_params.diff << ", reward = " << job_params.reward << ", fees = " << job_params.fees diff --git a/src/merge_mining_client_tari.h 
b/src/merge_mining_client_tari.h index 3b265ae..2a3a7d2 100644 --- a/src/merge_mining_client_tari.h +++ b/src/merge_mining_client_tari.h @@ -23,6 +23,7 @@ namespace p2pool { class p2pool; +struct PoolBlock; class MergeMiningClientTari : public IMergeMiningClient, public nocopy_nomove { @@ -33,7 +34,8 @@ public: static constexpr char TARI_PREFIX[] = "tari://"; bool get_params(ChainParameters& out_params) const override; - void submit_solution(const BlockTemplate* block_tpl, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) override; + void on_external_block(const PoolBlock& block) override; + void submit_solution(const std::vector& coinbase_merkle_proof, const uint8_t (&hashing_blob)[128], size_t nonce_offset, const hash& seed_hash, const std::vector& blob, const std::vector& merkle_proof, uint32_t merkle_proof_path) override; void print_status() const override; diff --git a/src/p2p_server.cpp b/src/p2p_server.cpp index 3ee3841..6595931 100644 --- a/src/p2p_server.cpp +++ b/src/p2p_server.cpp @@ -29,6 +29,16 @@ #include "p2pool_api.h" #include "stratum_server.h" #include "rapidjson_wrapper.h" +#include "merge_mining_client.h" +#include "sha256.h" + +#ifdef WITH_TLS +#include +#else +#define ED25519_PUBLIC_KEY_LEN 32 +#define ED25519_SIGNATURE_LEN 64 +#endif + #include #include @@ -62,10 +72,13 @@ P2PServer::P2PServer(p2pool* pool) , m_lookForMissingBlocks(true) , m_fastestPeer(nullptr) , m_newP2PoolVersionDetected(false) + , m_auxJobLastMessageTimestamp(0) { m_callbackBuf.resize(P2P_BUF_SIZE); m_blockDeserializeBuf.reserve(MAX_BLOCK_SIZE); + m_auxJobMessages.reserve(1024); + // Diffuse the initial state in case it has low quality m_rng.discard(10000); @@ -105,6 +118,17 @@ P2PServer::P2PServer(p2pool* pool) } m_showPeersAsync.data = this; +#ifdef WITH_MERGE_MINING_DONATION + uv_mutex_init_checked(&m_AuxJobBroadcastLock); + + err = uv_async_init(&m_loop, 
&m_AuxJobBroadcastAsync, on_aux_job_broadcast); + if (err) { + LOGERR(1, "uv_async_init failed, error " << uv_err_name(err)); + PANIC_STOP(); + } + m_AuxJobBroadcastAsync.data = this; +#endif + err = uv_timer_init(&m_loop, &m_timer); if (err) { LOGERR(1, "failed to create timer, error " << uv_err_name(err)); @@ -142,6 +166,10 @@ P2PServer::~P2PServer() uv_mutex_destroy(&m_connectToPeersLock); uv_mutex_destroy(&m_showPeersLock); +#ifdef WITH_MERGE_MINING_DONATION + uv_mutex_destroy(&m_AuxJobBroadcastLock); +#endif + delete m_block; delete m_cache; @@ -1008,7 +1036,7 @@ void P2PServer::on_broadcast() return p - buf; }); if (!result) { - LOGWARN(5, "failed to broadcast to " << static_cast(client->m_addrString) << ", disconnecting"); + LOGWARN(5, "failed to broadcast a block to " << static_cast(client->m_addrString) << ", disconnecting"); client->close(); break; } @@ -1132,6 +1160,7 @@ void P2PServer::on_timer() check_block_template(); check_for_updates(); api_update_local_stats(); + clean_aux_job_messages(); } void P2PServer::flush_cache() @@ -1374,6 +1403,13 @@ void P2PServer::on_shutdown() MutexLock lock(m_showPeersLock); uv_close(reinterpret_cast(&m_showPeersAsync), nullptr); } + +#ifdef WITH_MERGE_MINING_DONATION + { + MutexLock lock(m_AuxJobBroadcastLock); + uv_close(reinterpret_cast(&m_AuxJobBroadcastAsync), nullptr); + } +#endif } void P2PServer::api_update_local_stats() @@ -1429,6 +1465,112 @@ void P2PServer::api_update_local_stats() }); } +#ifdef WITH_MERGE_MINING_DONATION +void P2PServer::broadcast_aux_job_donation_async(const uint8_t* data, uint32_t data_size, uint64_t timestamp) +{ + MutexLock lock(m_AuxJobBroadcastLock); + + m_AuxJobBroadcast.job.assign(data, data + data_size); + m_AuxJobBroadcast.timestamp = timestamp; + + const int err = uv_async_send(&m_AuxJobBroadcastAsync); + if (err) { + LOGERR(1, "uv_async_send failed, error " << uv_err_name(err)); + } +} + +void P2PServer::broadcast_aux_job_donation_handler() +{ + AuxJobBroadcast auxJobBroadcast; 
+ { + MutexLock lock(m_AuxJobBroadcastLock); + auxJobBroadcast = std::move(m_AuxJobBroadcast); + } + + if (auxJobBroadcast.job.empty()) { + return; + } + + broadcast_aux_job_donation(auxJobBroadcast.job.data(), static_cast(auxJobBroadcast.job.size()), auxJobBroadcast.timestamp, nullptr, false); +} +#endif + +void P2PServer::broadcast_aux_job_donation(const uint8_t* data, uint32_t data_size, uint64_t timestamp, const P2PClient* source, bool duplicate_check_done) +{ + check_event_loop_thread(__func__); + + if (!duplicate_check_done) { + hash digest; + sha256(data, data_size, digest.h); + + // Every message can be received from multiple peers, so broadcast it only once + if (!m_auxJobMessages.emplace(*digest.u64(), timestamp).second) { + return; + } + } + + m_auxJobLastMessage.assign(data, data + data_size); + m_auxJobLastMessageTimestamp = timestamp; + + for (P2PClient* client = static_cast(m_connectedClientsList->m_next); client != m_connectedClientsList; client = static_cast(client->m_next)) { + if ((source && (client == source)) || !client->is_good() || (client->m_protocolVersion < PROTOCOL_VERSION_1_3)) { + continue; + } + send_aux_job_donation(client, data, data_size); + } +} + +void P2PServer::send_aux_job_donation(P2PServer::P2PClient* client, const uint8_t* data, uint32_t data_size) +{ + const bool result = send(client, [client, data, data_size](uint8_t* buf, size_t buf_size) -> size_t { + LOGINFO(6, "sending AUX_JOB_DONATION to " << static_cast(client->m_addrString)); + + if (buf_size < 1 + sizeof(uint32_t) + data_size) { + return 0; + } + + uint8_t* p = buf; + + *(p++) = static_cast(MessageId::AUX_JOB_DONATION); + + memcpy(p, &data_size, sizeof(uint32_t)); + p += sizeof(uint32_t); + + memcpy(p, data, data_size); + p += data_size; + + return p - buf; + }); + + if (!result) { + LOGWARN(5, "failed to send AUX_JOB_DONATION to " << static_cast(client->m_addrString) << ", disconnecting"); + client->close(); + } +} + +void P2PServer::clean_aux_job_messages() +{ + 
if ((m_timerCounter & 255) != 0) { + return; + } + + if (m_auxJobMessages.empty()) { + return; + } + + const uint64_t cur_time = time(nullptr); + + for (auto it = m_auxJobMessages.begin(); it != m_auxJobMessages.end();) { + // Delete old messages only after 3x the timeout to give some leeway for system clock adjustments + if (cur_time > it->second + AUX_JOB_TIMEOUT * 3) { + it = m_auxJobMessages.erase(it); + } + else { + ++it; + } + } +} + P2PServer::P2PClient::~P2PClient() { } @@ -1725,6 +1867,23 @@ bool P2PServer::P2PClient::on_read(const char* data, uint32_t size) on_block_notify(buf + 1); } break; + + case MessageId::AUX_JOB_DONATION: + LOGINFO(6, "peer " << log::Gray() << static_cast(m_addrString) << log::NoColor() << " sent AUX_JOB_DONATION"); + + if (bytes_left >= 1 + sizeof(uint32_t)) { + const uint32_t msg_size = read_unaligned(reinterpret_cast(buf + 1)); + if (bytes_left >= 1 + sizeof(uint32_t) + msg_size) { + bytes_read = 1 + sizeof(uint32_t) + msg_size; + + if (!on_aux_job_donation(buf + 1 + sizeof(uint32_t), msg_size)) { + ban(DEFAULT_BAN_TIME); + server->remove_peer_from_list(this); + return false; + } + } + } + break; } if (bytes_read) { @@ -2432,6 +2591,13 @@ void P2PServer::P2PClient::on_peer_list_response(const uint8_t* buf) << "runs an unknown software with id = " << log::Hex(id_value) ); } + + // We know this peer's protocol version now. Send protocol-specific messages here. 
+ if ((m_protocolVersion >= PROTOCOL_VERSION_1_3) && !server->m_auxJobLastMessage.empty()) { + if (static_cast(time(nullptr)) < server->m_auxJobLastMessageTimestamp + AUX_JOB_TIMEOUT) { + server->send_aux_job_donation(this, server->m_auxJobLastMessage.data(), static_cast(server->m_auxJobLastMessage.size())); + } + } } continue; } @@ -2510,6 +2676,135 @@ void P2PServer::P2PClient::on_block_notify(const uint8_t* buf) } } +bool P2PServer::P2PClient::on_aux_job_donation(const uint8_t* buf, uint32_t size) +{ + P2PServer* server = static_cast(m_owner); + + // Ignore the same message coming from other peers + if ((server->m_auxJobLastMessage.size() == size) && (memcmp(server->m_auxJobLastMessage.data(), buf, size) == 0)) { + return true; + } + + const time_t cur_time = time(nullptr); + + // Layout of the message: + // + // 32 bytes | Secondary public key + // 8 bytes | Secondary public key's expiration timestamp + // 64 bytes | Master key signature signing the above 40 bytes + // (size - 168) bytes | Data + // 64 bytes | Secondary key signature signing the data + + constexpr uint32_t DATA_OFFSET = ED25519_PUBLIC_KEY_LEN + 8 + ED25519_SIGNATURE_LEN; + constexpr uint32_t OVERHEAD = DATA_OFFSET + ED25519_SIGNATURE_LEN; + constexpr uint32_t DATA_ENTRY_SIZE = HASH_SIZE * 2 + sizeof(difficulty_type); + constexpr uint32_t MIN_DATA_SIZE = sizeof(uint64_t) + DATA_ENTRY_SIZE; + + // Ignore invalid or empty messages + if (size < OVERHEAD + MIN_DATA_SIZE) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an invalid AUX_JOB_DONATION message (" << size << " < " << (OVERHEAD + MIN_DATA_SIZE) << ')'); + return false; + } + + // Layout of the data: + // + // 8 bytes | timestamp + // + // Next come one or multiple data entries: + // + // 32 bytes | aux_id + // 32 bytes | aux_hash + // 16 bytes | aux_diff + + const uint32_t data_size = size - OVERHEAD; + + const int64_t secondary_key_expiration_time = read_unaligned(reinterpret_cast(buf + ED25519_PUBLIC_KEY_LEN)); + + // Ignore 
messages signed with an outdated secondary key + if (cur_time >= secondary_key_expiration_time) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an AUX_JOB_DONATION message with an expired secondary key (" << cur_time << " >= " << secondary_key_expiration_time << ")"); + return true; + } + + // Check secondary public key's signature + uint8_t signature[ED25519_SIGNATURE_LEN]; + memcpy(signature, buf + ED25519_PUBLIC_KEY_LEN + 8, sizeof(signature)); + + // Ignore messages with invalid signatures +#ifdef WITH_TLS // Need BoringSSL to verify signatures + if (!ED25519_verify(buf, ED25519_PUBLIC_KEY_LEN + 8, signature, ED25519_MASTER_PUBLIC_KEY)) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an AUX_JOB_DONATION message with an invalid master key signature"); + return true; + } +#endif + + const uint8_t* p = buf + DATA_OFFSET; + const uint8_t* data_end = p + data_size; + + memcpy(signature, data_end, sizeof(signature)); + + uint8_t secondary_public_key[ED25519_PUBLIC_KEY_LEN]; + memcpy(secondary_public_key, buf, sizeof(secondary_public_key)); + + // Ignore messages with invalid signatures +#ifdef WITH_TLS // Need BoringSSL to verify signatures + if (!ED25519_verify(p, data_size, signature, secondary_public_key)) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an AUX_JOB_DONATION message with an invalid secondary key signature"); + return true; + } +#endif + + const int64_t data_timestamp = read_unaligned(reinterpret_cast(p)); + p += sizeof(int64_t); + + // Ignore outdated messages + if (cur_time >= data_timestamp + AUX_JOB_TIMEOUT) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an outdated AUX_JOB_DONATION message (" << cur_time << " >= " << data_timestamp << " + " << static_cast(AUX_JOB_TIMEOUT) << ')'); + return true; + } + + if ((data_end - p) % DATA_ENTRY_SIZE) { + LOGWARN(4, "peer " << static_cast(m_addrString) << " sent an invalid AUX_JOB_DONATION message (" << (data_end - p) << " is not a multiple of " << 
DATA_ENTRY_SIZE << ')'); + return true; + } + + hash digest; + sha256(buf, size, digest.h); + + // Ignore repeated old messages + if (!server->m_auxJobMessages.emplace(*digest.u64(), data_timestamp).second) { + LOGWARN(5, "peer " << static_cast(m_addrString) << " sent an old AUX_JOB_DONATION message (" << (cur_time - data_timestamp) << " seconds old)"); + return true; + } + +#if defined(WITH_MERGE_MINING_DONATION) && defined(WITH_TLS) // Only work on verified jobs + const uint32_t N = static_cast(data_end - p) / DATA_ENTRY_SIZE; + + std::vector chain_params_vec(N); + + for (uint32_t i = 0; i < N; ++i) { + IMergeMiningClient::ChainParameters& chain_params = chain_params_vec[i]; + + memcpy(chain_params.aux_id.h, p, HASH_SIZE); + p += HASH_SIZE; + + memcpy(chain_params.aux_hash.h, p, HASH_SIZE); + p += HASH_SIZE; + + chain_params.aux_diff.lo = read_unaligned(reinterpret_cast(p)); + p += sizeof(uint64_t); + + chain_params.aux_diff.hi = read_unaligned(reinterpret_cast(p)); + p += sizeof(uint64_t); + } + + server->m_pool->set_aux_job_donation(chain_params_vec); +#endif + + server->broadcast_aux_job_donation(buf, size, data_timestamp, this, true); + return true; +} + bool P2PServer::P2PClient::handle_incoming_block_async(const PoolBlock* block, uint64_t max_time_delta) { P2PServer* server = static_cast(m_owner); diff --git a/src/p2p_server.h b/src/p2p_server.h index b917356..66e9d53 100644 --- a/src/p2p_server.h +++ b/src/p2p_server.h @@ -38,8 +38,9 @@ static constexpr int DEFAULT_P2P_PORT_MINI = 37888; static constexpr uint32_t PROTOCOL_VERSION_1_0 = 0x00010000UL; static constexpr uint32_t PROTOCOL_VERSION_1_1 = 0x00010001UL; static constexpr uint32_t PROTOCOL_VERSION_1_2 = 0x00010002UL; +static constexpr uint32_t PROTOCOL_VERSION_1_3 = 0x00010003UL; -static constexpr uint32_t SUPPORTED_PROTOCOL_VERSION = PROTOCOL_VERSION_1_2; +static constexpr uint32_t SUPPORTED_PROTOCOL_VERSION = PROTOCOL_VERSION_1_3; class P2PServer : public TCPServer { @@ -55,7 +56,9 @@ public: 
PEER_LIST_RESPONSE, BLOCK_BROADCAST_COMPACT, BLOCK_NOTIFY, - LAST = BLOCK_NOTIFY, + // Donation messages are signed by author's private keys to prevent their abuse/misuse. + AUX_JOB_DONATION, + LAST = AUX_JOB_DONATION, }; explicit P2PServer(p2pool *pool); @@ -113,6 +116,7 @@ public: [[nodiscard]] bool on_peer_list_request(const uint8_t* buf); void on_peer_list_response(const uint8_t* buf); void on_block_notify(const uint8_t* buf); + [[nodiscard]] bool on_aux_job_donation(const uint8_t* buf, uint32_t size); [[nodiscard]] bool handle_incoming_block_async(const PoolBlock* block, uint64_t max_time_delta = 0); static void handle_incoming_block(p2pool* pool, PoolBlock& block, std::vector& missing_blocks, bool& result); @@ -188,6 +192,12 @@ public: bool disconnected() const { return m_seenGoodPeers && (m_numConnections == 0); }; +#ifdef WITH_MERGE_MINING_DONATION + void broadcast_aux_job_donation_async(const uint8_t* data, uint32_t data_size, uint64_t timestamp); +#endif + + void broadcast_aux_job_donation(const uint8_t* data, uint32_t data_size, uint64_t timestamp, const P2PClient* source, bool duplicate_check_done); + private: [[nodiscard]] const char* get_log_category() const override; @@ -291,6 +301,34 @@ private: void on_shutdown() override; void api_update_local_stats(); + + enum { + AUX_JOB_TIMEOUT = 3600, + }; + + unordered_set> m_auxJobMessages; + std::vector m_auxJobLastMessage; + uint64_t m_auxJobLastMessageTimestamp; + + void send_aux_job_donation(P2PServer::P2PClient* client, const uint8_t* data, uint32_t data_size); + + void clean_aux_job_messages(); + +#ifdef WITH_MERGE_MINING_DONATION + struct AuxJobBroadcast + { + std::vector job; + uint64_t timestamp = 0; + }; + + uv_mutex_t m_AuxJobBroadcastLock; + AuxJobBroadcast m_AuxJobBroadcast; + + uv_async_t m_AuxJobBroadcastAsync; + + static void on_aux_job_broadcast(uv_async_t* handle) { reinterpret_cast(handle->data)->broadcast_aux_job_donation_handler(); } + void broadcast_aux_job_donation_handler(); +#endif 
}; } // namespace p2pool diff --git a/src/p2pool.cpp b/src/p2pool.cpp index 7e46668..fe15678 100644 --- a/src/p2pool.cpp +++ b/src/p2pool.cpp @@ -38,6 +38,11 @@ #include "keccak.h" #include "merkle.h" #include "merge_mining_client.h" + +#ifdef WITH_TLS +#include +#endif + #include #include #include @@ -148,11 +153,18 @@ p2pool::p2pool(int argc, char* argv[]) uv_rwlock_init_checked(&m_minerDataLock); uv_rwlock_init_checked(&m_ZMQReaderLock); uv_rwlock_init_checked(&m_mergeMiningClientsLock); + +#ifdef WITH_MERGE_MINING_DONATION + uv_rwlock_init_checked(&m_auxJobDonationLock); +#endif + uv_rwlock_init_checked(&m_auxIdLock); uv_mutex_init_checked(&m_foundBlocksLock); + #ifdef WITH_RANDOMX uv_mutex_init_checked(&m_minerLock); #endif + uv_mutex_init_checked(&m_submitBlockDataLock); uv_mutex_init_checked(&m_submitAuxBlockDataLock); @@ -222,11 +234,18 @@ p2pool::~p2pool() uv_rwlock_destroy(&m_minerDataLock); uv_rwlock_destroy(&m_ZMQReaderLock); uv_rwlock_destroy(&m_mergeMiningClientsLock); + +#ifdef WITH_MERGE_MINING_DONATION + uv_rwlock_destroy(&m_auxJobDonationLock); +#endif + uv_rwlock_destroy(&m_auxIdLock); uv_mutex_destroy(&m_foundBlocksLock); + #ifdef WITH_RANDOMX uv_mutex_destroy(&m_minerLock); #endif + uv_mutex_destroy(&m_submitBlockDataLock); uv_mutex_destroy(&m_submitAuxBlockDataLock); @@ -565,6 +584,27 @@ void p2pool::handle_chain_main(ChainMain& data, const char* extra) m_zmqLastActive = seconds_since_epoch(); } + +#ifdef WITH_MERGE_MINING_DONATION +void p2pool::set_aux_job_donation(const std::vector& chain_params) +{ + if (m_stopped) { + return; + } + + const uint64_t t = seconds_since_epoch(); + { + WriteLock lock(m_auxJobDonationLock); + m_auxJobDonation = chain_params; + m_auxJobDonationLastUpdated = t; + } + + if (!chain_params.empty()) { + update_aux_data(chain_params.front().aux_id); + } +} +#endif + void p2pool::update_aux_data(const hash& chain_id) { if (m_stopped) { @@ -574,6 +614,22 @@ void p2pool::update_aux_data(const hash& chain_id) MinerData 
data; std::vector aux_id; +#ifdef WITH_MERGE_MINING_DONATION + std::vector mm_donation_params; + uint64_t mm_donation_params_last_updated; + { + ReadLock lock(m_auxJobDonationLock); + mm_donation_params = m_auxJobDonation; + mm_donation_params_last_updated = m_auxJobDonationLastUpdated; + } + + const uint64_t t = seconds_since_epoch(); + + if (!mm_donation_params.empty()) { + LOGINFO(5, "update_aux_data: there are " << mm_donation_params.size() << " aux chain datas for merge mining donation, last updated " << (t - mm_donation_params_last_updated) << " seconds ago"); + } +#endif + { ReadLock lock(m_mergeMiningClientsLock); @@ -588,13 +644,33 @@ void p2pool::update_aux_data(const hash& chain_id) if (c->get_params(chain_params)) { data.aux_chains.emplace_back(chain_params.aux_id, chain_params.aux_hash, chain_params.aux_diff); aux_id.emplace_back(chain_params.aux_id); + +#ifdef WITH_MERGE_MINING_DONATION + // If the user is already merge mining this chain, don't use it for donation + mm_donation_params.erase(std::remove_if(mm_donation_params.begin(), mm_donation_params.end(), [&chain_params](const auto& t) { return t.aux_id == chain_params.aux_id; }), mm_donation_params.end()); +#endif } } - aux_id.emplace_back(m_sideChain->consensus_hash()); } } - if (!aux_id.empty()) { +#ifdef WITH_MERGE_MINING_DONATION + // Use the donation job for 30 minutes at most if it doesn't get updated anymore + if (t <= mm_donation_params_last_updated + 1800) { + LOGINFO(5, "update_aux_data: using " << mm_donation_params.size() << " aux chain datas for merge mining donation"); + + for (const IMergeMiningClient::ChainParameters& c : mm_donation_params) { + data.aux_chains.emplace_back(c.aux_id, c.aux_hash, c.aux_diff); + aux_id.emplace_back(c.aux_id); + } + } + else { + LOGINFO(5, "update_aux_data: merge mining donation data is stale, not using it"); + } +#endif + + aux_id.emplace_back(m_sideChain->consensus_hash()); + { WriteLock lock(m_auxIdLock); if (aux_id == m_auxId) { @@ -605,7 +681,7 
@@ void p2pool::update_aux_data(const hash& chain_id) m_auxNonce = data.aux_nonce; } else { - LOGERR(1, "Failed to find the aux nonce for merge mining. Merge mining will be off this round."); + LOGERR(1, "update_aux_data: failed to find the aux nonce for merge mining. Merge mining will be off this round."); data.aux_chains.clear(); } } @@ -623,16 +699,111 @@ void p2pool::update_aux_data(const hash& chain_id) } if (!chain_id.empty()) { - LOGINFO(4, "New aux data from chain " << chain_id); + LOGINFO(4, "update_aux_data: new aux data from chain " << chain_id); + if (!is_main_thread()) { update_block_template_async(); } else { update_block_template(); } + +#if defined(WITH_MERGE_MINING_DONATION) && defined(WITH_TLS) + send_aux_job_donation(); +#endif } } +#ifdef WITH_MERGE_MINING_DONATION +void p2pool::send_aux_job_donation() +{ +#ifdef WITH_TLS + if (m_params->m_authorKeyFile.empty()) { + return; + } + + std::ifstream f(m_params->m_authorKeyFile, std::ios::binary | std::ios::ate); + + if (!f.good()) { + LOGERR(1, "send_aux_job_donation: failed to open " << m_params->m_authorKeyFile); + return; + } + + Params::AuthorKey key; + + if (f.tellg() != static_cast(sizeof(key))) { + LOGERR(1, "send_aux_job_donation: " << m_params->m_authorKeyFile << " has an invalid size"); + return; + } + + f.seekg(0); + f.read(reinterpret_cast(&key), sizeof(key)); + + if (!f.good()) { + LOGERR(1, "send_aux_job_donation: failed to read data from " << m_params->m_authorKeyFile); + return; + } + + const uint64_t timestamp = time(nullptr); + + if (timestamp >= read_unaligned(reinterpret_cast(key.expiration_time))) { + LOGERR(1, "send_aux_job_donation: " << m_params->m_authorKeyFile << " is expired"); + return; + } + + const uint8_t* p = reinterpret_cast(&key); + + if (!ED25519_verify(p, sizeof(key.pub_key) + sizeof(key.expiration_time), key.master_key_signature, ED25519_MASTER_PUBLIC_KEY)) { + LOGERR(1, "send_aux_job_donation: " << m_params->m_authorKeyFile << ": signature verification failed"); 
+ return; + } + + std::vector job; + job.reserve(512); + + constexpr uint32_t DATA_OFFSET = sizeof(key.pub_key) + sizeof(key.expiration_time) + sizeof(key.master_key_signature); + + job.assign(p, p + DATA_OFFSET); + + job.insert(job.end(), reinterpret_cast(×tamp), reinterpret_cast(×tamp) + sizeof(uint64_t)); + + { + ReadLock lock(m_minerDataLock); + + if (m_minerData.aux_chains.empty()) { + LOGERR(1, "send_aux_job_donation: no merge mined chains found"); + return; + } + + for (const AuxChainData& c : m_minerData.aux_chains) { + job.insert(job.end(), c.unique_id.h, c.unique_id.h + HASH_SIZE); + job.insert(job.end(), c.data.h, c.data.h + HASH_SIZE); + job.insert(job.end(), reinterpret_cast(&c.difficulty), reinterpret_cast(&c.difficulty) + sizeof(difficulty_type)); + } + } + + uint8_t signature[64]; + if (!ED25519_sign(signature, job.data() + DATA_OFFSET, job.size() - DATA_OFFSET, key.priv_key)) { + LOGERR(1, "send_aux_job_donation: failed to sign the donation job"); + return; + } + + if (!ED25519_verify(job.data() + DATA_OFFSET, job.size() - DATA_OFFSET, signature, key.pub_key)) { + LOGERR(1, "send_aux_job_donation: failed to verify the donation job's signature"); + return; + } + + OPENSSL_cleanse(&key, sizeof(key)); + + job.insert(job.end(), signature, signature + sizeof(signature)); + + m_p2pServer->broadcast_aux_job_donation_async(job.data(), static_cast(job.size()), timestamp); +#else + LOGERR(1, "p2pool::send_aux_job_donation() must be built with TLS"); +#endif +} +#endif + void p2pool::submit_block_async(uint32_t template_id, uint32_t nonce, uint32_t extra_nonce) { { @@ -769,7 +940,7 @@ void p2pool::submit_aux_block() const } } - c->submit_solution(block_tpl, hashing_blob, nonce_offset, seed_hash, blob, proof, path); + c->submit_solution(block_tpl->get_coinbase_merkle_proof(), hashing_blob, nonce_offset, seed_hash, blob, proof, path); } else { LOGWARN(3, "submit_aux_block: failed to get merkle proof for chain_id " << chain_id); @@ -1769,6 +1940,15 @@ void 
p2pool::api_update_block_found(const ChainMain* data, const PoolBlock* bloc } } +void p2pool::on_external_block(const PoolBlock& block) +{ + ReadLock lock(m_mergeMiningClientsLock); + + for (IMergeMiningClient* c : m_mergeMiningClients) { + c->on_external_block(block); + } +} + bool p2pool::get_difficulty_at_height(uint64_t height, difficulty_type& diff) { ReadLock lock(m_mainchainLock); diff --git a/src/p2pool.h b/src/p2pool.h index 9ca01f3..d4f3f63 100644 --- a/src/p2pool.h +++ b/src/p2pool.h @@ -19,6 +19,7 @@ #include "uv_util.h" #include "params.h" +#include "merge_mining_client.h" #include namespace p2pool { @@ -86,6 +87,11 @@ public: virtual void handle_miner_data(MinerData& data) override; virtual void handle_chain_main(ChainMain& data, const char* extra) override; +#ifdef WITH_MERGE_MINING_DONATION + void set_aux_job_donation(const std::vector& chain_params); + void send_aux_job_donation(); +#endif + void update_aux_data(const hash& chain_id); void submit_block_async(uint32_t template_id, uint32_t nonce, uint32_t extra_nonce); @@ -111,6 +117,7 @@ public: bool chainmain_get_by_hash(const hash& id, ChainMain& data) const; void api_update_block_found(const ChainMain* data, const PoolBlock* block, bool update_stats_mod = true); + void on_external_block(const PoolBlock& block); bool get_difficulty_at_height(uint64_t height, difficulty_type& diff); @@ -251,6 +258,12 @@ private: mutable uv_rwlock_t m_mergeMiningClientsLock; std::vector m_mergeMiningClients; +#ifdef WITH_MERGE_MINING_DONATION + mutable uv_rwlock_t m_auxJobDonationLock; + std::vector m_auxJobDonation; + uint64_t m_auxJobDonationLastUpdated = 0; +#endif + mutable uv_rwlock_t m_auxIdLock; std::vector m_auxId; uint32_t m_auxNonce = 0; diff --git a/src/params.cpp b/src/params.cpp index e196d42..9e2d642 100644 --- a/src/params.cpp +++ b/src/params.cpp @@ -238,6 +238,13 @@ Params::Params(int argc, char* const argv[]) ok = true; } +#ifdef WITH_MERGE_MINING_DONATION + if ((strcmp(argv[i], "--adkf") == 0) && 
(i + 1 < argc)) { + m_authorKeyFile = argv[++i]; + ok = true; + } +#endif + if (!ok) { // Wait to avoid log messages overlapping with printf() calls and making a mess on screen std::this_thread::sleep_for(std::chrono::milliseconds(10)); diff --git a/src/params.h b/src/params.h index 698520e..d02a1c0 100644 --- a/src/params.h +++ b/src/params.h @@ -101,6 +101,17 @@ struct Params std::string m_tlsCertKey; #endif bool m_enableStratumHTTP = true; + +#ifdef WITH_MERGE_MINING_DONATION + std::string m_authorKeyFile; + + struct AuthorKey { + uint8_t pub_key[32]; + uint8_t expiration_time[8]; + uint8_t master_key_signature[64]; + uint8_t priv_key[64]; + }; +#endif }; } // namespace p2pool diff --git a/src/pool_block.cpp b/src/pool_block.cpp index 15c4efd..ebc5993 100644 --- a/src/pool_block.cpp +++ b/src/pool_block.cpp @@ -128,6 +128,11 @@ PoolBlock& PoolBlock::operator=(const PoolBlock& b) m_auxChains = b.m_auxChains; m_auxNonce = b.m_auxNonce; + m_hashingBlob = b.m_hashingBlob; + + m_powHash = b.m_powHash; + m_seed = b.m_seed; + return *this; } @@ -321,6 +326,12 @@ void PoolBlock::reset_offchain_data() m_auxChains.shrink_to_fit(); m_auxNonce = 0; + + m_hashingBlob.clear(); + m_hashingBlob.shrink_to_fit(); + + m_powHash = {}; + m_seed = {}; } bool PoolBlock::get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const hash& seed_hash, hash& pow_hash, bool force_light_mode) @@ -383,6 +394,9 @@ bool PoolBlock::get_pow_hash(RandomX_Hasher_Base* hasher, uint64_t height, const writeVarint(count, [&blob, &blob_size](uint8_t b) { blob[blob_size++] = b; }); + // cppcheck-suppress danglingLifetime + m_hashingBlob.assign(blob, blob + blob_size); + return hasher->calculate(blob, blob_size, height, seed_hash, pow_hash, force_light_mode); } diff --git a/src/pool_block.h b/src/pool_block.h index 0338c6e..4a6e7ad 100644 --- a/src/pool_block.h +++ b/src/pool_block.h @@ -144,7 +144,7 @@ struct PoolBlock // Merge mining extra data // Format: vector of (chain ID, chain data) pairs - // 
Chain data format is arbitrary and depends on the merge mined chain's requirements + // Chain data always has merge mining hash and difficulty in the beginning, the rest is arbitrary and depends on the merge mined chain's requirements std::map> m_mergeMiningExtra; // Arbitrary extra data @@ -173,6 +173,10 @@ struct PoolBlock std::vector m_auxChains; uint32_t m_auxNonce; + std::vector m_hashingBlob; + hash m_powHash; + hash m_seed; + std::vector serialize_mainchain_data(size_t* header_size = nullptr, size_t* miner_tx_size = nullptr, int* outputs_offset = nullptr, int* outputs_blob_size = nullptr, const uint32_t* nonce = nullptr, const uint32_t* extra_nonce = nullptr) const; std::vector serialize_sidechain_data() const; diff --git a/src/side_chain.cpp b/src/side_chain.cpp index 9c78eaa..682b776 100644 --- a/src/side_chain.cpp +++ b/src/side_chain.cpp @@ -573,15 +573,13 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector& missing_ LOGWARN(3, "add_external_block: block is built on top of an unknown mainchain block " << block.m_prevId << ", mainchain reorg might've happened"); } - hash seed; - if (!m_pool->get_seed(block.m_txinGenHeight, seed)) { + if (!m_pool->get_seed(block.m_txinGenHeight, block.m_seed)) { LOGWARN(3, "add_external_block mined by " << block.m_minerWallet << ": couldn't get seed hash for mainchain height " << block.m_txinGenHeight); forget_incoming_block(block); return false; } - hash pow_hash; - if (!block.get_pow_hash(m_pool->hasher(), block.m_txinGenHeight, seed, pow_hash)) { + if (!block.get_pow_hash(m_pool->hasher(), block.m_txinGenHeight, block.m_seed, block.m_powHash)) { LOGWARN(3, "add_external_block: couldn't get PoW hash for height = " << block.m_sidechainHeight << ", mainchain height " << block.m_txinGenHeight << ". 
Ignoring it."); forget_incoming_block(block); return true; @@ -589,7 +587,7 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector& missing_ // Check if it has the correct parent and difficulty to go right to monerod for checking MinerData miner_data = m_pool->miner_data(); - if ((block.m_prevId == miner_data.prev_id) && miner_data.difficulty.check_pow(pow_hash)) { + if ((block.m_prevId == miner_data.prev_id) && miner_data.difficulty.check_pow(block.m_powHash)) { LOGINFO(0, log::LightGreen() << "add_external_block: block " << block.m_sidechainId << " has enough PoW for Monero network, submitting it"); m_pool->submit_block_async(block.serialize_mainchain_data()); } @@ -598,13 +596,13 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector& missing_ if (!m_pool->get_difficulty_at_height(block.m_txinGenHeight, diff)) { LOGWARN(3, "add_external_block: couldn't get mainchain difficulty for height = " << block.m_txinGenHeight); } - else if (diff.check_pow(pow_hash)) { + else if (diff.check_pow(block.m_powHash)) { LOGINFO(0, log::LightGreen() << "add_external_block: block " << block.m_sidechainId << " has enough PoW for Monero height " << block.m_txinGenHeight << ", submitting it"); m_pool->submit_block_async(block.serialize_mainchain_data()); } } - if (!block.m_difficulty.check_pow(pow_hash)) { + if (!block.m_difficulty.check_pow(block.m_powHash)) { LOGWARN(3, "add_external_block mined by " << block.m_minerWallet << ": not enough PoW for height = " << block.m_sidechainHeight << @@ -617,8 +615,8 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector& missing_ // Calculate the same hash second time to check if it's an unstable hardware that caused this hash pow_hash2; - if (block.get_pow_hash(m_pool->hasher(), block.m_txinGenHeight, seed, pow_hash2, true) && (pow_hash2 != pow_hash)) { - LOGERR(0, "UNSTABLE HARDWARE DETECTED: Calculated the same hash twice, got different results: " << pow_hash << " != " << pow_hash2 << " (sidechain id = 
" << block.m_sidechainId << ')'); + if (block.get_pow_hash(m_pool->hasher(), block.m_txinGenHeight, block.m_seed, pow_hash2, true) && (pow_hash2 != block.m_powHash)) { + LOGERR(0, "UNSTABLE HARDWARE DETECTED: Calculated the same hash twice, got different results: " << block.m_powHash << " != " << pow_hash2 << " (sidechain id = " << block.m_sidechainId << ')'); if (block.m_difficulty.check_pow(pow_hash2)) { LOGINFO(3, "add_external_block second result has enough PoW for height = " << block.m_sidechainHeight << ", id = " << block.m_sidechainId); not_enough_pow = false; @@ -630,6 +628,8 @@ bool SideChain::add_external_block(PoolBlock& block, std::vector& missing_ } } + m_pool->on_external_block(block); + bool block_found = false; missing_blocks.clear(); diff --git a/src/util.cpp b/src/util.cpp index 654f993..1ee62c2 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -60,6 +60,8 @@ const char* VERSION = "v" STR2(P2POOL_VERSION_MAJOR) "." STR2(P2POOL_VERSION_MIN #endif " on " __DATE__ ")"; +const uint8_t ED25519_MASTER_PUBLIC_KEY[32] = {51,175,37,73,203,241,188,115,195,255,123,53,218,120,90,74,186,240,82,178,67,139,124,91,180,106,188,181,187,51,236,10}; + std::string DATA_DIR; SoftwareID get_software_id(uint32_t value) diff --git a/src/util.h b/src/util.h index 1fc2b69..a222fc0 100644 --- a/src/util.h +++ b/src/util.h @@ -42,6 +42,8 @@ constexpr uint32_t P2POOL_VERSION = (P2POOL_VERSION_MAJOR << 16) | (P2POOL_VERSI extern const char* VERSION; +extern const uint8_t ED25519_MASTER_PUBLIC_KEY[32]; + extern std::string DATA_DIR; enum class SoftwareID : uint32_t { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 0af314e..5c52208 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -30,6 +30,8 @@ add_subdirectory(cmake/ssl) include_directories(../external/src/grpc/third_party/boringssl-with-bazel/src/include) add_definitions(-DWITH_TLS) +add_definitions(-DWITH_MERGE_MINING_DONATION) + add_definitions(-DP2POOL_UNIT_TESTS) 
add_definitions(-DP2POOL_SIDECHAIN_EXTRA_1=1)