diff --git a/.cirrus.yml b/.cirrus.yml
index e8d7c0bcbe..f3ac641401 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -156,7 +156,7 @@ task:
 
 task:
   name: '[no depends, sanitizers: fuzzer,address,undefined,integer] [focal]'
-  only_if: $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH || $CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH
+  # only_if: $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH || $CIRRUS_BASE_BRANCH == $CIRRUS_DEFAULT_BRANCH
  << : *GLOBAL_TASK_TEMPLATE
  container:
    image: ubuntu:focal
diff --git a/src/chain.cpp b/src/chain.cpp
index 6323aaf0df..4d694a06c9 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -5,6 +5,8 @@
 #include <chain.h>
 
+#include <validation.h> // pblocktree
+
 /**
  * CChain implementation
  */
@@ -48,6 +50,24 @@ CBlockLocator CChain::GetLocator(const CBlockIndex *pindex) const {
     return CBlockLocator(vHave);
 }
 
+void CBlockIndex::untrim() {
+    if (!trimmed())
+        return;
+    CBlockIndex tmp;
+    const CBlockIndex* pindexfull = untrim_to(&tmp);
+    assert(pindexfull!=this);
+    m_trimmed = false;
+    set_stored();
+    proof = pindexfull->proof;
+    m_dynafed_params = pindexfull->m_dynafed_params;
+    m_signblock_witness = pindexfull->m_signblock_witness;
+}
+
+const CBlockIndex *CBlockIndex::untrim_to(CBlockIndex *pindexNew) const
+{
+    return pblocktree->RegenerateFullIndex(this, pindexNew);
+}
+
 const CBlockIndex *CChain::FindFork(const CBlockIndex *pindex) const {
     if (pindex == nullptr) {
         return nullptr;
diff --git a/src/chain.h b/src/chain.h
index 40ec115728..d804690a4e 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -201,6 +201,7 @@ class CBlockIndex
 
     bool m_trimmed{false};
     bool m_trimmed_dynafed_block{false};
+    bool m_stored_lvl{false};
 
     friend class CBlockTreeDB;
 
@@ -208,19 +209,30 @@ class CBlockIndex
 
    // Irrevocably remove blocksigning and dynafed-related stuff from this
    // in-memory copy of the block header.
-    void trim() {
+    bool trim() {
        assert_untrimmed();
+        if (!m_stored_lvl) {
+            // We can't trim in-memory data if it's not on disk yet, but we can if it's already been recovered once
+            return false;
+        }
        m_trimmed = true;
        m_trimmed_dynafed_block = !m_dynafed_params.value().IsNull();
        proof = std::nullopt;
        m_dynafed_params = std::nullopt;
        m_signblock_witness = std::nullopt;
+        return true;
    }
 
+    void untrim();
+    const CBlockIndex *untrim_to(CBlockIndex *pindexNew) const;
+
    inline bool trimmed() const {
        return m_trimmed;
    }
 
+    inline void set_stored() {
+        m_stored_lvl = true;
+    }
+
    inline void assert_untrimmed() const {
        assert(!m_trimmed);
    }
@@ -463,6 +475,9 @@ class CDiskBlockIndex : public CBlockIndex
 
        // For compatibility with elements 0.14 based chains
        if (g_signed_blocks) {
+            if (!ser_action.ForRead()) {
+                obj.assert_untrimmed();
+            }
            if (is_dyna) {
                READWRITE(obj.m_dynafed_params.value());
                READWRITE(obj.m_signblock_witness.value().stack);
diff --git a/src/dynafed.cpp b/src/dynafed.cpp
index 8ef680e296..764b393747 100644
--- a/src/dynafed.cpp
+++ b/src/dynafed.cpp
@@ -1,8 +1,12 @@
 #include
 #include
+#include <validation.h>
 
-bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consensus::Params& consensus, DynaFedParamEntry& winning_entry)
+
+/* Returns true if the next block would be the first block of an epoch with new
+ * parameters. It also returns the parameter set that is being transitioned to. */
+static bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consensus::Params& consensus, DynaFedParamEntry& winning_entry) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
 {
     uint32_t next_height = pindexPrev->nHeight + 1;
     assert(consensus.dynamic_epoch_length != 0);
@@ -15,6 +19,7 @@ bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consens
     for (int32_t height = next_height - 1; height >= (int32_t)(next_height - consensus.dynamic_epoch_length); --height) {
         const CBlockIndex* p_epoch_walk = pindexPrev->GetAncestor(height);
         assert(p_epoch_walk);
+        ForceUntrimHeader(p_epoch_walk);
         const DynaFedParamEntry& proposal = p_epoch_walk->dynafed_params().m_proposed;
         const uint256 proposal_root = proposal.CalculateRoot();
         vote_tally[proposal_root]++;
@@ -37,6 +42,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde
     const uint32_t epoch_length = consensus.dynamic_epoch_length;
     uint32_t epoch_age = next_height % epoch_length;
 
+    LOCK(cs_main);
     DynaFedParamEntry winning_proposal;
     // Early return when there is a winning proposal
     if (NextBlockIsParameterTransition(pindexPrev, consensus, winning_proposal)) {
@@ -60,6 +66,7 @@ DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pinde
     // may be pre-dynafed params
     const CBlockIndex* p_epoch_start = pindexPrev->GetAncestor(epoch_start_height);
     assert(p_epoch_start);
+    ForceUntrimHeader(p_epoch_start);
     if (p_epoch_start->dynafed_params().IsNull()) {
         // We need to construct the "full" current parameters of pre-dynafed
         // consensus
@@ -93,6 +100,8 @@ DynaFedParamEntry ComputeNextBlockCurrentParameters(const CBlockIndex* pindexPre
 {
     assert(pindexPrev);
 
+    LOCK(cs_main);
+    ForceUntrimHeader(pindexPrev);
     DynaFedParamEntry entry = ComputeNextBlockFullCurrentParameters(pindexPrev, consensus);
 
     uint32_t next_height = pindexPrev->nHeight+1;
diff --git a/src/dynafed.h b/src/dynafed.h
index 8db3288dbd..73de703b17 100644
--- a/src/dynafed.h
+++ b/src/dynafed.h
@@ -6,10 +6,6 @@
 #include
 #include
 
-/* Returns true if the next block would be the first block of an epoch with new
- * parameters. It also returns the parameter set that is being transitioned to. */
-bool NextBlockIsParameterTransition(const CBlockIndex* pindexPrev, const Consensus::Params& consensus, DynaFedParamEntry& winning_entry);
-
 /* Compute the next block's enforced parameters */
 DynaFedParamEntry ComputeNextBlockFullCurrentParameters(const CBlockIndex* pindexPrev, const Consensus::Params& consensus);
 /* Compute the next block's expected published parameters. Blocks at "epoch_age" of non-0 only
diff --git a/src/init.cpp b/src/init.cpp
index 1fc22643ff..05872c4e55 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -987,13 +987,13 @@ bool AppInitParameterInteraction(const ArgsManager& args)
     }
 
     if (args.GetBoolArg("-trim_headers", false)) {
-        LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but we will be unable to serve as a full P2P peer, and certain header fields may be missing from JSON RPC output.\n");
+        LogPrintf("Configured for header-trimming mode. This will reduce memory usage substantially, but will increase IO usage when the headers need to be temporarily untrimmed.\n");
         fTrimHeaders = true;
         // This calculation is driven by GetValidFedpegScripts in pegins.cpp, which walks the chain
         // back to current epoch start, and then an additional total_valid_epochs on top of that.
         // We add one epoch here for the current partial epoch, and then another one for good luck.
-        nMustKeepFullHeaders = (chainparams.GetConsensus().total_valid_epochs + 2) * epoch_length;
+        nMustKeepFullHeaders = chainparams.GetConsensus().total_valid_epochs * epoch_length;
         // This is the number of headers we can have in flight downloading at a time, beyond the
         // set of blocks we've already validated. Capping this is necessary to keep memory usage
         // bounded during IBD.
@@ -1711,7 +1711,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
 
     // if pruning, unset the service bit and perform the initial blockstore prune
     // after any wallet rescanning has taken place.
-    if (fPruneMode || fTrimHeaders) {
+    if (fPruneMode) {
         LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
         nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
         if (!fReindex) {
@@ -1723,11 +1723,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
         }
     }
 
-    if (fTrimHeaders) {
-        LogPrintf("Unsetting NODE_NETWORK_LIMITED on header trim mode\n");
-        nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK_LIMITED);
-    }
-
     if (DeploymentEnabled(chainparams.GetConsensus(), Consensus::DEPLOYMENT_SEGWIT)) {
         // Advertise witness capabilities.
         // The option to not set NODE_WITNESS is only used in the tests and should be removed.
diff --git a/src/net.cpp b/src/net.cpp
index 3a1bb138ab..456b043ba0 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1361,6 +1361,9 @@ bool CConnman::GenerateSelectSet(std::set &recv_set, std::set &s
     //   write buffer in this case before receiving more. This avoids
     //   needlessly queueing received data, if the remote peer is not themselves
     //   receiving data. This means properly utilizing TCP flow control signalling.
+    //   This logic can put both nodes in deadlock if they are both "not receiving",
+    //   so there is a special case where we only stop receiving new messages, but
+    //   keep processing the in-progress ones.
     // * Otherwise, if there is space left in the receive buffer, select() for
     //   receiving data.
     // * Hand off all complete messages to the processor, to be handled without
@@ -1380,7 +1383,9 @@ bool CConnman::GenerateSelectSet(std::set &recv_set, std::set &s
             error_set.insert(pnode->hSocket);
             if (select_send) {
                 send_set.insert(pnode->hSocket);
-                continue;
+                // Only stop receiving new messages, but keep processing incomplete ones
+                if (!pnode->m_deserializer->IsEmpty())
+                    continue;
             }
             if (select_recv) {
                 recv_set.insert(pnode->hSocket);
diff --git a/src/net.h b/src/net.h
index 1b6a7e0928..ba8fd6a5f6 100644
--- a/src/net.h
+++ b/src/net.h
@@ -302,6 +302,8 @@ class CNetMessage {
  */
 class TransportDeserializer {
 public:
+    // returns true if the current deserialization is empty
+    virtual bool IsEmpty() const = 0;
     // returns true if the current deserialization is complete
     virtual bool Complete() const = 0;
     // set the serialization context version
@@ -352,6 +354,10 @@ class V1TransportDeserializer final : public TransportDeserializer
         Reset();
     }
 
+    bool IsEmpty() const override
+    {
+        return (nHdrPos == 0);
+    }
     bool Complete() const override
     {
         if (!in_data)
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 021e54d4b7..d4b4bd94d4 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -3183,12 +3183,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             if (pindex->trimmed()) {
-                // For simplicity, if any of the headers they're asking for are trimmed,
-                // just drop the request.
-                LogPrint(BCLog::NET, "%s: ignoring getheaders from peer=%i which would return at least one trimmed header\n", __func__, pfrom.GetId());
-                return;
+                // Header is trimmed, reload from disk before sending
+                CBlockIndex tmpBlockIndexFull;
+                const CBlockIndex* pindexfull = pindex->untrim_to(&tmpBlockIndexFull);
+                vHeaders.push_back(pindexfull->GetBlockHeader());
+            } else {
+                vHeaders.push_back(pindex->GetBlockHeader());
             }
-            vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                 break;
         }
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index fa1890a524..89ad1fba56 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -408,17 +408,6 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus
     return true;
 }
 
-bool ReadBlockHeaderFromDisk(CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
-{
-    // Not very efficient: read a block and throw away all but the header.
-    CBlock tmp;
-    if (!ReadBlockFromDisk(tmp, pindex, consensusParams)) {
-        return false;
-    }
-    header = tmp.GetBlockHeader();
-    return true;
-}
-
 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start)
 {
     FlatFilePos hpos = pos;
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 0a632eae0f..404ec4d52c 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -79,7 +79,6 @@ bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus
 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start);
 bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
 // ELEMENTS:
-bool ReadBlockHeaderFromDisk(class CBlockHeader& header, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
 bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
 bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
diff --git a/src/pegins.cpp b/src/pegins.cpp
index dc133d1fbc..9146d2da50 100644
--- a/src/pegins.cpp
+++ b/src/pegins.cpp
@@ -26,6 +26,8 @@
 // ELEMENTS
 //
 
+#include <validation.h>
+
 namespace {
 static secp256k1_context* secp256k1_ctx_validation;
 
@@ -473,6 +475,7 @@ std::vector> GetValidFedpegScripts(const CBlockIndex
         fedpegscripts.push_back(std::make_pair(next_param.m_fedpeg_program, next_param.m_fedpegscript));
     }
 
+    LOCK(cs_main);
     // Next we walk backwards up to M epoch starts
     for (int32_t i = 0; i < (int32_t) params.total_valid_epochs; i++) {
         // We are within total_valid_epochs of the genesis
@@ -487,6 +490,7 @@ std::vector> GetValidFedpegScripts(const CBlockIndex
             break;
         }
 
+        ForceUntrimHeader(p_epoch_start);
         if (!p_epoch_start->dynafed_params().IsNull()) {
             fedpegscripts.push_back(std::make_pair(p_epoch_start->dynafed_params().m_current.m_fedpeg_program, p_epoch_start->dynafed_params().m_current.m_fedpegscript));
         } else {
diff --git a/src/rest.cpp b/src/rest.cpp
index 519412e228..fd384d0ea9 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -221,13 +221,9 @@ static bool rest_headers(const std::any& context,
     case RetFormat::BINARY: {
         CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
         for (const CBlockIndex *pindex : headers) {
-            if (pindex->trimmed()) {
-                CBlockHeader tmp;
-                ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus());
-                ssHeader << tmp;
-            } else {
-                ssHeader << pindex->GetBlockHeader();
-            }
+            CBlockIndex tmpBlockIndexFull;
+            const CBlockIndex* pindexfull = pindex->untrim_to(&tmpBlockIndexFull);
+            ssHeader << pindexfull->GetBlockHeader();
         }
 
         std::string binaryHeader = ssHeader.str();
@@ -239,14 +235,9 @@ static bool rest_headers(const std::any& context,
     case RetFormat::HEX: {
         CDataStream ssHeader(SER_NETWORK, PROTOCOL_VERSION);
         for (const CBlockIndex *pindex : headers) {
-            if (pindex->trimmed()) {
-                CBlockHeader tmp;
-                ReadBlockHeaderFromDisk(tmp, pindex, Params().GetConsensus());
-                ssHeader << tmp;
-
-            } else {
-                ssHeader << pindex->GetBlockHeader();
-            }
+            CBlockIndex tmpBlockIndexFull;
+            const CBlockIndex* pindexfull = pindex->untrim_to(&tmpBlockIndexFull);
+            ssHeader << pindexfull->GetBlockHeader();
         }
 
         std::string strHex = HexStr(ssHeader) + "\n";
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 61713f1ee1..8deef8c96b 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -226,15 +226,18 @@ CBlockIndex* ParseHashOrHeight(const UniValue& param, ChainstateManager& chainma
     }
 }
 
-UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex)
+UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex_)
 {
     // Serialize passed information without accessing chain state of the active chain!
     AssertLockNotHeld(cs_main); // For performance reasons
 
+    CBlockIndex tmpBlockIndexFull;
+    const CBlockIndex* blockindex = blockindex_->untrim_to(&tmpBlockIndexFull);
+
     UniValue result(UniValue::VOBJ);
     result.pushKV("hash", blockindex->GetBlockHash().GetHex());
     const CBlockIndex* pnext;
-    int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext);
+    int confirmations = ComputeNextBlockAndDepth(tip, blockindex_, pnext);
     result.pushKV("confirmations", confirmations);
     result.pushKV("height", blockindex->nHeight);
     result.pushKV("version", blockindex->nVersion);
@@ -271,7 +274,7 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex
         }
     }
     result.pushKV("nTx", (uint64_t)blockindex->nTx);
-    if (blockindex->pprev)
+    if (blockindex_->pprev)
         result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex());
     if (pnext)
         result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex());
@@ -966,7 +969,7 @@ static RPCHelpMan getblockheader()
     if (!request.params[1].isNull())
         fVerbose = request.params[1].get_bool();
 
-    const CBlockIndex* pblockindex;
+    CBlockIndex* pblockindex;
     const CBlockIndex* tip;
     {
         ChainstateManager& chainman = EnsureAnyChainman(request.context);
@@ -982,13 +985,9 @@ static RPCHelpMan getblockheader()
 
     if (!fVerbose) {
         CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION);
-        if (pblockindex->trimmed()) {
-            CBlockHeader tmp;
-            ReadBlockHeaderFromDisk(tmp, pblockindex, Params().GetConsensus());
-            ssBlock << tmp;
-        } else {
-            ssBlock << pblockindex->GetBlockHeader();
-        }
+        CBlockIndex tmpBlockIndexFull;
+        const CBlockIndex* pblockindexfull = pblockindex->untrim_to(&tmpBlockIndexFull);
+        ssBlock << pblockindexfull->GetBlockHeader();
         std::string strHex = HexStr(ssBlock);
         return strHex;
     }
@@ -2261,6 +2260,7 @@ static RPCHelpMan getblockstats()
                 if (out.nValue.IsExplicit() && out.nAsset.IsExplicit() && out.nAsset.GetAsset() == asset) {
                     tx_total_out += out.nValue.GetAmount();
                 }
+                utxo_size_inc += GetSerializeSize(out, PROTOCOL_VERSION) + PER_UTXO_OVERHEAD;
             }
         }
     } else {
diff --git a/src/txdb.cpp b/src/txdb.cpp
index a679b0f542..146dd50179 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -18,6 +18,7 @@
 
 // ELEMENTS
 #include <block_proof.h> // CheckProof
+#include <chainparams.h> // Params()
 
 static constexpr uint8_t DB_COIN{'C'};
 static constexpr uint8_t DB_COINS{'c'};
@@ -307,39 +308,49 @@ bool CBlockTreeDB::WritePAKList(const std::vector >&
     return Write(std::make_pair(DB_PAK, uint256S("1")), offline_list) && Write(std::make_pair(DB_PAK, uint256S("2")), online_list) && Write(std::make_pair(DB_PAK, uint256S("3")), reject);
 }
 
-/** Note that we only get a conservative (lower) estimate of the max header height here,
- * obtained by sampling the first 10,000 headers on disk (which are in random order) and
- * taking the highest block we see. */
-bool CBlockTreeDB::WalkBlockIndexGutsForMaxHeight(int* nHeight) {
-    std::unique_ptr<CDBIterator> pcursor(NewIterator());
-    *nHeight = 0;
-    int i = 0;
-    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
-    while (pcursor->Valid()) {
-        if (ShutdownRequested()) return false;
-        std::pair<uint8_t, uint256> key;
-        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
-            i++;
-            if (i > 10'000) {
-                // Under the (accurate) assumption that the headers on disk are effectively in random height order,
-                // we have a good-enough (conservative) estimate of the max height very quickly, and don't need to
-                // waste more time. Shortcutting like this will cause us to keep a few extra headers, which is fine.
-                break;
-            }
-            CDiskBlockIndex diskindex;
-            if (pcursor->GetValue(diskindex)) {
-                if (diskindex.nHeight > *nHeight) {
-                    *nHeight = diskindex.nHeight;
-                }
-                pcursor->Next();
-            } else {
-                return error("%s: failed to read value", __func__);
-            }
-        } else {
-            break;
-        }
+const CBlockIndex *CBlockTreeDB::RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const
+{
+    if(!pindexTrimmed->trimmed()) {
+        return pindexTrimmed;
     }
-    return true;
+    CBlockHeader tmp;
+    bool BlockRead = false;
+    {
+        // At this point we can either be locked or unlocked depending on where we're being called
+        // but cs_main is a RecursiveMutex, so it doesn't matter
+        LOCK(cs_main);
+        // In unpruned nodes, same data could be read from blocks using ReadBlockFromDisk, but that turned out to
+        // be about 6x slower than reading from the index
+        std::pair<uint8_t, uint256> key(DB_BLOCK_INDEX, pindexTrimmed->GetBlockHash());
+        CDiskBlockIndex diskindex;
+        BlockRead = this->Read(key, diskindex);
+        tmp = diskindex.GetBlockHeader();
+    }
+    assert(BlockRead);
+    // Clone the needed data from the original trimmed block
+    pindexNew->pprev = pindexTrimmed->pprev;
+    pindexNew->phashBlock = pindexTrimmed->phashBlock;
+    // Construct block index object
+    pindexNew->nHeight = pindexTrimmed->nHeight;
+    pindexNew->nFile = pindexTrimmed->nFile;
+    pindexNew->nDataPos = pindexTrimmed->nDataPos;
+    pindexNew->nUndoPos = pindexTrimmed->nUndoPos;
+    pindexNew->nVersion = pindexTrimmed->nVersion;
+    pindexNew->hashMerkleRoot = pindexTrimmed->hashMerkleRoot;
+    pindexNew->nTime = pindexTrimmed->nTime;
+    pindexNew->nBits = pindexTrimmed->nBits;
+    pindexNew->nNonce = pindexTrimmed->nNonce;
+    pindexNew->nStatus = pindexTrimmed->nStatus;
+    pindexNew->nTx = pindexTrimmed->nTx;
+
+    pindexNew->proof = tmp.proof;
+    pindexNew->m_dynafed_params = tmp.m_dynafed_params;
+    pindexNew->m_signblock_witness = tmp.m_signblock_witness;
+
+    if (pindexTrimmed->nHeight && pindexTrimmed->nHeight % 1000 == 0) {
+        assert(CheckProof(pindexNew->GetBlockHeader(), Params().GetConsensus()));
+    }
+    return pindexNew;
 }
 
 bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, int trimBelowHeight)
@@ -373,23 +384,26 @@
                 pindexNew->nStatus = diskindex.nStatus;
                 pindexNew->nTx = diskindex.nTx;
+                pindexNew->proof = diskindex.proof;
+                pindexNew->m_dynafed_params = diskindex.m_dynafed_params;
+                pindexNew->m_signblock_witness = diskindex.m_signblock_witness;
+
+                assert(!(g_signed_blocks && diskindex.m_dynafed_params.value().IsNull() && diskindex.proof.value().IsNull()));
+
+                pindexNew->set_stored();
                 n_total++;
+
+                const uint256 block_hash = pindexNew->GetBlockHash();
+                // Only validate one of every 1000 block header for sanity check
+                if (pindexNew->nHeight % 1000 == 0 &&
+                        block_hash != consensusParams.hashGenesisBlock &&
+                        !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) {
+                    return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString());
+                }
                 if (diskindex.nHeight >= trimBelowHeight) {
                     n_untrimmed++;
-                    pindexNew->proof = diskindex.proof;
-                    pindexNew->m_dynafed_params = diskindex.m_dynafed_params;
-                    pindexNew->m_signblock_witness = diskindex.m_signblock_witness;
-
-                    const uint256 block_hash = pindexNew->GetBlockHash();
-                    // Only validate one of every 1000 block header for sanity check
-                    if (pindexNew->nHeight % 1000 == 0 &&
-                            block_hash != consensusParams.hashGenesisBlock &&
-                            !CheckProof(pindexNew->GetBlockHeader(), consensusParams)) {
-                        return error("%s: CheckProof: %s, %s", __func__, block_hash.ToString(), pindexNew->ToString());
-                    }
                 } else {
-                    pindexNew->m_trimmed = true;
-                    pindexNew->m_trimmed_dynafed_block = !diskindex.m_dynafed_params.value().IsNull();
+                    pindexNew->trim();
                 }
 
                 pcursor->Next();
diff --git a/src/txdb.h b/src/txdb.h
index 9d2461d473..2ec256c233 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -87,7 +87,7 @@ class CBlockTreeDB : public CDBWrapper
     bool ReadFlag(const std::string &name, bool &fValue);
     bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, int trimBelowHeight);
     // ELEMENTS:
-    bool WalkBlockIndexGutsForMaxHeight(int* nHeight);
+    const CBlockIndex* RegenerateFullIndex(const CBlockIndex *pindexTrimmed, CBlockIndex *pindexNew) const;
     bool ReadPAKList(std::vector >& offline_list, std::vector >& online_list, bool& reject);
     bool WritePAKList(const std::vector >& offline_list, const std::vector >& online_list, bool reject);
 };
diff --git a/src/validation.cpp b/src/validation.cpp
index f3cf1e2572..749c0f98a5 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2348,6 +2348,7 @@ bool CChainState::FlushStateToDisk(
                 vBlocks.reserve(setDirtyBlockIndex.size());
                 std::set<CBlockIndex*> setTrimmableBlockIndex(setDirtyBlockIndex);
                 for (std::set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
+                    (*it)->untrim();
                     vBlocks.push_back(*it);
                     setDirtyBlockIndex.erase(it++);
                 }
@@ -2355,29 +2356,36 @@
                     return AbortNode(state, "Failed to write to block index database");
                 }
 
-                if (fTrimHeaders) {
+                // This should be done inside WriteBatchSync, but CBlockIndex is const there
+                for (std::set<CBlockIndex*>::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) {
+                    (*it)->set_stored();
+                }
+
+                int trim_height = pindexBestHeader ? pindexBestHeader->nHeight - nMustKeepFullHeaders : 0;
+                if (fTrimHeaders && trim_height > 0 && !ShutdownRequested()) {
+                    static int nMinTrimHeight{0};
                     LogPrintf("Flushing block index, trimming headers, setTrimmableBlockIndex.size(): %d\n", setTrimmableBlockIndex.size());
-                    int trim_height = m_chain.Height() - nMustKeepFullHeaders;
-                    int min_height = std::numeric_limits<int>::max();
-                    CBlockIndex* min_index = nullptr;
                     for (std::set<CBlockIndex*>::iterator it = setTrimmableBlockIndex.begin(); it != setTrimmableBlockIndex.end(); it++) {
                         (*it)->assert_untrimmed();
                         if ((*it)->nHeight < trim_height) {
                             (*it)->trim();
-                            if ((*it)->nHeight < min_height) {
-                                min_height = (*it)->nHeight;
-                                min_index = *it;
-                            }
                         }
                     }
-
+                    CBlockIndex* min_index = pindexBestHeader->GetAncestor(trim_height-1);
                     // Handle any remaining untrimmed blocks that were too recent for trimming last time we flushed.
                     if (min_index) {
-                        min_index = min_index->pprev;
-                        while (min_index && !min_index->trimmed()) {
-                            min_index->trim();
+                        int nMaxTrimHeightRound = std::max(nMinTrimHeight, min_index->nHeight + 1);
+                        while (min_index && min_index->nHeight >= nMinTrimHeight) {
+                            if (!min_index->trimmed()) {
+                                // there may be gaps due to untrimmed blocks, we need to check them all
+                                if (!min_index->trim()) {
+                                    // Header could not be trimmed, we'll need to try again next round
+                                    nMaxTrimHeightRound = min_index->nHeight;
+                                }
+                            }
                             min_index = min_index->pprev;
                         }
+                        nMinTrimHeight = nMaxTrimHeightRound;
                     }
                 }
             }
@@ -2453,6 +2461,18 @@ static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
     res += warn;
 }
 
+void ForceUntrimHeader(const CBlockIndex *pindex_)
+{
+    assert(pindex_);
+    if (!pindex_->trimmed()) {
+        return;
+    }
+    AssertLockHeld(cs_main);
+    CBlockIndex* pindex = const_cast<CBlockIndex*>(pindex_);
+    pindex->untrim();
+    setDirtyBlockIndex.insert(pindex);
+}
+
 void CChainState::UpdateTip(const CBlockIndex* pindexNew)
 {
     // New best block
@@ -2490,11 +2510,13 @@ void CChainState::UpdateTip(const CBlockIndex* pindexNew)
         this->CoinsTip().DynamicMemoryUsage() * (1.0 / (1<<20)), this->CoinsTip().GetCacheSize(),
         !warning_messages.empty() ? strprintf(" warning='%s'", warning_messages.original) : "");
 
+    ForceUntrimHeader(pindexNew);
     // Do some logging if dynafed parameters changed.
     if (pindexNew->pprev && !pindexNew->dynafed_params().IsNull()) {
         int height = pindexNew->nHeight;
         uint256 hash = pindexNew->GetBlockHash();
         uint256 root = pindexNew->dynafed_params().m_current.CalculateRoot();
+        ForceUntrimHeader(pindexNew->pprev);
         if (pindexNew->pprev->dynafed_params().IsNull()) {
             LogPrintf("Dynafed activated in block %d:%s: %s\n", height, hash.GetHex(), root.GetHex());
         } else if (root != pindexNew->pprev->dynafed_params().m_current.CalculateRoot()) {
@@ -4139,15 +4161,7 @@ bool BlockManager::LoadBlockIndex(
 {
     int trim_below_height = 0;
     if (fTrimHeaders) {
-        int max_height = 0;
-        if (!blocktree.WalkBlockIndexGutsForMaxHeight(&max_height)) {
-            LogPrintf("LoadBlockIndex: Failed to WalkBlockIndexGutsForMaxHeight.\n");
-            return false;
-        }
-
-        int must_keep_headers = (consensus_params.total_valid_epochs + 2) * consensus_params.dynamic_epoch_length;
-        int extra_headers_buffer = consensus_params.dynamic_epoch_length * 2; // XXX arbitrary
-        trim_below_height = max_height - must_keep_headers - extra_headers_buffer;
+        trim_below_height = std::numeric_limits<int>::max();
     }
     if (!blocktree.LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, trim_below_height))
         return false;
@@ -4196,6 +4210,9 @@ bool BlockManager::LoadBlockIndex(
         pindexBestHeader = pindex;
     }
 
+    if (pindexBestHeader) {
+        ForceUntrimHeader(pindexBestHeader);
+    }
     return true;
 }
 
diff --git a/src/validation.h b/src/validation.h
index f4ebc713d9..fcbd388d3c 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1068,4 +1068,5 @@ bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mocka
  */
 const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params);
 
+void ForceUntrimHeader(const CBlockIndex *pindex_) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 #endif // BITCOIN_VALIDATION_H
diff --git a/test/functional/data/rpc_getblockstats.json b/test/functional/data/rpc_getblockstats.json
index 6f82cefcf3..872e4d9f64 100644
--- a/test/functional/data/rpc_getblockstats.json
+++ b/test/functional/data/rpc_getblockstats.json
@@ -142,7 +142,7 @@
       "totalfee": 0,
       "txs": 1,
       "utxo_increase": 2,
-      "utxo_size_inc": 0
+      "utxo_size_inc": 233
     },
     {
       "avgfee": 6720,
@@ -179,7 +179,7 @@
       "totalfee": 6720,
       "txs": 2,
       "utxo_increase": 4,
-      "utxo_size_inc": -110
+      "utxo_size_inc": 424
     },
     {
       "avgfee": 32320,
@@ -216,7 +216,7 @@
       "totalfee": 96960,
       "txs": 4,
       "utxo_increase": 8,
-      "utxo_size_inc": -326
+      "utxo_size_inc": 810
     }
   ]
 }
\ No newline at end of file
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index 2a02c7aa0d..c3cf15b020 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -35,6 +35,25 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
     # multi-asset transaction or not. Probably this check should be done in
     # CreateTransaction instead.
"wallet/coinselection -> wallet/wallet -> wallet/coinselection" + # ELEMENTS: mostly derived from chain.cpp including validation.h for + # trim_header version 2 + "chain -> validation -> chain" + "dynafed -> validation -> dynafed" + "pegins -> validation -> pegins" + "block_proof -> chain -> validation -> block_proof" + "chain -> validation -> consensus/tx_verify -> chain" + "chain -> validation -> deploymentstatus -> chain" + "chain -> validation -> index/blockfilterindex -> chain" + "chain -> validation -> primitives/pak -> chain" + "chain -> validation -> txdb -> chain" + "chain -> validation -> validationinterface -> chain" + "confidential_validation -> pegins -> validation -> confidential_validation" + "consensus/tx_verify -> pegins -> validation -> consensus/tx_verify" + "dynafed -> validation -> primitives/pak -> dynafed" + "block_proof -> chain -> validation -> txdb -> block_proof" + "chain -> validation -> txdb -> pow -> chain" + "chain -> validation -> deploymentstatus -> versionbits -> chain" + "core_io -> script/sign -> pegins -> validation -> signet -> core_io" ) EXIT_CODE=0