From 0083cd26bb460e42be2adee3519fa97e63432d64 Mon Sep 17 00:00:00 2001 From: dan_s Date: Fri, 27 Feb 2026 01:01:22 -0600 Subject: [PATCH] Split main.cpp (8,217 lines) into four focused translation units - tx_validation.cpp (1,012 lines): transaction validation functions (IsStandardTx, CheckTransaction, ContextualCheckInputs, etc.) - mempool_accept.cpp (524 lines): mempool acceptance and orphan management (AcceptToMemoryPool, AddOrphanTx, GetMinRelayFee, etc.) - block_processing.cpp (4,064 lines): block processing, chain management, and disk I/O (ConnectBlock, DisconnectBlock, ActivateBestChain, CheckBlock, LoadBlockIndex, FlushStateToDisk, etc.) - main_internal.h (83 lines): shared internal state declarations formerly in anonymous namespace (CBlockIndexWorkComparator, setBlockIndexCandidates, vinfoBlockFile, setDirtyBlockIndex, etc.) main.cpp retains NET message handling (ProcessMessage, SendMessages), peer state management, and utility functions (2,821 lines). --- src/Makefile.am | 3 + src/block_processing.cpp | 4064 +++++++++++++++++++++++++++ src/main.cpp | 5584 +------------------------------------- src/main.h | 10 + src/main_internal.h | 83 + src/mempool_accept.cpp | 524 ++++ src/tx_validation.cpp | 1012 +++++++ 7 files changed, 5791 insertions(+), 5489 deletions(-) create mode 100644 src/block_processing.cpp create mode 100644 src/main_internal.h create mode 100644 src/mempool_accept.cpp create mode 100644 src/tx_validation.cpp diff --git a/src/Makefile.am b/src/Makefile.am index a042b6555..085a77170 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -281,6 +281,9 @@ libbitcoin_server_a_SOURCES = \ hush_impl.cpp \ hush_nSPV_impl.cpp \ main.cpp \ + tx_validation.cpp \ + mempool_accept.cpp \ + block_processing.cpp \ merkleblock.cpp \ metrics.h \ miner.cpp \ diff --git a/src/block_processing.cpp b/src/block_processing.cpp new file mode 100644 index 000000000..4f20c4b94 --- /dev/null +++ b/src/block_processing.cpp @@ -0,0 +1,4064 @@ +// Copyright (c) 2009-2010 
Satoshi Nakamoto +// Copyright (c) 2009-2014 The Bitcoin Core developers +// Copyright (c) 2016-2024 The Hush developers +// Distributed under the GPLv3 software license, see the accompanying +// file COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html +// +// Block processing functions extracted from main.cpp + +#include "main.h" +#include "main_internal.h" +#include "sodium.h" +#include "addrman.h" +#include "arith_uint256.h" +#include "chainparams.h" +#include "checkpoints.h" +#include "checkqueue.h" +#include "consensus/upgrades.h" +#include "consensus/validation.h" +#include "init.h" +#include "merkleblock.h" +#include "metrics.h" +#include "notarizationdb.h" +#include "net.h" +#include "pow.h" +#include "script/interpreter.h" +#include "txdb.h" +#include "txmempool.h" +#include "ui_interface.h" +#include "undo.h" +#include "util.h" +#include "utilmoneystr.h" +#include "validationinterface.h" +#include "wallet/asyncrpcoperation_sendmany.h" +#include "wallet/asyncrpcoperation_shieldcoinbase.h" +#include +#include +#include +#include +#include +// boost and wolfSSL fight over defining this macro +#undef ALIGN16 +#include +#include + +using namespace std; + +#include "hush_defs.h" +#include "key_io.h" +#include "hush.h" +#include "librustzcash.h" + +extern int32_t HUSH_LOADINGBLOCKS,HUSH_LONGESTCHAIN,HUSH_INSYNC,HUSH_CONNECTING,HUSH_EXTRASATOSHI; +extern int32_t HUSH_NEWBLOCKS; +extern CZindexStats zstats; +extern int32_t nFirstHalvingHeight; +extern const bool ishush3; +extern bool fAddressIndex; +extern bool fTimestampIndex; +extern bool fSpentIndex; +extern uint256 zeroid; + +#define TMPFILE_START 100000000 + +extern bool fLargeWorkForkFound; +extern bool fLargeWorkInvalidChainFound; +extern CBlockIndex *pindexBestForkTip; +extern CBlockIndex *pindexBestForkBase; +extern int64_t nTimeBestReceived; + +// Orphan maps defined in main.cpp +struct COrphanTx { + CTransaction tx; + NodeId fromPeer; +}; +extern std::map mapOrphanTransactions; +extern std::map> 
mapOrphanTransactionsByPrev; + +// Forward declarations for functions that stay in main.cpp +void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state); +bool IsInitialBlockDownload(); +// IsNotInSync() declared in main.h +void Misbehaving(NodeId pnode, int howmuch); +void ClearNodeState(); // helper in main.cpp: calls mapNodeState.clear() + +// Forward declarations within this file +void CheckBlockIndex(); + +// Forward declarations for hush functions +int32_t hush_block2pubkey33(uint8_t *pubkey33,CBlock *block); +bool Getscriptaddress(char *destaddr,const CScript &scriptPubKey); +void hush_setactivation(int32_t height); +void hush_changeblocktime(); +void hush_pricesupdate(int32_t height,CBlock *pblock); +// hush_sc_block_subsidy declared in hush.h +bool hush_dailysnapshot(int32_t height); + +bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart) +{ + // Open history file to append + CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); + if (fileout.IsNull()) + return error("WriteBlockToDisk: OpenBlockFile failed"); + + // Write index header + unsigned int nSize = GetSerializeSize(fileout, block); + fileout << FLATDATA(messageStart) << nSize; + + // Write block + long fileOutPos = ftell(fileout.Get()); + if (fileOutPos < 0) + return error("WriteBlockToDisk: ftell failed"); + pos.nPos = (unsigned int)fileOutPos; + fileout << block; + + return true; +} + +bool ReadBlockFromDisk(int32_t height,CBlock& block, const CDiskBlockPos& pos,bool checkPOW) +{ + uint8_t pubkey33[33]; + block.SetNull(); + + // Open history file to read + CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); + if (filein.IsNull()) + { + //fprintf(stderr,"readblockfromdisk err A\n"); + return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); + } + + // Read block + try { + filein >> block; + } + catch (const std::exception& e) { + fprintf(stderr,"readblockfromdisk err 
B\n"); + return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); + } + // Check the header + if ( 0 && checkPOW != 0 ) + { + hush_block2pubkey33(pubkey33,(CBlock *)&block); + if (!(CheckEquihashSolution(&block, Params()) && CheckProofOfWork(block, pubkey33, height, Params().GetConsensus()))) + { + int32_t i; for (i=0; i<33; i++) + fprintf(stderr,"%02x",pubkey33[i]); + fprintf(stderr," warning unexpected diff at ht.%d\n",height); + + return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); + } + } + return true; +} + +bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex,bool checkPOW) +{ + if ( pindex == 0 ) + return false; + if (!ReadBlockFromDisk(pindex->GetHeight(),block, pindex->GetBlockPos(),checkPOW)) + return false; + if (block.GetHash() != pindex->GetBlockHash()) + return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", + pindex->ToString(), pindex->GetBlockPos().ToString()); + return true; +} + +CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) +{ + // fprintf(stderr,"%s: nHeight=%d\n", __func__, nHeight); + return hush_sc_block_subsidy(nHeight); +} + +void CheckForkWarningConditions() +{ + //fprintf(stderr,"%s checking for IBD\n", __func__); + AssertLockHeld(cs_main); + // Before we get past initial download, we cannot reliably alert about forks + // (we assume we don't get stuck on a fork before finishing our initial sync) + if (IsInitialBlockDownload()) + return; + + //fprintf(stderr,"%s not in IBD\n", __func__); + // If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it) + // of our head, drop it + if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->GetHeight() >= 288) + pindexBestForkTip = NULL; + + if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->chainPower > (chainActive.LastTip()->chainPower + (GetBlockProof(*chainActive.LastTip()) * 6)))) + { + if 
(!fLargeWorkForkFound && pindexBestForkBase) + { + std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") + pindexBestForkBase->phashBlock->ToString() + std::string("'"); + LogPrintf("%s: %s\n", __func__, warning.c_str()); + } + if (pindexBestForkTip && pindexBestForkBase) + { + LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__, + pindexBestForkBase->GetHeight(), pindexBestForkBase->phashBlock->ToString(), + pindexBestForkTip->GetHeight(), pindexBestForkTip->phashBlock->ToString()); + fLargeWorkForkFound = true; + } else { + std::string warning = std::string("Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely."); + LogPrintf("%s: %s\n", __func__, warning.c_str()); + fLargeWorkInvalidChainFound = true; + } + } else { + fLargeWorkForkFound = false; + fLargeWorkInvalidChainFound = false; + } +} + +void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) +{ + AssertLockHeld(cs_main); + // If we are on a fork that is sufficiently large, set a warning flag + CBlockIndex* pfork = pindexNewForkTip; + CBlockIndex* plonger = chainActive.LastTip(); + while (pfork && pfork != plonger) + { + while (plonger && plonger->GetHeight() > pfork->GetHeight()) + plonger = plonger->pprev; + if (pfork == plonger) + break; + pfork = pfork->pprev; + } + + // We define a condition where we should warn the user about as a fork of at least 7 blocks + // with a tip within 72 blocks (+/- 3 hours if no one mines it) of ours + // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network + // hash rate operating on the fork. 
+ // or a chain that is entirely longer than ours and invalid (note that this should be detected by both) + // We define it this way because it allows us to only store the highest fork tip (+ base) which meets + // the 7-block condition and from this always have the most-likely-to-cause-warning fork + if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->GetHeight() > pindexBestForkTip->GetHeight())) && + pindexNewForkTip->chainPower - pfork->chainPower > (GetBlockProof(*pfork) * 7) && + chainActive.Height() - pindexNewForkTip->GetHeight() < 72) + { + pindexBestForkTip = pindexNewForkTip; + pindexBestForkBase = pfork; + } + + CheckForkWarningConditions(); +} + +void InvalidChainFound(CBlockIndex* pindexNew) +{ + if (!pindexBestInvalid || pindexNew->chainPower > pindexBestInvalid->chainPower) + pindexBestInvalid = pindexNew; + + LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, + pindexNew->GetBlockHash().ToString(), pindexNew->GetHeight(), + log(pindexNew->chainPower.chainWork.getdouble())/log(2.0), + DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime())); + CBlockIndex *tip = chainActive.LastTip(); + assert (tip); + LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__, + tip->GetBlockHash().ToString(), chainActive.Height(), + log(tip->chainPower.chainWork.getdouble())/log(2.0), + DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime())); + CheckForkWarningConditions(); +} + +namespace { + + bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart) + { + // Open history file to append + CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); + if (fileout.IsNull()) + return error("%s: OpenUndoFile failed", __func__); + + // Write index header + unsigned int nSize = GetSerializeSize(fileout, blockundo); + fileout << FLATDATA(messageStart) << nSize; + + // Write undo data + long 
fileOutPos = ftell(fileout.Get()); + if (fileOutPos < 0) + return error("%s: ftell failed", __func__); + pos.nPos = (unsigned int)fileOutPos; + fileout << blockundo; + + // calculate & write checksum + CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); + hasher << hashBlock; + hasher << blockundo; + fileout << hasher.GetHash(); +//fprintf(stderr,"hashBlock.%s hasher.%s\n",hashBlock.GetHex().c_str(),hasher.GetHash().GetHex().c_str()); + return true; + } + + bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock) + { + // Open history file to read + CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); + if (filein.IsNull()) + return error("%s: OpenBlockFile failed", __func__); + + // Read block + uint256 hashChecksum; + try { + filein >> blockundo; + filein >> hashChecksum; + } + catch (const std::exception& e) { + return error("%s: Deserialize or I/O error - %s", __func__, e.what()); + } + // Verify checksum + CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); + hasher << hashBlock; + hasher << blockundo; + if (hashChecksum != hasher.GetHash()) + return error("%s: %s Checksum mismatch %s vs %s", __func__,hashBlock.GetHex().c_str(),hashChecksum.GetHex().c_str(),hasher.GetHash().GetHex().c_str()); + + return true; + } + + /** Abort with a message */ + bool AbortNode(const std::string& strMessage, const std::string& userMessage="") + { + strMiscWarning = strMessage; + LogPrintf("*** %s\n", strMessage); + uiInterface.ThreadSafeMessageBox( + userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage, + "", CClientUIInterface::MSG_ERROR); + StartShutdown(); + return false; + } + + bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="") + { + AbortNode(strMessage, userMessage); + return state.Error(strMessage); + } + +} // anon namespace + +/** + * Apply the undo operation of a CTxInUndo to the given chain state. 
+ * @param undo The undo object. + * @param view The coins view to which to apply the changes. + * @param out The out point that corresponds to the tx input. + * @return True on success. + */ +static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out) +{ + bool fClean = true; + + CCoinsModifier coins = view.ModifyCoins(out.hash); + if (undo.nHeight != 0) { + // undo data contains height: this is the last output of the prevout tx being spent + if (!coins->IsPruned()) + fClean = fClean && error("%s: undo data overwriting existing transaction", __func__); + coins->Clear(); + coins->fCoinBase = undo.fCoinBase; + coins->nHeight = undo.nHeight; + coins->nVersion = undo.nVersion; + } else { + if (coins->IsPruned()) + fClean = fClean && error("%s: undo data adding output to missing transaction", __func__); + } + if (coins->IsAvailable(out.n)) + fClean = fClean && error("%s: undo data overwriting existing output", __func__); + if (coins->vout.size() < out.n+1) + coins->vout.resize(out.n+1); + coins->vout[out.n] = undo.txout; + + return fClean; +} + +void ConnectNotarizations(const CBlock &block, int height) +{ + NotarizationsInBlock notarizations = ScanBlockNotarizations(block, height); + if (notarizations.size() > 0) { + CDBBatch batch = CDBBatch(*pnotarizations); + batch.Write(block.GetHash(), notarizations); + WriteBackNotarizations(notarizations, batch); + pnotarizations->WriteBatch(batch, true); + LogPrintf("ConnectBlock: wrote %i block notarizations in block: %s\n", notarizations.size(), block.GetHash().GetHex().data()); + } +} + +void DisconnectNotarizations(const CBlock &block) +{ + NotarizationsInBlock nibs; + if (GetBlockNotarizations(block.GetHash(), nibs)) { + CDBBatch batch = CDBBatch(*pnotarizations); + batch.Erase(block.GetHash()); + EraseBackNotarizations(nibs, batch); + pnotarizations->WriteBatch(batch, true); + LogPrintf("DisconnectTip: deleted %i block notarizations in block: %s\n", nibs.size(), 
block.GetHash().GetHex().data()); + } +} + +int8_t GetAddressType(const CScript &scriptPubKey, CTxDestination &vDest, txnouttype &txType, vector> &vSols) +{ + int8_t keyType = 0; + // some non-standard types, like time lock coinbases, don't solve, but do extract + if ( (Solver(scriptPubKey, txType, vSols) || ExtractDestination(scriptPubKey, vDest)) ) + { + keyType = 1; + if (vDest.which()) + { + // if we failed to solve, and got a vDest, assume P2PKH or P2PK address returned + CKeyID kid; + if (CBitcoinAddress(vDest).GetKeyID(kid)) + { + vSols.push_back(vector(kid.begin(), kid.end())); + } + } + else if (txType == TX_SCRIPTHASH) + { + keyType = 2; + } + } + return keyType; +} + +bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean) +{ + assert(pindex->GetBlockHash() == view.GetBestBlock()); + + if (pfClean) + *pfClean = false; + + bool fClean = true; + + CBlockUndo blockUndo; + CDiskBlockPos pos = pindex->GetUndoPos(); + if (pos.IsNull()) + return error("DisconnectBlock(): no undo data available"); + if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) + return error("DisconnectBlock(): failure reading undo data"); + + if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) + return error("DisconnectBlock(): block and undo data inconsistent"); + std::vector > addressIndex; + std::vector > addressUnspentIndex; + std::vector > spentIndex; + + // undo transactions in reverse order + for (int i = block.vtx.size() - 1; i >= 0; i--) { + const CTransaction &tx = block.vtx[i]; + uint256 hash = tx.GetHash(); + if (fAddressIndex) { + + for (unsigned int k = tx.vout.size(); k-- > 0;) { + const CTxOut &out = tx.vout[k]; + + vector> vSols; + CTxDestination vDest; + txnouttype txType = TX_PUBKEYHASH; + int keyType = GetAddressType(out.scriptPubKey, vDest, txType, vSols); + if ( keyType != 0 ) + { + for (auto addr : vSols) + { + uint160 addrHash = addr.size() == 20 ? 
uint160(addr) : Hash160(addr); + addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, hash, k, false), out.nValue)); + addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, hash, k), CAddressUnspentValue())); + } + } + } + } + + // Check that all outputs are available and match the outputs in the block itself + // exactly. + { + CCoinsModifier outs = view.ModifyCoins(hash); + outs->ClearUnspendable(); + + CCoins outsBlock(tx, pindex->GetHeight()); + // The CCoins serialization does not serialize negative numbers. + // No network rules currently depend on the version here, so an inconsistency is harmless + // but it must be corrected before txout nversion ever influences a network rule. + if (outsBlock.nVersion < 0) + outs->nVersion = outsBlock.nVersion; + if (*outs != outsBlock) + fClean = fClean && error("DisconnectBlock(): added transaction mismatch? database corrupted"); + + // remove outputs + outs->Clear(); + } + + // unspend nullifiers + view.SetNullifiers(tx, false); + + // restore inputs + if (!tx.IsMint()) { + CTxUndo &txundo = blockUndo.vtxundo[i-1]; + //if (tx.IsPegsImport()) txundo.vprevout.insert(txundo.vprevout.begin(),CTxInUndo()); + if (txundo.vprevout.size() != tx.vin.size()) + return error("DisconnectBlock(): transaction and undo data inconsistent"); + for (unsigned int j = tx.vin.size(); j-- > 0;) { + //if (tx.IsPegsImport() && j==0) continue; + const COutPoint &out = tx.vin[j].prevout; + const CTxInUndo &undo = txundo.vprevout[j]; + if (!ApplyTxInUndo(undo, view, out)) + fClean = false; + + const CTxIn input = tx.vin[j]; + + if (fSpentIndex) { + // undo and delete the spent index + spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue())); + } + + if (fAddressIndex) { + const CTxOut &prevout = view.GetOutputFor(tx.vin[j]); + + vector> vSols; + CTxDestination vDest; + txnouttype txType = TX_PUBKEYHASH; + int keyType = 
GetAddressType(prevout.scriptPubKey, vDest, txType, vSols); + if ( keyType != 0 ) + { + for (auto addr : vSols) + { + uint160 addrHash = addr.size() == 20 ? uint160(addr) : Hash160(addr); + // undo spending activity + addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1)); + // restore unspent index + addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight))); + } + } + } + } + } + } + + // set the old best Sprout anchor back + view.PopAnchor(blockUndo.old_sprout_tree_root, SPROUT); + + // set the old best Sapling anchor back + // We can get this from the `hashFinalSaplingRoot` of the last block + // However, this is only reliable if the last block was on or after + // the Sapling activation height. Otherwise, the last anchor was the + // empty root. + const bool sapling = pindex->pprev->GetHeight() >= 1 ? 
true : false; // NetworkUpgradeActive(pindex->pprev->GetHeight(), Params().GetConsensus(), Consensus::UPGRADE_SAPLING); + if (sapling) { + view.PopAnchor(pindex->pprev->hashFinalSaplingRoot, SAPLING); + } else { + view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING); + } + + // move best block pointer to prevout block + view.SetBestBlock(pindex->pprev->GetBlockHash()); + + // If disconnecting a block brings us back before our blocktime halving height, go back + // to our original blocktime so our DAA has the correct target for that height + int nHeight = pindex->pprev->GetHeight(); + nFirstHalvingHeight = GetArg("-z2zheight",340000); + if (ishush3 && (ASSETCHAINS_BLOCKTIME != 150) && (nHeight < nFirstHalvingHeight)) { + LogPrintf("%s: Setting blocktime to 150s at height %d!\n",__func__,nHeight); + ASSETCHAINS_BLOCKTIME = 150; + hush_changeblocktime(); + } + + + if (pfClean) { + *pfClean = fClean; + return true; + } + + if (fAddressIndex) { + if (!pblocktree->EraseAddressIndex(addressIndex)) { + return AbortNode(state, "Failed to delete address index"); + } + if (!pblocktree->UpdateAddressUnspentIndex(addressUnspentIndex)) { + return AbortNode(state, "Failed to write address unspent index"); + } + } + + return fClean; +} + +void static FlushBlockFile(bool fFinalize = false) +{ + LOCK(cs_LastBlockFile); + + CDiskBlockPos posOld(nLastBlockFile, 0); + + FILE *fileOld = OpenBlockFile(posOld); + if (fileOld) { + if (fFinalize) + TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); + FileCommit(fileOld); + fclose(fileOld); + } + + fileOld = OpenUndoFile(posOld); + if (fileOld) { + if (fFinalize) + TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); + FileCommit(fileOld); + fclose(fileOld); + } +} + +bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); + +static CCheckQueue scriptcheckqueue(128); + +void ThreadScriptCheck() { + RenameThread("hush-scriptch"); + scriptcheckqueue.Thread(); +} + + +static 
int64_t nTimeVerify = 0; +static int64_t nTimeConnect = 0; +static int64_t nTimeIndex = 0; +static int64_t nTimeCallbacks = 0; +static int64_t nTimeTotal = 0; +bool FindBlockPos(int32_t tmpflag,CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false); +bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos); + +bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck,bool fCheckPOW) +{ + CDiskBlockPos blockPos; + const CChainParams& chainparams = Params(); + if ( HUSH_NSPV_SUPERLITE ) + return(true); + if ( HUSH_STOPAT != 0 && pindex->GetHeight() > HUSH_STOPAT ) + return(false); + //fprintf(stderr,"connectblock ht.%d\n",(int32_t)pindex->GetHeight()); + AssertLockHeld(cs_main); + + const bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; + + // At startup, HUSH3 doesn't know a block height yet and so we must wait until + // connecting a block to set our private/blocktime flags, which are height-dependent + nFirstHalvingHeight = GetArg("-z2zheight",340000); + if(!ASSETCHAINS_PRIVATE && ishush3) { + unsigned int nHeight = pindex->GetHeight(); + if(nHeight >= nFirstHalvingHeight) { + fprintf(stderr, "%s: Going full z2z at height %d!\n",__func__,pindex->GetHeight()); + ASSETCHAINS_PRIVATE = 1; + } + } + if (ishush3 && (ASSETCHAINS_BLOCKTIME != 75) && (chainActive.Height() >= nFirstHalvingHeight)) { + LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,pindex->GetHeight()); + ASSETCHAINS_BLOCKTIME = 75; + hush_changeblocktime(); + } + + bool fExpensiveChecks = true; + if (fCheckpointsEnabled) { + CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints()); + if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->GetHeight()) == pindex) { + // This block is an ancestor of a 
checkpoint: disable script checks + fExpensiveChecks = false; + } + } + auto verifier = libzcash::ProofVerifier::Strict(); + auto disabledVerifier = libzcash::ProofVerifier::Disabled(); + int32_t futureblock; + CAmount blockReward = GetBlockSubsidy(pindex->GetHeight(), chainparams.GetConsensus()); + uint64_t notarypaycheque = 0; + + // Check it again to verify ztx proofs, and in case a previous version let a bad block in + if ( !CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, fExpensiveChecks ? verifier : disabledVerifier, fCheckPOW, !fJustCheck) || futureblock != 0 ) + { + //fprintf(stderr,"checkblock failure in connectblock futureblock.%d\n",futureblock); + return false; + } + if ( fCheckPOW != 0 && (pindex->nStatus & BLOCK_VALID_CONTEXT) != BLOCK_VALID_CONTEXT ) // Activate Jan 15th, 2019 + { + if ( !ContextualCheckBlock(1,block, state, pindex->pprev) ) + { + fprintf(stderr,"ContextualCheckBlock failed ht.%d\n",(int32_t)pindex->GetHeight()); + if ( pindex->nTime > 1547510400 ) + return false; + fprintf(stderr,"grandfathered exception, until jan 15th 2019\n"); + } else pindex->nStatus |= BLOCK_VALID_CONTEXT; + } + + // Do this here before the block is moved to the main block files. + if ( ASSETCHAINS_NOTARY_PAY[0] != 0 && pindex->GetHeight() > 10 ) + { + // do a full block scan to get ntz position and to enforce a valid notarization is in position 1. + // if ntz in the block, must be position 1 and the coinbase must pay notaries. + int32_t notarizationTx = hush_connectblock(true,pindex,*(CBlock *)&block); + // -1 means that the valid notarization isnt in position 1 or there are too many notarizations in this block. + if ( notarizationTx == -1 ) + return state.DoS(100, error("ConnectBlock(): Notarization is not in TX position 1 or block contains more than 1 notarization! Invalid Block!"), + REJECT_INVALID, "bad-notarization-position"); + // 1 means this block contains a valid notarization and its in position 1. 
+ // its no longer possible for any attempted notarization to be in a block with a valid one! + // if notaries create a notarization even if its not in this chain it will need to be mined inside its own block! + if ( notarizationTx == 1 ) + { + // Check if the notaries have been paid. + if ( block.vtx[0].vout.size() == 1 ) + return state.DoS(100, error("ConnectBlock(): Notaries have not been paid!"), REJECT_INVALID, "bad-cb-amount"); + // calculate the notaries compensation and validate the amounts and pubkeys are correct. + notarypaycheque = hush_checknotarypay((CBlock *)&block,(int32_t)pindex->GetHeight()); + //fprintf(stderr, "notarypaycheque.%lu\n", notarypaycheque); + if ( notarypaycheque > 0 ) + blockReward += notarypaycheque; + else + return state.DoS(100, error("ConnectBlock(): Notary pay validation failed!"), + REJECT_INVALID, "bad-cb-amount"); + } + } + + // Move the block to the main block file, we need this to create the TxIndex in the following loop. + if ( (pindex->nStatus & BLOCK_IN_TMPFILE) != 0 ) + { + unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); + if (!FindBlockPos(0,state, blockPos, nBlockSize+8, pindex->GetHeight(), block.GetBlockTime(),false)) + return error("ConnectBlock(): FindBlockPos failed"); + if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) + return error("ConnectBlock(): FindBlockPos failed"); + pindex->nStatus &= (~BLOCK_IN_TMPFILE); + pindex->nFile = blockPos.nFile; + pindex->nDataPos = blockPos.nPos; + if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) + return error("AcceptBlock(): ReceivedBlockTransactions failed"); + setDirtyFileInfo.insert(blockPos.nFile); + //fprintf(stderr,"added ht.%d copy of tmpfile to %d.%d\n",pindex->GetHeight(),blockPos.nFile,blockPos.nPos); + } + // verify that the view's current state corresponds to the previous block + uint256 hashPrevBlock = pindex->pprev == NULL ? 
uint256() : pindex->pprev->GetBlockHash(); + if ( hashPrevBlock != view.GetBestBlock() ) + { + fprintf(stderr,"ConnectBlock(): hashPrevBlock != view.GetBestBlock() %s != %s\n", hashPrevBlock.ToString().c_str(), view.GetBestBlock().ToString().c_str() ); + + return state.DoS(1, error("ConnectBlock(): hashPrevBlock != view.GetBestBlock()"), + REJECT_INVALID, "hashPrevBlock-not-bestblock"); + } + assert(hashPrevBlock == view.GetBestBlock()); + + // Special case for the genesis block, skipping connection of its transactions + // (its coinbase is unspendable) + if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) { + if (!fJustCheck) { + view.SetBestBlock(pindex->GetBlockHash()); + // Before the genesis block, there was an empty tree + SproutMerkleTree tree; + pindex->hashSproutAnchor = tree.root(); + // The genesis block contained no JoinSplits, lulz + pindex->hashFinalSproutRoot = pindex->hashSproutAnchor; + } + return true; + } + + bool fScriptChecks = (!fCheckpointsEnabled || pindex->GetHeight() >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints())); + // Do not allow blocks that contain transactions which 'overwrite' older transactions, + // unless those are already completely spent. + BOOST_FOREACH(const CTransaction& tx, block.vtx) { + const CCoins* coins = view.AccessCoins(tx.GetHash()); + if (coins && !coins->IsPruned()) + return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"), + REJECT_INVALID, "bad-txns-BIP30"); + } + + unsigned int flags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; + + // DERSIG (BIP66) is also always enforced, but does not have a flag. + + CBlockUndo blockundo; + + if ( ASSETCHAINS_CC != 0 ) + { + if ( scriptcheckqueue.IsIdle() == 0 ) + { + fprintf(stderr,"scriptcheckqueue isnt idle\n"); + sleep(1); + } + } + CCheckQueueControl control(fExpensiveChecks && nScriptCheckThreads ? 
&scriptcheckqueue : NULL); + + int64_t nTimeStart = GetTimeMicros(); + CAmount nFees = 0; + int nInputs = 0; + uint64_t valueout; + int64_t voutsum = 0, prevsum = 0, interest, sum = 0; + unsigned int nSigOps = 0; + CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); + std::vector > vPos; + vPos.reserve(block.vtx.size()); + blockundo.vtxundo.reserve(block.vtx.size() - 1); + std::vector > addressIndex; + std::vector > addressUnspentIndex; + std::vector > spentIndex; + // Construct the incremental merkle tree at the current + // block position, + auto old_sprout_tree_root = view.GetBestAnchor(SPROUT); + // saving the top anchor in the block index as we go. + if (!fJustCheck) { + pindex->hashSproutAnchor = old_sprout_tree_root; + } + + SproutMerkleTree sprout_tree; + + // This should never fail: we should always be able to get the root + // that is on the tip of our chain + //assert(view.GetSproutAnchorAt(old_sprout_tree_root, sprout_tree)); + + + SaplingMerkleTree sapling_tree; + assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree)); + + // Grab the consensus branch ID for the block's height + auto consensusBranchId = CurrentEpochBranchId(pindex->GetHeight(), Params().GetConsensus()); + + std::vector txdata; + txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated + for (unsigned int i = 0; i < block.vtx.size(); i++) + { + const CTransaction &tx = block.vtx[i]; + const uint256 txhash = tx.GetHash(); + nInputs += tx.vin.size(); + nSigOps += GetLegacySigOpCount(tx); + if (nSigOps > MAX_BLOCK_SIGOPS) + return state.DoS(100, error("ConnectBlock(): too many sigops"), + REJECT_INVALID, "bad-blk-sigops"); + //fprintf(stderr,"ht.%d vout0 t%u\n",pindex->GetHeight(),tx.nLockTime); + if (!tx.IsMint()) + { + if (!view.HaveInputs(tx)) + { + fprintf(stderr, "Connect Block missing inputs tx_number.%d \nvin txid.%s vout.%d 
\n",i,tx.vin[0].prevout.hash.ToString().c_str(),tx.vin[0].prevout.n); + return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), + REJECT_INVALID, "bad-txns-inputs-missingorspent"); + } + // are the shielded requirements met? + if (!view.HaveShieldedRequirements(tx)) + return state.DoS(100, error("ConnectBlock(): shielded requirements not met"), REJECT_INVALID, "bad-txns-joinsplit-requirements-not-met"); + + if (fAddressIndex || fSpentIndex) + { + for (size_t j = 0; j < tx.vin.size(); j++) + { + //if (tx.IsPegsImport() && j==0) continue; + const CTxIn input = tx.vin[j]; + const CTxOut &prevout = view.GetOutputFor(tx.vin[j]); + + vector> vSols; + CTxDestination vDest; + txnouttype txType = TX_PUBKEYHASH; + uint160 addrHash; + int keyType = GetAddressType(prevout.scriptPubKey, vDest, txType, vSols); + if ( keyType != 0 ) + { + for (auto addr : vSols) + { + addrHash = addr.size() == 20 ? uint160(addr) : Hash160(addr); + // record spending activity + addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, txhash, j, true), prevout.nValue * -1)); + + // remove address from unspent index + addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, input.prevout.hash, input.prevout.n), CAddressUnspentValue())); + } + + if (fSpentIndex) { + // add the spent index to determine the txid and input that spent an output + // and to find the amount and address from an input + spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue(txhash, j, pindex->GetHeight(), prevout.nValue, keyType, addrHash))); + } + } + } + } + // Add in sigops done by pay-to-script-hash inputs; + // this is to prevent a "rogue miner" from creating + // an incredibly-expensive-to-validate block. 
+ nSigOps += GetP2SHSigOpCount(tx, view); + if (nSigOps > MAX_BLOCK_SIGOPS) + return state.DoS(100, error("ConnectBlock(): too many sigops"), + REJECT_INVALID, "bad-blk-sigops"); + } + + txdata.emplace_back(tx); + + valueout = tx.GetValueOut(); + if ( HUSH_VALUETOOBIG(valueout) != 0 ) + { + fprintf(stderr,"valueout %.8f too big\n",(double)valueout/COIN); + return state.DoS(100, error("ConnectBlock(): GetValueOut too big"),REJECT_INVALID,"tx valueout is too big"); + } + //prevsum = voutsum; + //voutsum += valueout; + /*if ( HUSH_VALUETOOBIG(voutsum) != 0 ) + { + fprintf(stderr,"voutsum %.8f too big\n",(double)voutsum/COIN); + return state.DoS(100, error("ConnectBlock(): voutsum too big"),REJECT_INVALID,"tx valueout is too big"); + } + else + if ( voutsum < prevsum ) // PRLPAY overflows this and it isnt a conclusive test anyway + return state.DoS(100, error("ConnectBlock(): voutsum less after adding valueout"),REJECT_INVALID,"tx valueout is too big");*/ + if (!tx.IsCoinBase()) + { + nFees += view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime) - valueout; + sum += interest; + + std::vector vChecks; + if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false, txdata[i], chainparams.GetConsensus(), consensusBranchId, nScriptCheckThreads ? &vChecks : NULL)) + return false; + control.Add(vChecks); + } + + if (fAddressIndex) { + for (unsigned int k = 0; k < tx.vout.size(); k++) { + const CTxOut &out = tx.vout[k]; + + uint160 addrHash; + + vector> vSols; + CTxDestination vDest; + txnouttype txType = TX_PUBKEYHASH; + int keyType = GetAddressType(out.scriptPubKey, vDest, txType, vSols); + if ( keyType != 0 ) + { + for (auto addr : vSols) + { + addrHash = addr.size() == 20 ? 
uint160(addr) : Hash160(addr); + // record receiving activity + addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, txhash, k, false), out.nValue)); + + // record unspent output + addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight()))); + } + } + } + } + + CTxUndo undoDummy; + if (i > 0) { + blockundo.vtxundo.push_back(CTxUndo()); + } + UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->GetHeight()); + + + BOOST_FOREACH(const OutputDescription &outputDescription, tx.vShieldedOutput) { + sapling_tree.append(outputDescription.cm); + } + + vPos.push_back(std::make_pair(tx.GetHash(), pos)); + pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); + } + + //view.PushAnchor(sprout_tree); + view.PushAnchor(sapling_tree); + if (!fJustCheck) { + pindex->hashFinalSproutRoot = sprout_tree.root(); + } + blockundo.old_sprout_tree_root = old_sprout_tree_root; + + // If Sapling is active, block.hashFinalSaplingRoot must be the + // same as the root of the Sapling tree + const bool sapling = pindex->GetHeight()>=1 ? true : false; //NetworkUpgradeActive(pindex->GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_SAPLING); + if (sapling) { + if (block.hashFinalSaplingRoot != sapling_tree.root()) { + return state.DoS(100, + error("ConnectBlock(): block's hashFinalSaplingRoot is incorrect"), + REJECT_INVALID, "bad-sapling-root-in-block"); + } + } + int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart; + LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 
0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001); + + blockReward += nFees + sum; + if ( ASSETCHAINS_COMMISSION != 0 || ASSETCHAINS_FOUNDERS_REWARD != 0 ) //ASSETCHAINS_OVERRIDE_PUBKEY33[0] != 0 && + { + uint64_t checktoshis; + if ( (checktoshis= the_commission((CBlock *)&block,(int32_t)pindex->GetHeight())) != 0 ) + { + if ( block.vtx[0].vout.size() >= 2 && block.vtx[0].vout[1].nValue == checktoshis ) + blockReward += checktoshis; + else if ( pindex->GetHeight() > 1 ) + fprintf(stderr,"checktoshis %.8f vs %.8f numvouts %d\n",dstr(checktoshis),dstr(block.vtx[0].vout[1].nValue),(int32_t)block.vtx[0].vout.size()); + } + } + if (SMART_CHAIN_SYMBOL[0] != 0 && pindex->GetHeight() == 1 && block.vtx[0].GetValueOut() != blockReward) + { + return state.DoS(100, error("ConnectBlock(): coinbase for block 1 pays wrong amount (actual=%d vs correct=%d)", block.vtx[0].GetValueOut(), blockReward), + REJECT_INVALID, "bad-cb-amount"); + } + if ( block.vtx[0].GetValueOut() > blockReward+HUSH_EXTRASATOSHI ) + { + if ( SMART_CHAIN_SYMBOL[0] != 0 || pindex->GetHeight() >= HUSH_NOTARIES_HEIGHT1 || block.vtx[0].vout[0].nValue > blockReward ) + { + //fprintf(stderr, "coinbase pays too much\n"); + //sleepflag = true; + return state.DoS(100, + error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)", + block.vtx[0].GetValueOut(), blockReward), + REJECT_INVALID, "bad-cb-amount"); + } else if ( IS_HUSH_NOTARY != 0 ) + fprintf(stderr,"allow nHeight.%d coinbase %.8f vs %.8f interest %.8f\n",(int32_t)pindex->GetHeight(),dstr(block.vtx[0].GetValueOut()),dstr(blockReward),dstr(sum)); + } + if (!control.Wait()) + return state.DoS(100, false); + int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart; + LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 
0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001); + + if (fJustCheck) + return true; + + // Write undo information to disk + //fprintf(stderr,"nFile.%d isNull %d vs isvalid %d nStatus %x\n",(int32_t)pindex->nFile,pindex->GetUndoPos().IsNull(),pindex->IsValid(BLOCK_VALID_SCRIPTS),(uint32_t)pindex->nStatus); + if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS)) + { + if (pindex->GetUndoPos().IsNull()) + { + CDiskBlockPos pos; + if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) + return error("ConnectBlock(): FindUndoPos failed"); + if ( pindex->pprev == 0 ) + fprintf(stderr,"ConnectBlock: unexpected null pprev\n"); + if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) + return AbortNode(state, "Failed to write undo data"); + // update nUndoPos in block index + pindex->nUndoPos = pos.nPos; + pindex->nStatus |= BLOCK_HAVE_UNDO; + } + + // Now that all consensus rules have been validated, set nCachedBranchId. + // Move this if BLOCK_VALID_CONSENSUS is ever altered. + static_assert(BLOCK_VALID_CONSENSUS == BLOCK_VALID_SCRIPTS, + "nCachedBranchId must be set after all consensus rules have been validated."); + if (IsActivationHeightForAnyUpgrade(pindex->GetHeight(), Params().GetConsensus())) { + pindex->nStatus |= BLOCK_ACTIVATES_UPGRADE; + pindex->nCachedBranchId = CurrentEpochBranchId(pindex->GetHeight(), chainparams.GetConsensus()); + } else if (pindex->pprev) { + pindex->nCachedBranchId = pindex->pprev->nCachedBranchId; + } + + pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); + setDirtyBlockIndex.insert(pindex); + } + + ConnectNotarizations(block, pindex->GetHeight()); // MoMoM notarization DB. 
+ + if (fTxIndex) + if (!pblocktree->WriteTxIndex(vPos)) + return AbortNode(state, "Failed to write transaction index"); + if (fAddressIndex) { + if (!pblocktree->WriteAddressIndex(addressIndex)) { + return AbortNode(state, "Failed to write address index"); + } + + if (!pblocktree->UpdateAddressUnspentIndex(addressUnspentIndex)) { + return AbortNode(state, "Failed to write address unspent index"); + } + } + + if (fSpentIndex) + if (!pblocktree->UpdateSpentIndex(spentIndex)) + return AbortNode(state, "Failed to write transaction index"); + + if (fTimestampIndex) + { + unsigned int logicalTS = pindex->nTime; + unsigned int prevLogicalTS = 0; + + // retrieve logical timestamp of the previous block + if (pindex->pprev) + if (!pblocktree->ReadTimestampBlockIndex(pindex->pprev->GetBlockHash(), prevLogicalTS)) + LogPrintf("%s: Failed to read previous block's logical timestamp\n", __func__); + + if (logicalTS <= prevLogicalTS) { + logicalTS = prevLogicalTS + 1; + LogPrintf("%s: Previous logical timestamp is newer Actual[%d] prevLogical[%d] Logical[%d]\n", __func__, pindex->nTime, prevLogicalTS, logicalTS); + } + + if (!pblocktree->WriteTimestampIndex(CTimestampIndexKey(logicalTS, pindex->GetBlockHash()))) + return AbortNode(state, "Failed to write timestamp index"); + + if (!pblocktree->WriteTimestampBlockIndex(CTimestampBlockIndexKey(pindex->GetBlockHash()), CTimestampBlockIndexValue(logicalTS))) + return AbortNode(state, "Failed to write blockhash index"); + } + + // add this block to the view's block chain + view.SetBestBlock(pindex->GetBlockHash()); + + int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2; + LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001); + + // Watch for changes to the previous coinbase transaction. 
+ static uint256 hashPrevBestCoinBase; + GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase); + hashPrevBestCoinBase = block.vtx[0].GetHash(); + + int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3; + LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001); + + //FlushStateToDisk(); + hush_connectblock(false,pindex,*(CBlock *)&block); // dPoW state update. + if ( ASSETCHAINS_NOTARY_PAY[0] != 0 ) + { + // Update the notary pay with the latest payment. + pindex->nNotaryPay = pindex->pprev->nNotaryPay + notarypaycheque; + //fprintf(stderr, "total notary pay.%li\n", pindex->nNotaryPay); + } + return true; +} + +enum FlushStateMode { + FLUSH_STATE_NONE, + FLUSH_STATE_IF_NEEDED, + FLUSH_STATE_PERIODIC, + FLUSH_STATE_ALWAYS +}; + +/** + * Update the on-disk chain state. + * The caches and indexes are flushed depending on the mode we're called with + * if they're too large, if it's been a while since the last write, + * or always and in all cases if we're in prune mode and are deleting files. + */ +bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { + LOCK2(cs_main, cs_LastBlockFile); + static int64_t nLastWrite = 0; + static int64_t nLastFlush = 0; + static int64_t nLastSetChain = 0; + std::set setFilesToPrune; + bool fFlushForPrune = false; + try { + if (fPruneMode && fCheckForPruning && !fReindex) { + FindFilesToPrune(setFilesToPrune); + fCheckForPruning = false; + if (!setFilesToPrune.empty()) { + fFlushForPrune = true; + if (!fHavePruned) { + pblocktree->WriteFlag("prunedblockfiles", true); + fHavePruned = true; + } + } + } + int64_t nNow = GetTimeMicros(); + // Avoid writing/flushing immediately after startup. 
+ if (nLastWrite == 0) { + nLastWrite = nNow; + } + if (nLastFlush == 0) { + nLastFlush = nNow; + } + if (nLastSetChain == 0) { + nLastSetChain = nNow; + } + size_t cacheSize = pcoinsTip->DynamicMemoryUsage(); + // The cache is large and close to the limit, but we have time now (not in the middle of a block processing). + bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage; + // The cache is over the limit, we have to write now. + bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage; + // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. + bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; + // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. + bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; + // Combine all conditions that result in a full cache flush. + bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; + // Write blocks and block index to disk. + if (fDoFullFlush || fPeriodicWrite) { + // Depend on nMinDiskSpace to ensure we can write block index + if (!CheckDiskSpace(0)) + return state.Error("out of disk space"); + // First make sure all block and undo data is flushed to disk. + FlushBlockFile(); + // Then update all block file information (which may refer to block and undo files). 
+ { + std::vector > vFiles; + vFiles.reserve(setDirtyFileInfo.size()); + for (set::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { + if ( *it < TMPFILE_START ) + vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it])); + setDirtyFileInfo.erase(it++); + } + std::vector vBlocks; + vBlocks.reserve(setDirtyBlockIndex.size()); + for (set::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { + vBlocks.push_back(*it); + setDirtyBlockIndex.erase(it++); + } + if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { + return AbortNode(state, "Files to write to block index database"); + } + // Now that we have written the block indices to the database, we do not + // need to store solutions for these CBlockIndex objects in memory. + // cs_main must be held here. + uint32_t nTrimmed = 0; + for (CBlockIndex *pblockindex : vBlocks) { + pblockindex->TrimSolution(); + ++nTrimmed; + } + LogPrintf("%s: trimmed %d solutions from block index mode=%d\n", __func__, nTrimmed, mode); + } + // Finally remove any pruned files + if (fFlushForPrune) + UnlinkPrunedFiles(setFilesToPrune); + nLastWrite = nNow; + } + // Flush best chain related state. This can only be done if the blocks / block index write was also done. + if (fDoFullFlush) { + // Typical CCoins structures on disk are around 128 bytes in size. + // Pushing a new one to the database can cause it to be written + // twice (once in the log, and once in the tables). This is already + // an overestimation, as most will delete an existing entry or + // overwrite one. Still, use a conservative safety factor of 2. + if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize())) + return state.Error("out of disk space"); + // Flush the chainstate (which may refer to block index entries). 
+ if (!pcoinsTip->Flush()) + return AbortNode(state, "Failed to write to coin database"); + nLastFlush = nNow; + } + if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) { + // Update best block in wallet (so we can detect restored wallets). + GetMainSignals().SetBestChain(chainActive.GetLocator()); + nLastSetChain = nNow; + } + } catch (const std::runtime_error& e) { + return AbortNode(state, std::string("System error while flushing: ") + e.what()); + } + return true; +} + +void FlushStateToDisk() { + CValidationState state; + if ( HUSH_NSPV_FULLNODE ) + FlushStateToDisk(state, FLUSH_STATE_ALWAYS); +} + +void PruneAndFlush() { + CValidationState state; + fCheckForPruning = true; + FlushStateToDisk(state, FLUSH_STATE_NONE); +} + +/** Update chainActive and related internal data structures. */ +void static UpdateTip(CBlockIndex *pindexNew) { + const CChainParams& chainParams = Params(); + chainActive.SetTip(pindexNew); + + // New best block + nTimeBestReceived = GetTime(); + mempool.AddTransactionsUpdated(1); + HUSH_NEWBLOCKS++; + double progress; + if ( ishush3 ) { + progress = Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.LastTip()); + } else { + int32_t longestchain = hush_longestchain(); + progress = (longestchain > 0 ) ? 
(double) chainActive.Height() / longestchain : 1.0;
+    }
+
+    nFirstHalvingHeight = GetArg("-z2zheight",340000);
+    if(ishush3) {
+        if (ASSETCHAINS_BLOCKTIME != 75 && (chainActive.Height() >= nFirstHalvingHeight)) {
+            LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,chainActive.Height());
+            ASSETCHAINS_BLOCKTIME = 75;
+            hush_changeblocktime();
+        }
+    }
+
+    LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__,
+              chainActive.LastTip()->GetBlockHash().ToString(), chainActive.Height(),
+              log(chainActive.Tip()->chainPower.chainWork.getdouble())/log(2.0),
+              (unsigned long)chainActive.LastTip()->nChainTx,
+              DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.LastTip()->GetBlockTime()), progress,
+              pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize());
+
+    cvBlockChange.notify_all();
+}
+
+/**
+ * Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and
+ * mempool.removeWithoutBranchId after this, with cs_main held.
+ */
+bool static DisconnectTip(CValidationState &state, bool fBare = false) {
+    CBlockIndex *pindexDelete = chainActive.Tip();
+    assert(pindexDelete);
+    // Read block from disk.
+    CBlock block;
+    if (!ReadBlockFromDisk(block, pindexDelete,1))
+        return AbortNode(state, "Failed to read block");
+    //if ( SMART_CHAIN_SYMBOL[0] != 0 || pindexDelete->GetHeight() > 1400000 )
+    {
+        // Refuse to disconnect a block that has been dPoW-notarized.
+        // NOTE(review): "&notarizedhash" had been corrupted to "¬arizedhash"
+        // by HTML-entity mangling in the extracted source; restored here.
+        int32_t notarizedht,prevMoMheight; uint256 notarizedhash,txid;
+        notarizedht = hush_notarized_height(&prevMoMheight,&notarizedhash,&txid);
+        if ( block.GetHash() == notarizedhash )
+        {
+            fprintf(stderr,"DisconnectTip trying to disconnect notarized block at ht.%d\n",(int32_t)pindexDelete->GetHeight());
+            return state.DoS(100, error("AcceptBlock(): DisconnectTip trying to disconnect notarized blockht.%d",(int32_t)pindexDelete->GetHeight()),
+                             REJECT_INVALID, "past-notarized-height");
+        }
+    }
+    // Apply the block atomically to the chain state.
+    uint256 sproutAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SPROUT);
+    uint256 saplingAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SAPLING);
+    int64_t nStart = GetTimeMicros();
+    {
+        CCoinsViewCache view(pcoinsTip);
+        if (!DisconnectBlock(block, state, pindexDelete, view))
+            return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString());
+        assert(view.Flush());
+        DisconnectNotarizations(block);
+    }
+    // Reset per-block cached metadata on the disconnected index entry.
+    pindexDelete->segid = -2;
+    pindexDelete->nNotaryPay = 0;
+    pindexDelete->newcoins = 0;
+    pindexDelete->zfunds = 0;
+
+    LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
+    uint256 sproutAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SPROUT);
+    uint256 saplingAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SAPLING);
+    // Write the chain state to disk, if necessary.
+    if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
+        return false;
+
+    if (!fBare) {
+        // resurrect mempool transactions from the disconnected block.
+        for (int i = 0; i < block.vtx.size(); i++)
+        {
+            // ignore validation errors in resurrected transactions
+            CTransaction &tx = block.vtx[i];
+            // NOTE(review): element type restored; was stripped in extraction.
+            list<CTransaction> removed;
+            CValidationState stateDummy;
+
+            // don't keep staking or invalid transactions
+            if (tx.IsCoinBase() || ((i == (block.vtx.size() - 1)) && (ASSETCHAINS_STAKED && hush_isPoS((CBlock *)&block,pindexDelete->GetHeight(),true) != 0)) || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL))
+            {
+                mempool.remove(tx, removed, true);
+            }
+        }
+        if (sproutAnchorBeforeDisconnect != sproutAnchorAfterDisconnect) {
+            // The anchor may not change between block disconnects,
+            // in which case we don't want to evict from the mempool yet!
+            mempool.removeWithAnchor(sproutAnchorBeforeDisconnect, SPROUT);
+        }
+        if (saplingAnchorBeforeDisconnect != saplingAnchorAfterDisconnect) {
+            // The anchor may not change between block disconnects,
+            // in which case we don't want to evict from the mempool yet!
+            mempool.removeWithAnchor(saplingAnchorBeforeDisconnect, SAPLING);
+        }
+    }
+
+    // Update chainActive and related variables.
+    UpdateTip(pindexDelete->pprev);
+
+    // Updates to connected wallets are triggered by ThreadNotifyWallets
+
+    return true;
+}
+
+// Walk backwards from (30 blocks behind) pindex looking for the earliest block
+// whose timestamp crosses HUSH_SAPING_ACTIVATION; if found, schedule Sapling
+// activation 60 blocks after the transition. Returns the activation height, or
+// 0 when no transition is found (or the chain is too short / discontiguous).
+int32_t hush_activate_sapling(CBlockIndex *pindex)
+{
+    uint32_t blocktime,prevtime; CBlockIndex *prev; int32_t i,transition=0,height,prevht;
+    int32_t activation = 0;
+    if ( pindex == 0 )
+    {
+        fprintf(stderr,"hush_activate_sapling null pindex\n");
+        return(0);
+    }
+    height = pindex->GetHeight();
+    blocktime = (uint32_t)pindex->nTime;
+    //fprintf(stderr,"hush_activate_sapling.%d starting blocktime %u cmp.%d\n",height,blocktime,blocktime > HUSH_SAPING_ACTIVATION);
+
+    // avoid trying unless we have at least 30 blocks
+    if (height < 30)
+        return(0);
+
+    // step back 30 blocks so the scan starts well behind the tip
+    for (i=0; i<30; i++)
+    {
+        if ( (prev= pindex->pprev) == 0 )
+            break;
+        pindex = prev;
+    }
+    if ( i != 30 )
+    {
+        fprintf(stderr,"couldnt go backwards 30 blocks\n");
+        return(0);
+    }
+    height = pindex->GetHeight();
+    blocktime = (uint32_t)pindex->nTime;
+    //fprintf(stderr,"starting blocktime %u cmp.%d\n",blocktime,blocktime > HUSH_SAPING_ACTIVATION);
+    if ( blocktime > HUSH_SAPING_ACTIVATION ) // find the earliest transition
+    {
+        while ( (prev= pindex->pprev) != 0 )
+        {
+            prevht = prev->GetHeight();
+            prevtime = (uint32_t)prev->nTime;
+            //fprintf(stderr,"(%d, %u).%d -> (%d, %u).%d\n",prevht,prevtime,prevtime > HUSH_SAPING_ACTIVATION,height,blocktime,blocktime > HUSH_SAPING_ACTIVATION);
+            if ( prevht+1 != height )
+            {
+                fprintf(stderr,"hush_activate_sapling: unexpected non-contiguous ht %d vs %d\n",prevht,height);
+                return(0);
+            }
+            if ( prevtime <= HUSH_SAPING_ACTIVATION && blocktime > HUSH_SAPING_ACTIVATION )
+            {
+                activation = height + 60;
+                fprintf(stderr,"%s transition at %d (%d, %u) -> (%d, %u)\n",SMART_CHAIN_SYMBOL,height,prevht,prevtime,height,blocktime);
+            }
+            // stop once we are more than a day before the activation timestamp
+            if ( prevtime < HUSH_SAPING_ACTIVATION-3600*24 )
+                break;
+            pindex = prev;
+            height = prevht;
+
blocktime = prevtime;
+        }
+    }
+    if ( activation != 0 )
+    {
+        hush_setactivation(activation);
+        fprintf(stderr,"%s sapling activation at %d\n",SMART_CHAIN_SYMBOL,activation);
+        ASSETCHAINS_SAPLING = activation;
+    }
+    return activation;
+}
+
+// Cumulative benchmark timers for ConnectTip (microseconds).
+static int64_t nTimeReadFromDisk = 0;
+static int64_t nTimeConnectTotal = 0;
+static int64_t nTimeFlush = 0;
+static int64_t nTimeChainState = 0;
+static int64_t nTimePostConnect = 0;
+
+// Protected by cs_main
+// NOTE(review): the template argument lists below were stripped during
+// extraction and are restored here from the uses in ConnectTip and
+// DrainRecentlyConflicted -- verify against the pre-split main.cpp.
+std::map<CBlockIndex*, std::list<CTransaction>> recentlyConflictedTxs;
+uint64_t nRecentlyConflictedSequence = 0;
+uint64_t nNotifiedSequence = 0;
+
+/**
+ * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock
+ * corresponding to pindexNew, to bypass loading it again from disk.
+ * You probably want to call mempool.removeWithoutBranchId after this, with cs_main held.
+ */
+bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *pblock) {
+
+    //fprintf(stderr, "%s: Start\n", __FUNCTION__);
+    assert(pindexNew->pprev == chainActive.Tip());
+    // Read block from disk.
+    int64_t nTime1 = GetTimeMicros();
+    CBlock block;
+    if (!pblock) {
+        if (!ReadBlockFromDisk(block, pindexNew,1))
+            return AbortNode(state, "Failed to read block");
+        // NOTE(review): "&block;" had been corrupted to "█" (U+2588) by
+        // HTML-entity mangling in the extracted source; restored here.
+        pblock = &block;
+    }
+    HUSH_CONNECTING = (int32_t)pindexNew->GetHeight();
+    //fprintf(stderr,"%s connecting ht.%d maxsize.%d vs %d\n",SMART_CHAIN_SYMBOL,(int32_t)pindexNew->GetHeight(),MAX_BLOCK_SIZE(pindexNew->GetHeight()),(int32_t)::GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION));
+
+    // Apply the block atomically to the chain state.
+    int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1;
+    // Declared outside the block below because it is read after the view scope ends.
+    int64_t nTime3;
+    LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
+    {
+        CCoinsViewCache view(pcoinsTip);
+        bool rv = ConnectBlock(*pblock, state, pindexNew, view, false, true);
+        HUSH_CONNECTING = -1;
+        GetMainSignals().BlockChecked(*pblock, state);
+        if (!rv) {
+            if (state.IsInvalid())
+            {
+                InvalidBlockFound(pindexNew, state);
+                /*if ( ASSETCHAINS_CBOPRET != 0 )
+                {
+                    pindexNew->nStatus &= ~BLOCK_FAILED_MASK;
+                    fprintf(stderr,"reconsiderblock %d\n",(int32_t)pindexNew->GetHeight());
+                }*/
+            }
+            return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString());
+        }
+        mapBlockSource.erase(pindexNew->GetBlockHash());
+        nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2;
+        LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
+        if ( HUSH_NSPV_FULLNODE )
+            assert(view.Flush());
+    }
+    int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3;
+    LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
+    // Write the chain state to disk, if necessary.
+    if ( HUSH_NSPV_FULLNODE )
+    {
+        if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED))
+            return false;
+    }
+    int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4;
+    LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
+
+    // Remove conflicting transactions from the mempool.
+    std::list<CTransaction> txConflicted;
+    mempool.removeForBlock(pblock->vtx, pindexNew->GetHeight(), txConflicted, !IsInitialBlockDownload());
+
+    // Remove transactions that expire at new block height from mempool
+    auto ids = mempool.removeExpired(pindexNew->GetHeight());
+
+    for (auto id : ids) {
+        uiInterface.NotifyTxExpiration(id);
+    }
+
+    // Update chainActive & related variables.
+    UpdateTip(pindexNew);
+
+    // Cache the conflicted transactions for subsequent notification.
+    // Updates to connected wallets are triggered by ThreadNotifyWallets
+    recentlyConflictedTxs.insert(std::make_pair(pindexNew, txConflicted));
+    nRecentlyConflictedSequence += 1;
+
+    int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1;
+    LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
+    LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
+    if ( HUSH_LONGESTCHAIN != 0 && (pindexNew->GetHeight() >= HUSH_LONGESTCHAIN ))
+        HUSH_INSYNC = (int32_t)pindexNew->GetHeight();
+    else HUSH_INSYNC = 0;
+    //fprintf(stderr,"connect.%d insync.%d ASSETCHAINS_SAPLING.%d\n",(int32_t)pindexNew->GetHeight(),HUSH_INSYNC,ASSETCHAINS_SAPLING);
+
+    if ( HUSH_NSPV_FULLNODE )
+    {
+        //fprintf(stderr,"%s: HUSH_NSPV_FULLNODE\n", __FUNCTION__);
+        if ( ASSETCHAINS_CBOPRET != 0 )
+            hush_pricesupdate(pindexNew->GetHeight(),pblock);
+        if ( ASSETCHAINS_SAPLING <= 0 && pindexNew->nTime > HUSH_SAPING_ACTIVATION - 24*3600 )
+            hush_activate_sapling(pindexNew);
+        if ( ASSETCHAINS_CC != 0 && HUSH_SNAPSHOT_INTERVAL != 0 && (pindexNew->GetHeight() % HUSH_SNAPSHOT_INTERVAL) == 0 && pindexNew->GetHeight() >= HUSH_SNAPSHOT_INTERVAL )
+        {
+            uint64_t start = time(NULL);
+            if ( !hush_dailysnapshot(pindexNew->GetHeight()) )
+            {
+                fprintf(stderr, "daily snapshot failed, please reindex your chain\n");
+                StartShutdown();
+            }
+            fprintf(stderr, "snapshot completed in: %d seconds\n", (int32_t)(time(NULL)-start));
+        }
+    }
+    //fprintf(stderr,"%s: returning true\n", __FUNCTION__);
+    return true;
+}
+// Atomically hand the accumulated per-block conflicted-tx map (and its
+// sequence number) to the wallet-notification thread, clearing the cache.
+std::pair<std::map<CBlockIndex*, std::list<CTransaction>>, uint64_t> DrainRecentlyConflicted()
+{
+    uint64_t recentlyConflictedSequence;
+    std::map<CBlockIndex*, std::list<CTransaction>> txs;
+    {
+        LOCK(cs_main);
+        recentlyConflictedSequence = nRecentlyConflictedSequence;
+        txs.swap(recentlyConflictedTxs);
+    }
+
+    return std::make_pair(txs, recentlyConflictedSequence);
+}
+
+// Regtest-only: record the sequence number the notifier has processed.
+void SetChainNotifiedSequence(uint64_t recentlyConflictedSequence) {
+    assert(Params().NetworkIDString() == "regtest");
+    LOCK(cs_main);
+    nNotifiedSequence = recentlyConflictedSequence;
+}
+
+// Regtest-only: true once the notifier has caught up with every connected block.
+bool ChainIsFullyNotified() {
+    assert(Params().NetworkIDString() == "regtest");
+    LOCK(cs_main);
+    return nRecentlyConflictedSequence == nNotifiedSequence;
+}
+
+/**
+ * Return the tip of the chain with the most work in it, that isn't
+ * known to be invalid (it's however far from certain to be valid).
+ */
+static CBlockIndex* FindMostWorkChain() {
+    do {
+        CBlockIndex *pindexNew = NULL;
+
+        // Find the best candidate header.
+        {
+            // NOTE(review): iterator template arguments restored to match
+            // setBlockIndexCandidates (stripped in extraction) -- verify.
+            std::set<CBlockIndex*, CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexCandidates.rbegin();
+            if (it == setBlockIndexCandidates.rend())
+                return NULL;
+            pindexNew = *it;
+        }
+
+        // Check whether all blocks on the path between the currently active chain and the candidate are valid.
+        // Just going until the active chain is an optimization, as we know all blocks in it are valid already.
+        CBlockIndex *pindexTest = pindexNew;
+        bool fInvalidAncestor = false;
+        while (pindexTest && !chainActive.Contains(pindexTest)) {
+            assert(pindexTest->nChainTx || pindexTest->GetHeight() == 0);
+
+            // Pruned nodes may have entries in setBlockIndexCandidates for
+            // which block files have been deleted. Remove those as candidates
+            // for the most work chain if we come across them; we can't switch
+            // to a chain unless we have all the non-active-chain parent blocks.
+            bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
+            bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
+            if (fFailedChain || fMissingData) {
+                // Candidate chain is not usable (either invalid or missing data)
+                if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->chainPower > pindexBestInvalid->chainPower))
+                    pindexBestInvalid = pindexNew;
+                CBlockIndex *pindexFailed = pindexNew;
+                // Remove the entire chain from the set.
+ while (pindexTest != pindexFailed) { + if (fFailedChain) { + pindexFailed->nStatus |= BLOCK_FAILED_CHILD; + } else if (fMissingData) { + // If we're missing data, then add back to mapBlocksUnlinked, + // so that if the block arrives in the future we can try adding + // to setBlockIndexCandidates again. + mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed)); + } + setBlockIndexCandidates.erase(pindexFailed); + pindexFailed = pindexFailed->pprev; + } + setBlockIndexCandidates.erase(pindexTest); + fInvalidAncestor = true; + break; + } + pindexTest = pindexTest->pprev; + } + if (!fInvalidAncestor) + return pindexNew; + } while(true); +} + +/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ +static void PruneBlockIndexCandidates() { + //fprintf(stderr,"%s:, setBlockIndexCandidates.size=%d\n", __FUNCTION__, setBlockIndexCandidates.size() ); + // Note that we can't delete the current block itself, as we may need to return to it later in case a + // reorganization to a better block fails. + std::set::iterator it = setBlockIndexCandidates.begin(); + while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.LastTip())) { + //fprintf(stderr,"%s:, erasing blockindexcandidate element height=%d, time=%d\n", __FUNCTION__, (*it)->GetHeight(), (*it)->GetBlockTime() ); + setBlockIndexCandidates.erase(it++); + //fprintf(stderr,"%s:, erased element\n", __FUNCTION__); + } + //fprintf(stderr,"%s:, setBlockIndexCandidates.size()=%d\n", __FUNCTION__, setBlockIndexCandidates.size() ); + // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. + assert(!setBlockIndexCandidates.empty()); +} + +/** + * Try to make some progress towards making pindexMostWork the active block. + * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork. 
+ */ +static bool ActivateBestChainStep(bool fSkipdpow, CValidationState &state, CBlockIndex *pindexMostWork, CBlock *pblock) { + AssertLockHeld(cs_main); + bool fInvalidFound = false; + const CBlockIndex *pindexOldTip = chainActive.Tip(); + const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); + + // stop trying to reorg if the reorged chain is before last notarized height. + // stay on the same chain tip! + int32_t notarizedht,prevMoMheight; uint256 notarizedhash,txid; + notarizedht = hush_notarized_height(&prevMoMheight,&notarizedhash,&txid); + if ( !fSkipdpow && pindexFork != 0 && pindexOldTip->GetHeight() > notarizedht && pindexFork->GetHeight() < notarizedht ) + { + LogPrintf("pindexOldTip->GetHeight().%d > notarizedht %d && pindexFork->GetHeight().%d is < notarizedht %d, so ignore it\n",(int32_t)pindexOldTip->GetHeight(),notarizedht,(int32_t)pindexFork->GetHeight(),notarizedht); + // *** DEBUG *** + if (1) + { + const CBlockIndex *pindexLastNotarized = mapBlockIndex[notarizedhash]; + auto msg = "- " + strprintf(_("Current tip : %s, height %d, work %s"), + pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight(), pindexOldTip->chainPower.chainWork.GetHex()) + "\n" + + "- " + strprintf(_("New tip : %s, height %d, work %s"), + pindexMostWork->phashBlock->GetHex(), pindexMostWork->GetHeight(), pindexMostWork->chainPower.chainWork.GetHex()) + "\n" + + "- " + strprintf(_("Fork point : %s, height %d"), + pindexFork->phashBlock->GetHex(), pindexFork->GetHeight()) + "\n" + + "- " + strprintf(_("Last ntrzd : %s, height %d"), + pindexLastNotarized->phashBlock->GetHex(), pindexLastNotarized->GetHeight()); + LogPrintf("[ Debug ]\n%s\n",msg); + + int nHeight = pindexFork ? 
pindexFork->GetHeight() : -1; + int nTargetHeight = std::min(nHeight + 32, pindexMostWork->GetHeight()); + + LogPrintf("[ Debug ] nHeight = %d, nTargetHeight = %d\n", nHeight, nTargetHeight); + + CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); + while (pindexIter && pindexIter->GetHeight() != nHeight) { + LogPrintf("[ Debug -> New blocks list ] %s, height %d\n", pindexIter->phashBlock->GetHex(), pindexIter->GetHeight()); + pindexIter = pindexIter->pprev; + } + } + + CValidationState tmpstate; + InvalidateBlock(tmpstate,pindexMostWork); // trying to invalidate longest chain, which tried to reorg notarized chain (in case of fork point below last notarized block) + return state.DoS(100, error("ActivateBestChainStep(): pindexOldTip->GetHeight().%d > notarizedht %d && pindexFork->GetHeight().%d is < notarizedht %d, so ignore it",(int32_t)pindexOldTip->GetHeight(),notarizedht,(int32_t)pindexFork->GetHeight(),notarizedht), + REJECT_INVALID, "past-notarized-height"); + } + // - On ChainDB initialization, pindexOldTip will be null, so there are no removable blocks. + // - If pindexMostWork is in a chain that doesn't have the same genesis block as our chain, + // then pindexFork will be null, and we would need to remove the entire chain including + // our genesis block. In practice this (probably) won't happen because of checks elsewhere. + auto reorgLength = pindexOldTip ? pindexOldTip->GetHeight() - (pindexFork ? pindexFork->GetHeight() : -1) : 0; + assert(MAX_REORG_LENGTH > 0);//, "We must be able to reorg some distance"); + if ( reorgLength > MAX_REORG_LENGTH) + { + auto msg = strprintf(_( + "A block chain reorganization has been detected that would roll back %d blocks!!! " + "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety." 
+ ), reorgLength, MAX_REORG_LENGTH) + "\n\n" + + _("Reorganization details") + ":\n" + + "- " + strprintf(_("Current tip: %s, height %d, work %s\n"), + pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight(), pindexOldTip->chainPower.chainWork.GetHex()) + "\n" + + "- " + strprintf(_("New tip: %s, height %d, work %s\n"), + pindexMostWork->phashBlock->GetHex(), pindexMostWork->GetHeight(), pindexMostWork->chainPower.chainWork.GetHex()) + "\n" + + "- " + strprintf(_("Fork point: %s %s, height %d"), + SMART_CHAIN_SYMBOL,pindexFork->phashBlock->GetHex(), pindexFork->GetHeight()) + "\n\n" + + _("Please help me, wise human!"); + LogPrintf("*** %s\nif you launch with -maxreorg=%d it might be able to resolve this automatically", msg,reorgLength+10); + fprintf(stderr,"*** %s\nif you launch with -maxreorg=%d it might be able to resolve this automatically", msg.c_str(),reorgLength+10); + uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR); + StartShutdown(); + return false; + } + + // Disconnect active blocks which are no longer in the best chain. + bool fBlocksDisconnected = false; + + while (chainActive.Tip() && chainActive.Tip() != pindexFork) { + if (!DisconnectTip(state)) + return false; + fBlocksDisconnected = true; + } + if ( HUSH_REWIND != 0 ) + { + CBlockIndex *tipindex; + fprintf(stderr,">>>>>>>>>>> rewind start ht.%d -> HUSH_REWIND.%d\n",chainActive.LastTip()->GetHeight(),HUSH_REWIND); + while ( HUSH_REWIND > 0 && (tipindex= chainActive.LastTip()) != 0 && tipindex->GetHeight() > HUSH_REWIND ) + { + fBlocksDisconnected = true; + fprintf(stderr,"%d ",(int32_t)tipindex->GetHeight()); + InvalidateBlock(state,tipindex); + if ( !DisconnectTip(state) ) + break; + } + fprintf(stderr,"reached rewind.%d, best to do: ./hush-cli -ac_name=%s stop\n",HUSH_REWIND,SMART_CHAIN_SYMBOL); + sleep(20); + fprintf(stderr,"resuming normal operations\n"); + HUSH_REWIND = 0; + //return(true); + } + // Build list of new blocks to connect. 
+ std::vector<CBlockIndex*> vpindexToConnect; + bool fContinue = true; + int nHeight = pindexFork ? pindexFork->GetHeight() : -1; + while (fContinue && nHeight != pindexMostWork->GetHeight()) { + // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need + // a few blocks along the way. + int nTargetHeight = std::min(nHeight + 32, pindexMostWork->GetHeight()); + vpindexToConnect.clear(); + vpindexToConnect.reserve(nTargetHeight - nHeight); + CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); + while (pindexIter && pindexIter->GetHeight() != nHeight) { + vpindexToConnect.push_back(pindexIter); + pindexIter = pindexIter->pprev; + } + nHeight = nTargetHeight; + + // Connect new blocks. + BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) { + if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) { + if (state.IsInvalid()) { + // The block violates a consensus rule. + if (!state.CorruptionPossible()) + InvalidChainFound(vpindexToConnect.back()); + state = CValidationState(); + fInvalidFound = true; + fContinue = false; + break; + } else { + // A system error occurred (disk space, database error, ...). + return false; + } + } else { + PruneBlockIndexCandidates(); + if (!pindexOldTip || chainActive.Tip()->chainPower > pindexOldTip->chainPower) { + // We're in a better position than we were. Return temporarily to release the lock. + fContinue = false; + break; + } + } + } + } + + if (fBlocksDisconnected) { + mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + } + mempool.removeWithoutBranchId( + CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); + mempool.check(pcoinsTip); + + // Callbacks/notifications for a new best chain. 
+ if (fInvalidFound) + CheckForkWarningConditionsOnNewFork(vpindexToConnect.back()); + else + CheckForkWarningConditions(); + + return true; +} + +/** + * Make the best chain active, in multiple steps. The result is either failure + * or an activated best chain. pblock is either NULL or a pointer to a block + * that is already loaded (to avoid loading it again from disk). + */ +bool ActivateBestChain(bool fSkipdpow, CValidationState &state, CBlock *pblock) { + CBlockIndex *pindexNewTip = NULL; + CBlockIndex *pindexMostWork = NULL; + const CChainParams& chainParams = Params(); + do { + // Sleep briefly to allow other threads a chance at grabbing cs_main if + // we are connecting a long chain of blocks and would otherwise hold the + // lock almost continuously. This helps + // the internal wallet, if it is enabled, to keep up with the connected + // blocks, reducing the overall time until the node becomes usable. + // + // This is defined to be an interruption point. + // + boost::this_thread::sleep_for(boost::chrono::microseconds(200)); + + if (ShutdownRequested()) + break; + + bool fInitialDownload; + { + LOCK(cs_main); + pindexMostWork = FindMostWorkChain(); + + // Whether we have anything to do at all. + if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip()) + return true; + + if (!ActivateBestChainStep(fSkipdpow, state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL)) + return false; + pindexNewTip = chainActive.Tip(); + fInitialDownload = IsInitialBlockDownload(); + } + // When we reach this point, we switched to a new tip (stored in pindexNewTip). + + // Notifications/callbacks that can run without cs_main + if (!fInitialDownload) { + uint256 hashNewTip = pindexNewTip->GetBlockHash(); + // Relay inventory, but don't relay old inventory during initial block download. 
+ int nBlockEstimate = 0; + if (fCheckpointsEnabled) + nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()); + // Don't relay blocks if pruning -- could cause a peer to try to download, resulting + // in a stalled download if the block file is pruned before the request. + if (nLocalServices & NODE_NETWORK) { + LOCK(cs_vNodes); + BOOST_FOREACH(CNode* pnode, vNodes) + if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) + pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip)); + } + // Notify external listeners about the new tip. + GetMainSignals().UpdatedBlockTip(pindexNewTip); + uiInterface.NotifyBlockTip(hashNewTip); + } //else fprintf(stderr,"initial download skips propagation\n"); + } while(pindexMostWork != chainActive.Tip()); + CheckBlockIndex(); + + // Write changes periodically to disk, after relay. + if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) { + return false; + } + + return true; +} + +bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) { + AssertLockHeld(cs_main); + + // Mark the block itself as invalid. + pindex->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(pindex); + setBlockIndexCandidates.erase(pindex); + + while (chainActive.Contains(pindex)) { + CBlockIndex *pindexWalk = chainActive.Tip(); + pindexWalk->nStatus |= BLOCK_FAILED_CHILD; + setDirtyBlockIndex.insert(pindexWalk); + setBlockIndexCandidates.erase(pindexWalk); + // ActivateBestChain considers blocks already in chainActive + // unconditionally valid already, so force disconnect away from it. 
+ if (!DisconnectTip(state)) { + mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeWithoutBranchId( + CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); + return false; + } + } + //LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); + + // The resulting new best tip may not be in setBlockIndexCandidates anymore, so + // add it again. + BlockMap::iterator it = mapBlockIndex.begin(); + while (it != mapBlockIndex.end()) { + if ((it->second != 0) && it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) { + setBlockIndexCandidates.insert(it->second); + } + it++; + } + + InvalidChainFound(pindex); + mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeWithoutBranchId( + CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); + return true; +} + +bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) { + AssertLockHeld(cs_main); + + int nHeight = pindex->GetHeight(); + + // Remove the invalidity flag from this block and all its descendants. + BlockMap::iterator it = mapBlockIndex.begin(); + while (it != mapBlockIndex.end()) { + if ((it->second != 0) && !it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { + it->second->nStatus &= ~BLOCK_FAILED_MASK; + setDirtyBlockIndex.insert(it->second); + if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) { + setBlockIndexCandidates.insert(it->second); + } + if (it->second == pindexBestInvalid) { + // Reset invalid block marker if it was pointing to one of those. 
+ pindexBestInvalid = NULL; + } + } + it++; + } + + // Remove the invalidity flag from all ancestors too. + while (pindex != NULL) { + if (pindex->nStatus & BLOCK_FAILED_MASK) { + pindex->nStatus &= ~BLOCK_FAILED_MASK; + setDirtyBlockIndex.insert(pindex); + } + pindex = pindex->pprev; + } + return true; +} + +CBlockIndex* AddToBlockIndex(const CBlockHeader& block) +{ + // Check for duplicate + uint256 hash = block.GetHash(); + BlockMap::iterator it = mapBlockIndex.find(hash); + BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock); + + // the following block is for debugging, comment when not needed + /* + std::vector vrit; + for (BlockMap::iterator bit = mapBlockIndex.begin(); bit != mapBlockIndex.end(); bit++) + { + if (bit->second == NULL) + vrit.push_back(bit); + } + if (!vrit.empty()) + { + printf("found %d NULL blocks in mapBlockIndex\n", vrit.size()); + } + */ + + if (it != mapBlockIndex.end()) + { + if ( it->second != 0 ) // vNodes.size() >= HUSH_LIMITED_NETWORKSIZE + { + // this is the strange case where somehow the hash is in the mapBlockIndex via as yet undetermined process, but the pindex for the hash is not there. Theoretically it is due to processing the block headers, but I have seen it get this case without having received it from the block headers or anywhere else... jl777 + //fprintf(stderr,"addtoblockindex already there %p\n",it->second); + return it->second; + } + if ( miPrev != mapBlockIndex.end() && (*miPrev).second == 0 ) + { + fprintf(stderr,"%s: edge case of both block and prevblock in the strange state\n", __func__); + return(0); // return here to avoid the state of pindex->GetHeight() not set and pprev NULL + } + } + // Construct new block index object + CBlockIndex* pindexNew = new CBlockIndex(block); + assert(pindexNew); + // We assign the sequence id to blocks only when the full data is available, + // to avoid miners withholding blocks but broadcasting headers, to get a + // competitive advantage. 
+ pindexNew->nSequenceId = 0; + BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; + pindexNew->phashBlock = &((*mi).first); + if (miPrev != mapBlockIndex.end()) + { + if ( (pindexNew->pprev = (*miPrev).second) != 0 ) + pindexNew->SetHeight(pindexNew->pprev->GetHeight() + 1); + else fprintf(stderr,"unexpected null pprev %s\n",hash.ToString().c_str()); + pindexNew->BuildSkip(); + } + pindexNew->chainPower = (pindexNew->pprev ? CChainPower(pindexNew) + pindexNew->pprev->chainPower : CChainPower(pindexNew)) + GetBlockProof(*pindexNew); + pindexNew->RaiseValidity(BLOCK_VALID_TREE); + if (pindexBestHeader == NULL || pindexBestHeader->chainPower < pindexNew->chainPower) + pindexBestHeader = pindexNew; + + setDirtyBlockIndex.insert(pindexNew); + //fprintf(stderr,"added to block index %s %p\n",hash.ToString().c_str(),pindexNew); + mi->second = pindexNew; + return pindexNew; +} + +/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */ +bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos) +{ + pindexNew->nTx = block.vtx.size(); + pindexNew->nChainTx = 0; + CAmount sproutValue = 0; + CAmount saplingValue = 0; + bool isShieldedTx = false; + unsigned int nShieldedSpends=0,nShieldedSpendsInBlock=0,nShieldedOutputs=0,nPayments=0,nShieldedOutputsInBlock=0; + unsigned int nShieldedTx=0,nFullyShieldedTx=0,nDeshieldingTx=0,nShieldingTx=0; + unsigned int nShieldedPayments=0,nFullyShieldedPayments=0,nShieldingPayments=0,nDeshieldingPayments=0; + unsigned int nNotarizations=0; + + for (auto tx : block.vtx) { + // Negative valueBalance "takes" money from the transparent value pool + // and adds it to the Sapling value pool. Positive valueBalance "gives" + // money to the transparent value pool, removing from the Sapling value + // pool. So we invert the sign here. 
+ saplingValue += -tx.valueBalance; + + // Ignore following stats unless -zindex enabled + if (!fZindex) + continue; + + nShieldedSpends = tx.vShieldedSpend.size(); + nShieldedOutputs = tx.vShieldedOutput.size(); + isShieldedTx = (nShieldedSpends + nShieldedOutputs) > 0 ? true : false; + + // We want to avoid full verification with a low false-positive rate + // TODO: A nefarious user could create xtns which meet these criteria and skew stats, what + // else can we look for which is not full validation? + // Can we filter on properties of tx.vout[0] ? + if(tx.vin.size()==13 && tx.vout.size()==2 && tx.vout[1].scriptPubKey.IsOpReturn() && tx.vout[1].nValue==0) { + nNotarizations++; + } + + //NOTE: These are at best heuristics. Improve them as much as possible. + // You cannot compare stats generated from different sets of heuristics, so + // if you change this code, you must reindex or delete datadir + resync from scratch, or you + // will be mixing together data from two set of heuristics. + if(isShieldedTx) { + nShieldedTx++; + // NOTE: It's possible for very complex transactions to be both shielding and deshielding, + // such as (t,z)=>(t,z) Since these transactions cannot be made via RPCs currently, they + // would currently need to be made via raw transactions + if(tx.vin.size()==0 && tx.vout.size()==0) { + nFullyShieldedTx++; + } else if(tx.vin.size()>0) { + nShieldingTx++; + } else if(tx.vout.size()>0) { + nDeshieldingTx++; + } + + if (nShieldedOutputs >= 1) { + // If there are shielded outputs, count each as a payment + // By default, if there is more than 1 output, we assume 1 zaddr change output which is not a payment. + // In the case of multiple outputs which spend inputs exactly, there is no change output and this + // heuristic will undercount payments. Since this edge case is rare, this seems acceptable. 
+ // t->(t,t,z) = 1 shielded payment + // z->(z,z) = 1 shielded payment + shielded change + // t->(z,z) = 1 shielded payment + shielded change + // t->(t,z) = 1 shielded payment + transparent change + // (z,z)->z = 1 shielded payment (has this xtn ever occurred?) + // z->(z,z,z) = 2 shielded payments + shielded change + // Assume that there is always 1 change output when there are more than one output + nShieldedPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; + // since we have at least 1 zoutput, all transparent outputs are payments, not change + nShieldedPayments += tx.vout.size(); + + // Fully shielded do not count toward shielding/deshielding + if(tx.vin.size()==0 && tx.vout.size()==0) { + nFullyShieldedPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; + } else { + nShieldingPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; + // Also count remaining taddr outputs as payments + nShieldedPayments += tx.vout.size(); + } + } else if (nShieldedSpends >=1) { + // Shielded inputs with no shielded outputs. We know none are change output because + // change would flow back to the zaddr + // z->t = 1 shielded payment + // z->(t,t) = 2 shielded payments + // z->(t,t,t) = 3 shielded payments + nShieldedPayments += tx.vout.size(); + nDeshieldingPayments += tx.vout.size() > 1 ? tx.vout.size()-1 : tx.vout.size(); + } + nPayments += nShieldedPayments; + } else { + // No shielded payments, add transparent payments minus a change address + nPayments += tx.vout.size() > 1 ? tx.vout.size()-1 : tx.vout.size(); + } + // To calculate the anonset we must track the sum of spends and zouts in every tx, in every block. 
-- Duke + nShieldedOutputsInBlock += nShieldedOutputs; + nShieldedSpendsInBlock += nShieldedSpends; + if (fZdebug) { + fprintf(stderr,"%s: tx=%s has zspends=%d zouts=%d\n", __FUNCTION__, tx.GetHash().ToString().c_str(), nShieldedSpends, nShieldedOutputs ); + } + } + if (fDebug) { + fprintf(stderr,"%s: block %s has total zspends=%d zouts=%d\n", __FUNCTION__, block.GetHash().ToString().c_str(), nShieldedSpendsInBlock, nShieldedOutputsInBlock ); + } + + pindexNew->nSproutValue = sproutValue; + pindexNew->nChainSproutValue = boost::none; + pindexNew->nSaplingValue = saplingValue; + pindexNew->nChainSaplingValue = boost::none; + pindexNew->nFile = pos.nFile; + pindexNew->nDataPos = pos.nPos; + pindexNew->nUndoPos = 0; + pindexNew->nStatus |= BLOCK_HAVE_DATA; + pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); + + if (fZindex) { + pindexNew->nPayments = nPayments; + pindexNew->nShieldedTx = nShieldedTx; + pindexNew->nShieldedOutputs = nShieldedOutputsInBlock; + pindexNew->nShieldedSpends = nShieldedSpendsInBlock; + pindexNew->nFullyShieldedTx = nFullyShieldedTx; + pindexNew->nDeshieldingTx = nDeshieldingTx; + pindexNew->nShieldingTx = nShieldingTx; + pindexNew->nShieldedPayments = nShieldedPayments; + pindexNew->nFullyShieldedPayments = nFullyShieldedPayments; + pindexNew->nDeshieldingPayments = nDeshieldingPayments; + pindexNew->nShieldingPayments = nShieldingPayments; + pindexNew->nNotarizations = nNotarizations; + } + setDirtyBlockIndex.insert(pindexNew); + + if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) { + // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. + deque<CBlockIndex*> queue; + queue.push_back(pindexNew); + + // Recursively process any descendant blocks that now may be eligible to be connected. + while (!queue.empty()) { + CBlockIndex *pindex = queue.front(); + queue.pop_front(); + pindex->nChainTx = (pindex->pprev ? 
pindex->pprev->nChainTx : 0) + pindex->nTx; + + // Update -zindex stats + if (fZindex) { + if (fZdebug) { + //fprintf(stderr,"%s: setting blockchain zstats with zspends=%d, zouts=%d\n", __FUNCTION__, nShieldedSpendsInBlock, nShieldedOutputsInBlock ); + } + if (pindex->pprev) { + // If chain stats are zero (such as after restart), load data from zindex.dat + if (pindex->pprev->nChainNotarizations == 0) + pindex->pprev->nChainNotarizations = zstats.nChainNotarizations; + if (pindex->pprev->nChainShieldedTx == 0) + pindex->pprev->nChainShieldedTx = zstats.nChainShieldedTx; + if (pindex->pprev->nChainShieldedOutputs == 0) + pindex->pprev->nChainShieldedOutputs = zstats.nChainShieldedOutputs; + if (pindex->pprev->nChainShieldedSpends == 0) { + pindex->pprev->nChainShieldedSpends = zstats.nChainShieldedSpends; + // TODO: if zstats.nHeight != chainActive.Height() the stats will be off + fprintf(stderr, "%s: loaded anonymity set of %li at stats height=%li vs local height=%d from disk\n", __func__, zstats.nChainShieldedOutputs - zstats.nChainShieldedSpends, zstats.nHeight, chainActive.Height() ); + } + if (pindex->pprev->nChainFullyShieldedTx == 0) + pindex->pprev->nChainFullyShieldedTx = zstats.nChainFullyShieldedTx; + if (pindex->pprev->nChainShieldingTx == 0) + pindex->pprev->nChainShieldingTx = zstats.nChainShieldingTx; + if (pindex->pprev->nChainDeshieldingTx == 0) + pindex->pprev->nChainDeshieldingTx = zstats.nChainDeshieldingTx; + if (pindex->pprev->nChainPayments == 0) { + fprintf(stderr, "%s: setting nChainPayments=%li at height %d\n", __func__, zstats.nChainPayments, chainActive.Height() ); + pindex->pprev->nChainPayments = zstats.nChainPayments; + } + if (pindex->pprev->nChainShieldedPayments == 0) + pindex->pprev->nChainShieldedPayments = zstats.nChainShieldedPayments; + if (pindex->pprev->nChainFullyShieldedPayments == 0) + pindex->pprev->nChainFullyShieldedPayments = zstats.nChainFullyShieldedPayments; + if (pindex->pprev->nChainShieldingPayments == 0) + 
pindex->pprev->nChainShieldingPayments = zstats.nChainShieldingPayments; + if (pindex->pprev->nChainDeshieldingPayments == 0) + pindex->pprev->nChainDeshieldingPayments = zstats.nChainDeshieldingPayments; + } + + pindex->nChainNotarizations = (pindex->pprev ? pindex->pprev->nChainNotarizations : 0) + pindex->nNotarizations; + pindex->nChainShieldedTx = (pindex->pprev ? pindex->pprev->nChainShieldedTx : 0) + pindex->nShieldedTx; + pindex->nChainShieldedOutputs = (pindex->pprev ? pindex->pprev->nChainShieldedOutputs : 0) + pindex->nShieldedOutputs; + pindex->nChainShieldedSpends = (pindex->pprev ? pindex->pprev->nChainShieldedSpends : 0) + pindex->nShieldedSpends; + pindex->nChainFullyShieldedTx = (pindex->pprev ? pindex->pprev->nChainFullyShieldedTx : 0) + pindex->nFullyShieldedTx; + pindex->nChainShieldingTx = (pindex->pprev ? pindex->pprev->nChainShieldingTx : 0) + pindex->nShieldingTx; + pindex->nChainDeshieldingTx = (pindex->pprev ? pindex->pprev->nChainDeshieldingTx : 0) + pindex->nDeshieldingTx; + pindex->nChainPayments = (pindex->pprev ? pindex->pprev->nChainPayments : 0) + pindex->nPayments; + pindex->nChainShieldedPayments = (pindex->pprev ? pindex->pprev->nChainShieldedPayments : 0) + pindex->nShieldedPayments; + pindex->nChainFullyShieldedPayments = (pindex->pprev ? pindex->pprev->nChainFullyShieldedPayments : 0) + pindex->nFullyShieldedPayments; + pindex->nChainShieldingPayments = (pindex->pprev ? pindex->pprev->nChainShieldingPayments : 0) + pindex->nShieldingPayments; + pindex->nChainDeshieldingPayments = (pindex->pprev ? 
pindex->pprev->nChainDeshieldingPayments : 0) + pindex->nDeshieldingPayments; + + // Update in-memory structure that gets serialized to zindex.dat + zstats.nHeight = pindex->GetHeight(); + zstats.nChainNotarizations = pindex->nChainNotarizations ; + zstats.nChainShieldedTx = pindex->nChainShieldedTx ; + zstats.nChainShieldedOutputs = pindex->nChainShieldedOutputs ; + zstats.nChainShieldedSpends = pindex->nChainShieldedSpends ; + zstats.nChainFullyShieldedTx = pindex->nChainFullyShieldedTx ; + zstats.nChainShieldingTx = pindex->nChainShieldingTx ; + zstats.nChainDeshieldingTx = pindex->nChainDeshieldingTx ; + zstats.nChainPayments = pindex->nChainPayments ; + zstats.nChainShieldedPayments = pindex->nChainShieldedPayments ; + zstats.nChainFullyShieldedPayments = pindex->nChainFullyShieldedPayments ; + zstats.nChainShieldingPayments = pindex->nChainShieldingPayments ; + zstats.nChainDeshieldingPayments = pindex->nChainDeshieldingPayments ; + fprintf(stderr,"%s: setting zstats with height,zouts,zspends,anonset=%li,%li,%li,%li\n", __FUNCTION__, zstats.nHeight, zstats.nChainShieldedOutputs, zstats.nChainShieldedSpends, zstats.nChainShieldedOutputs - zstats.nChainShieldedSpends); + + } + + if (pindex->pprev) { + if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) { + pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue; + } else { + pindex->nChainSproutValue = boost::none; + } + if (pindex->pprev->nChainSaplingValue) { + pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue; + } else { + pindex->nChainSaplingValue = boost::none; + } + } else { + pindex->nChainSproutValue = pindex->nSproutValue; + pindex->nChainSaplingValue = pindex->nSaplingValue; + } + { + LOCK(cs_nBlockSequenceId); + pindex->nSequenceId = nBlockSequenceId++; + } + if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { + setBlockIndexCandidates.insert(pindex); + } + 
std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex); + while (range.first != range.second) { + std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first; + queue.push_back(it->second); + range.first++; + mapBlocksUnlinked.erase(it); + } + } + } else { + if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { + mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); + } + } + + + if (fZindex) + fprintf(stderr, "ht.%d, ShieldedPayments=%d, ShieldedTx=%d, ShieldedOutputs=%d, FullyShieldedTx=%d, ntz=%d\n", + pindexNew->GetHeight(), nShieldedPayments, nShieldedTx, nShieldedOutputs, nFullyShieldedTx, nNotarizations ); + + return true; +} + +bool FindBlockPos(int32_t tmpflag,CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown) +{ + std::vector<CBlockFileInfo> *ptr; int *lastfilep; + LOCK(cs_LastBlockFile); + + unsigned int nFile,maxTempFileSize; + + if ( tmpflag != 0 ) + { + ptr = &tmpBlockFiles; + nFile = nLastTmpFile; + lastfilep = &nLastTmpFile; + if (tmpBlockFiles.size() <= nFile) { + tmpBlockFiles.resize(nFile + 1); + } + if ( nFile == 0 ) + maxTempFileSize = maxTempFileSize0; + else if ( nFile == 1 ) + maxTempFileSize = maxTempFileSize1; + } + else + { + ptr = &vinfoBlockFile; + lastfilep = &nLastBlockFile; + nFile = fKnown ? pos.nFile : nLastBlockFile; + if (vinfoBlockFile.size() <= nFile) { + vinfoBlockFile.resize(nFile + 1); + } + } + + if (!fKnown) { + bool tmpfileflag = false; + while ( (*ptr)[nFile].nSize + nAddSize >= ((tmpflag != 0) ? 
maxTempFileSize : MAX_BLOCKFILE_SIZE) ) { + if ( tmpflag != 0 && tmpfileflag ) + break; + nFile++; + if ((*ptr).size() <= nFile) { + (*ptr).resize(nFile + 1); + } + tmpfileflag = true; + } + pos.nFile = nFile + tmpflag*TMPFILE_START; + pos.nPos = (*ptr)[nFile].nSize; + } + if (nFile != *lastfilep) { + if (!fKnown) { + LogPrintf("Leaving block file %i: %s\n", nFile, (*ptr)[nFile].ToString()); + } + FlushBlockFile(!fKnown); + //fprintf(stderr, "nFile = %i size.%li maxTempFileSize0.%u maxTempFileSize1.%u\n",nFile,tmpBlockFiles.size(),maxTempFileSize0,maxTempFileSize1); + if ( tmpflag != 0 && tmpBlockFiles.size() >= 3 ) + { + if ( nFile == 1 ) // Trying to get to second temp file. + { + if (!PruneOneBlockFile(true,TMPFILE_START+1)) + { + // file 1 is not ready to be used yet increase file 0's size. + fprintf(stderr, "Cant clear file 1!\n"); + // We will reset the position to the end of the first file, even if its over max size. + nFile = 0; + pos.nFile = TMPFILE_START; + pos.nPos = (*ptr)[0].nSize; + // Increase temp file one's max size by a chunk, so we wait a reasonable time to recheck the other file. + maxTempFileSize0 += BLOCKFILE_CHUNK_SIZE; + } + else + { + // The file 1 is able to be used now. Reset max size, and set nfile to use file 1. + fprintf(stderr, "CLEARED file 1!\n"); + maxTempFileSize0 = MAX_TEMPFILE_SIZE; + nFile = 1; + tmpBlockFiles[1].SetNull(); + pos.nFile = TMPFILE_START+1; + pos.nPos = (*ptr)[1].nSize; + boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); + LogPrintf("Prune: deleted temp blk (%05u)\n",nFile); + } + if ( 0 && tmpflag != 0 ) + fprintf(stderr,"pos.nFile %d nPos %u\n",pos.nFile,pos.nPos); + } + else if ( nFile == 2 ) // Trying to get to third temp file. + { + if (!PruneOneBlockFile(true,TMPFILE_START)) + { + fprintf(stderr, "Cant clear file 0!\n"); + // We will reset the position to the end of the second block file, even if its over max size. 
+ nFile = 1; + pos.nFile = TMPFILE_START+1; + pos.nPos = (*ptr)[1].nSize; + // Increase temp file one's max size by a chunk, so we wait a reasonable time to recheck the other file. + maxTempFileSize1 += BLOCKFILE_CHUNK_SIZE; + } + else + { + // The file 0 is able to be used now. Reset max size, and set nfile to use file 0. + fprintf(stderr, "CLEARED file 0!\n"); + maxTempFileSize1 = MAX_TEMPFILE_SIZE; + nFile = 0; + tmpBlockFiles[0].SetNull(); + pos.nFile = TMPFILE_START; + pos.nPos = (*ptr)[0].nSize; + boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); + LogPrintf("Prune: deleted temp blk (%05u)\n",nFile); + } + if ( 0 && tmpflag != 0 ) + fprintf(stderr,"pos.nFile %d nPos %u\n",pos.nFile,pos.nPos); + } + //sleep(30); + } + //fprintf(stderr, "nFile = %i size.%li maxTempFileSize0.%u maxTempFileSize1.%u\n",nFile,tmpBlockFiles.size(),maxTempFileSize0,maxTempFileSize1); sleep(30); + *lastfilep = nFile; + //fprintf(stderr, "*lastfilep = %i\n",*lastfilep); + } + + (*ptr)[nFile].AddBlock(nHeight, nTime); + if (fKnown) + (*ptr)[nFile].nSize = std::max(pos.nPos + nAddSize, (*ptr)[nFile].nSize); + else + (*ptr)[nFile].nSize += nAddSize; + + if (!fKnown) { + unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; + unsigned int nNewChunks = ((*ptr)[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; + if (nNewChunks > nOldChunks) { + if (fPruneMode) + fCheckForPruning = true; + if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { + FILE *file = OpenBlockFile(pos); + if (file) { + LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); + AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); + fclose(file); + } + } + else + return state.Error("out of disk space"); + } + } + + setDirtyFileInfo.insert(nFile + tmpflag*TMPFILE_START); + return true; +} + +bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, 
unsigned int nAddSize) +{ + std::vector<CBlockFileInfo> *ptr; int *lastfilep; + LOCK(cs_LastBlockFile); + pos.nFile = nFile; + if ( nFile >= TMPFILE_START ) + { + fprintf(stderr,"skip tmp undo\n"); + return(false); + nFile %= TMPFILE_START; + ptr = &tmpBlockFiles; + } else ptr = &vinfoBlockFile; + + unsigned int nNewSize; + pos.nPos = (*ptr)[nFile].nUndoSize; + nNewSize = (*ptr)[nFile].nUndoSize += nAddSize; + setDirtyFileInfo.insert(nFile); + + unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; + unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; + if (nNewChunks > nOldChunks) { + if (fPruneMode) + fCheckForPruning = true; + if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { + FILE *file = OpenUndoFile(pos); + if (file) { + LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); + AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); + fclose(file); + } + } + else + return state.Error("out of disk space"); + } + + return true; +} + +bool CheckBlockHeader(int32_t *futureblockp,int32_t height,CBlockIndex *pindex, const CBlockHeader& blockhdr, CValidationState& state, bool fCheckPOW) +{ + // Check timestamp + if ( 0 ) + { + uint256 hash; int32_t i; + hash = blockhdr.GetHash(); + for (i=31; i>=0; i--) + fprintf(stderr,"%02x",((uint8_t *)&hash)[i]); + fprintf(stderr," <- CheckBlockHeader\n"); + if ( chainActive.LastTip() != 0 ) + { + hash = chainActive.LastTip()->GetBlockHash(); + for (i=31; i>=0; i--) + fprintf(stderr,"%02x",((uint8_t *)&hash)[i]); + fprintf(stderr," <- chainTip\n"); + } + } + *futureblockp = 0; + if ( ASSETCHAINS_ADAPTIVEPOW > 0 ) + { + if (blockhdr.GetBlockTime() > GetTime() + 4) + { + //LogPrintf("CheckBlockHeader block from future %d error",blockhdr.GetBlockTime() - GetTime()); + return false; + } + } + else if (blockhdr.GetBlockTime() > GetTime() + 60) + { + /*CBlockIndex *tipindex; 
//fprintf(stderr,"ht.%d future block %u vs time.%u + 60\n",height,(uint32_t)blockhdr.GetBlockTime(),(uint32_t)GetTime()); + if ( (tipindex= chainActive.Tip()) != 0 && tipindex->GetBlockHash() == blockhdr.hashPrevBlock && blockhdr.GetBlockTime() < GetTime() + 60 + 5 ) + { + //fprintf(stderr,"it is the next block, let's wait for %d seconds\n",GetTime() + 60 - blockhdr.GetBlockTime()); + while ( blockhdr.GetBlockTime() > GetTime() + 60 ) + sleep(1); + //fprintf(stderr,"now its valid\n"); + } + else*/ + { + if (blockhdr.GetBlockTime() < GetTime() + 300) + *futureblockp = 1; + //LogPrintf("CheckBlockHeader block from future %d error",blockhdr.GetBlockTime() - GetTime()); + return false; //state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),REJECT_INVALID, "time-too-new"); + } + } + // Check block version + if (height > 0 && blockhdr.nVersion < MIN_BLOCK_VERSION) + return state.DoS(100, error("CheckBlockHeader(): block version too low"),REJECT_INVALID, "version-too-low"); + + // Check Equihash solution is valid + if ( fCheckPOW ) + { + if ( !CheckEquihashSolution(&blockhdr, Params()) ) + return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),REJECT_INVALID, "invalid-solution"); + if ( !CheckRandomXSolution(&blockhdr, height) ) + return state.DoS(100, error("CheckBlockHeader(): RandomX solution invalid"),REJECT_INVALID, "invalid-randomx-solution"); + } + // Check proof of work matches claimed amount + /*hush_index2pubkey33(pubkey33,pindex,height); + if ( fCheckPOW && !CheckProofOfWork(height,pubkey33,blockhdr.GetHash(), blockhdr.nBits, Params().GetConsensus(),blockhdr.nTime) ) + return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),REJECT_INVALID, "high-hash");*/ + return true; +} + +int32_t hush_checkPOW(int32_t slowflag,CBlock *pblock,int32_t height); + +bool CheckBlock(int32_t *futureblockp,int32_t height,CBlockIndex *pindex,const CBlock& block, CValidationState& state, + libzcash::ProofVerifier& 
verifier, + bool fCheckPOW, bool fCheckMerkleRoot) +{ + uint8_t pubkey33[33]; uint256 hash; uint32_t tiptime = (uint32_t)block.nTime; + // These are checks that are independent of context. + hash = block.GetHash(); + // Check that the header is valid (particularly PoW). This is mostly redundant with the call in AcceptBlockHeader. + if (!CheckBlockHeader(futureblockp,height,pindex,block,state,fCheckPOW)) + { + if ( *futureblockp == 0 ) + { + LogPrintf("CheckBlock header error"); + return false; + } + } + if ( pindex != 0 && pindex->pprev != 0 ) + tiptime = (uint32_t)pindex->pprev->nTime; + if ( fCheckPOW ) + { + //if ( !CheckEquihashSolution(&block, Params()) ) + // return state.DoS(100, error("CheckBlock: Equihash solution invalid"),REJECT_INVALID, "invalid-solution"); + hush_block2pubkey33(pubkey33,(CBlock *)&block); + if ( !CheckProofOfWork(block,pubkey33,height,Params().GetConsensus()) ) + { + int32_t z; for (z=31; z>=0; z--) + fprintf(stderr,"%02x",((uint8_t *)&hash)[z]); + fprintf(stderr," failed hash ht.%d\n",height); + return state.DoS(50, error("CheckBlock: proof of work failed"),REJECT_INVALID, "high-hash"); + } + if ( ASSETCHAINS_STAKED == 0 && hush_checkPOW(1,(CBlock *)&block,height) < 0 ) // checks Equihash + return state.DoS(100, error("CheckBlock: failed slow_checkPOW"),REJECT_INVALID, "failed-slow_checkPOW"); + } + + // Check the merkle root. + if (fCheckMerkleRoot) { + bool mutated; + uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated); + if (block.hashMerkleRoot != hashMerkleRoot2) + return state.DoS(100, error("CheckBlock: hashMerkleRoot mismatch"), + REJECT_INVALID, "bad-txnmrklroot", true); + + // Check for merkle tree malleability (CVE-2012-2459): repeating sequences + // of transactions in a block without affecting the merkle root of a block, + // while still invalidating it. 
+ if (mutated) + return state.DoS(100, error("CheckBlock: duplicate transaction"), + REJECT_INVALID, "bad-txns-duplicate", true); + } + + // All potential-corruption validation must be done before we do any + // transaction validation, as otherwise we may mark the header as invalid + // because we receive the wrong transactions for it. + + // Size limits + //fprintf(stderr,"%s checkblock %d -> %d vs blocksize.%d\n",SMART_CHAIN_SYMBOL,height,MAX_BLOCK_SIZE(height),(int32_t)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION)); + if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE(height) || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE(height)) + return state.DoS(100, error("CheckBlock: size limits failed"), REJECT_INVALID, "bad-blk-length"); + + // First transaction must be coinbase, the rest must not be + if (block.vtx.empty() || !block.vtx[0].IsCoinBase()) + return state.DoS(100, error("CheckBlock: first tx is not coinbase"), REJECT_INVALID, "bad-cb-missing"); + + for (unsigned int i = 1; i < block.vtx.size(); i++) + if (block.vtx[i].IsCoinBase()) + return state.DoS(100, error("CheckBlock: more than one coinbase"), REJECT_INVALID, "bad-cb-multiple"); + + // Check transactions + CTransaction sTx; + CTransaction *ptx = NULL; + if ( ASSETCHAINS_CC != 0 && !fCheckPOW ) + return true; + + + for (uint32_t i = 0; i < block.vtx.size(); i++) + { + const CTransaction& tx = block.vtx[i]; + if (!CheckTransaction(tiptime,tx, state, verifier, i, (int32_t)block.vtx.size())) + return error("CheckBlock: CheckTransaction failed"); + } + + unsigned int nSigOps = 0; + BOOST_FOREACH(const CTransaction& tx, block.vtx) + { + nSigOps += GetLegacySigOpCount(tx); + } + if (nSigOps > MAX_BLOCK_SIGOPS) + return state.DoS(100, error("CheckBlock: out-of-bounds SigOpCount"), + REJECT_INVALID, "bad-blk-sigops", true); + return true; +} + +bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev) +{ 
+ const CChainParams& chainParams = Params(); + const Consensus::Params& consensusParams = chainParams.GetConsensus(); + uint256 hash = block.GetHash(); + if (hash == consensusParams.hashGenesisBlock) + return true; + + assert(pindexPrev); + + int daaForkHeight = GetArg("-daaforkheight", 450000); + int nHeight = pindexPrev->GetHeight()+1; + bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; + // Check Proof-of-Work difficulty + if (ishush3) { + + // Difficulty (nBits) relies on the current blocktime of this block + if ((ASSETCHAINS_BLOCKTIME != 75) && (nHeight >= nFirstHalvingHeight)) { + LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,nHeight); + ASSETCHAINS_BLOCKTIME = 75; + hush_changeblocktime(); + } + // The change of blocktime from 150s to 75s caused incorrect AWT of 34 blocks instead of 17 + // caused by the fact that Difficulty Adjustment Algorithms do not take into account blocktime + // changing at run-time, from Consensus::Params being a const struct + unsigned int nNextWork = GetNextWorkRequired(pindexPrev, &block, consensusParams); + + if (fDebug) { + LogPrintf("%s: nbits ,%d,%lu,%lu,%d\n",__func__, nHeight, nNextWork, block.nBits, nNextWork - block.nBits ); + } + if (block.nBits != nNextWork) { + // Enforce correct nbits at DAA fork height, before that, ignore + if (nHeight > daaForkHeight) { + //cout << "Incorrect HUSH diffbits at height " << nHeight << + // " " << block.nBits << " block.nBits vs. 
calc " << nNextWork << + // " " << block.GetHash().ToString() << " @ " << block.GetBlockTime() << endl; + return state.DoS(100, error("%s: Incorrect diffbits at height %d: %lu vs %lu ", __func__, nHeight, nNextWork, block.nBits), REJECT_INVALID, "bad-diffbits"); + } else { + // cout << "Ignoring nbits for height=" << nHeight << endl; + } + } + } + + // Check timestamp against prev + if (ASSETCHAINS_ADAPTIVEPOW <= 0 || nHeight < 30) { + if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast() ) + { + fprintf(stderr,"ht.%d too early %u vs %u\n",(int32_t)nHeight,(uint32_t)block.GetBlockTime(),(uint32_t)pindexPrev->GetMedianTimePast()); + return state.Invalid(error("%s: block's timestamp is too early based on median time", __func__), REJECT_INVALID, "time-too-old-median"); + } + } else { + if ( block.GetBlockTime() <= pindexPrev->nTime ) + { + fprintf(stderr,"ht.%d too early2 %u vs %u\n",(int32_t)nHeight,(uint32_t)block.GetBlockTime(),(uint32_t)pindexPrev->nTime); + return state.Invalid(error("%s: block's timestamp is too early based on previous block", __func__), REJECT_INVALID, "time-too-old-prevblock"); + } + } + + // Check that timestamp is not too far in the future + if (block.GetBlockTime() > GetTime() + consensusParams.nMaxFutureBlockTime) { + return state.Invalid(error("%s: block timestamp too far in the future", __func__), REJECT_INVALID, "time-too-new"); + } + + if (fCheckpointsEnabled) { + // Check that the block chain matches the known block chain up to a checkpoint + if (!Checkpoints::CheckBlock(chainParams.Checkpoints(), nHeight, hash)) + { + return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight),REJECT_CHECKPOINT, "checkpoint mismatch"); + } + // Don't accept any forks from the main chain prior to last checkpoint + CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints()); + int32_t notarized_height; + if ( nHeight == 1 && chainActive.LastTip() != 0 && chainActive.LastTip()->GetHeight() > 
1 ) + { + CBlockIndex *heightblock = chainActive[nHeight]; + if ( heightblock != 0 && heightblock->GetBlockHash() == hash ) + return true; + return state.DoS(1, error("%s: trying to change height 1 forbidden", __func__)); + } + if ( nHeight != 0 ) + { + if ( pcheckpoint != 0 && nHeight < pcheckpoint->GetHeight() ) + return state.DoS(1, error("%s: forked chain older than last checkpoint (height %d) vs %d", __func__, nHeight,pcheckpoint->GetHeight())); + if ( hush_checkpoint(&notarized_height,nHeight,hash) < 0 ) + { + CBlockIndex *heightblock = chainActive[nHeight]; + if ( heightblock != 0 && heightblock->GetBlockHash() == hash ) + { + //fprintf(stderr,"got a pre notarization block that matches height.%d\n",(int32_t)nHeight); + return true; + } else return state.DoS(1, error("%s: forked chain older than last notarized (height %d) vs %d", __func__, nHeight, notarized_height)); + } + } + } + // Reject block.nVersion < 4 blocks + if (block.nVersion < 4) + return state.Invalid(error("%s : rejected nVersion<4 block", __func__), REJECT_OBSOLETE, "bad-version"); + + return true; +} + +bool ContextualCheckBlock(int32_t slowflag,const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev) +{ + const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->GetHeight() + 1; + //const Consensus::Params& consensusParams = Params().GetConsensus(); + //bool sapling = true; //NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_SAPLING); + + // Check that all transactions are finalized + for (uint32_t i = 0; i < block.vtx.size(); i++) { + const CTransaction& tx = block.vtx[i]; + + // Check transaction contextually against consensus rules at block height + if (!ContextualCheckTransaction(slowflag,&block,pindexPrev,tx, state, nHeight, 100)) { + return false; // Failure reason has been set in validation state object + } + + int nLockTimeFlags = 0; + int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST) + ? 
pindexPrev->GetMedianTimePast() + : block.GetBlockTime(); + if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) { + return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); + } + } + + // Enforce BIP 34 rule that the coinbase starts with serialized block height. + // In Hush this has been enforced since launch, except that the genesis + // block didn't include the height in the coinbase (see Zcash protocol spec + // section '6.8 Bitcoin Improvement Proposals'). + if (nHeight > 0) + { + CScript expect = CScript() << nHeight; + if (block.vtx[0].vin[0].scriptSig.size() < expect.size() || + !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) { + return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height"); + } + } + return true; +} + +bool AcceptBlockHeader(int32_t *futureblockp,const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex) +{ + static uint256 zero; + const CChainParams& chainparams = Params(); + AssertLockHeld(cs_main); + + // Check for duplicate + uint256 hash = block.GetHash(); + BlockMap::iterator miSelf = mapBlockIndex.find(hash); + if(fDebug) { + std::cerr << __func__ << ": blockhash=" << hash.ToString() << endl; + } + CBlockIndex *pindex = NULL; + if (miSelf != mapBlockIndex.end()) { + // Block header is already known. 
+ if ( (pindex = miSelf->second) == 0 ) + miSelf->second = pindex = AddToBlockIndex(block); + if (ppindex) + *ppindex = pindex; + if ( pindex != 0 && (pindex->nStatus & BLOCK_FAILED_MASK) != 0 ) { + if ( ASSETCHAINS_CC == 0 ) { + std::cerr << __func__ << ": block " << hash.ToString() << " marked invalid"; + return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate"); + } else { + fprintf(stderr,"reconsider block %s\n",hash.GetHex().c_str()); + pindex->nStatus &= ~BLOCK_FAILED_MASK; + } + } + return true; + } + if (!CheckBlockHeader(futureblockp,*ppindex!=0?(*ppindex)->GetHeight():0,*ppindex, block, state,0)) { + if ( *futureblockp == 0 ) { + LogPrintf("%s: CheckBlockHeader futureblock=0\n", __func__); + return false; + } + } + if(fDebug) { + fprintf(stderr,"%s: CheckBlockHeader passed\n",__func__); + } + // Get prev block index + CBlockIndex* pindexPrev = NULL; + if (hash != chainparams.GetConsensus().hashGenesisBlock) + { + BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); + if (mi == mapBlockIndex.end()) + { + LogPrintf("%s: hashPrevBlock %s not found\n",__func__, block.hashPrevBlock.ToString().c_str()); + //*futureblockp = 1; + return(false); + //return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk"); + } + pindexPrev = (*mi).second; + if (pindexPrev == 0 ) + { + LogPrintf("%s: hashPrevBlock %s no pindexPrev\n",__func__,block.hashPrevBlock.ToString().c_str()); + return(false); + } + if ( (pindexPrev->nStatus & BLOCK_FAILED_MASK) ) + return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); + } + if (!ContextualCheckBlockHeader(block, state, pindexPrev)) { + //fprintf(stderr,"AcceptBlockHeader ContextualCheckBlockHeader failed\n"); + LogPrintf("%s: ContextualCheckBlockHeader failed\n",__func__); + return false; + } + if(fDebug) { + fprintf(stderr,"%s: ContextualCheckBlockHeader passed: %s\n", __func__, hash.ToString().c_str()); + } + if (pindex == NULL) + { 
+ if ( (pindex= AddToBlockIndex(block)) != 0 ) + { + miSelf = mapBlockIndex.find(hash); + if (miSelf != mapBlockIndex.end()) + miSelf->second = pindex; + //fprintf(stderr,"AcceptBlockHeader couldnt add to block index\n"); + } + } + if (ppindex) + *ppindex = pindex; + return true; +} + +uint256 Queued_reconsiderblock; + +bool AcceptBlock(int32_t *futureblockp,CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp) +{ + const CChainParams& chainparams = Params(); + AssertLockHeld(cs_main); + + CBlockIndex *&pindex = *ppindex; + if (!AcceptBlockHeader(futureblockp, block, state, &pindex)) + { + if ( *futureblockp == 0 ) + { + LogPrintf("%s: AcceptBlockHeader error\n",__func__); + return false; + } + } + if ( pindex == 0 ) + { + LogPrintf("%s: null pindex\n", __func__); + *futureblockp = true; + return false; + } + //fprintf(stderr,"acceptblockheader passed\n"); + // Try to process all requested blocks that we don't have, but only + // process an unrequested block if it's new and has enough work to + // advance our tip, and isn't too many blocks ahead. + bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; + bool fHasMoreWork = (chainActive.Tip() ? pindex->chainPower > chainActive.Tip()->chainPower : true); + // Blocks that are too out-of-order needlessly limit the effectiveness of + // pruning, because pruning will not delete block files that contain any + // blocks which are too close in height to the tip. Apply this test + // regardless of whether pruning is enabled; it should generally be safe to + // not process unrequested blocks. + bool fTooFarAhead = (pindex->GetHeight() > int(chainActive.Height() + BLOCK_DOWNLOAD_WINDOW)); //MIN_BLOCKS_TO_KEEP)); + + // TODO: deal better with return value and error conditions for duplicate + // and unrequested blocks. 
+ //fprintf(stderr,"Accept %s flags already.%d requested.%d morework.%d farahead.%d\n",pindex->GetBlockHash().ToString().c_str(),fAlreadyHave,fRequested,fHasMoreWork,fTooFarAhead); + if (fAlreadyHave) return true; + if (!fRequested) { // If we didn't ask for it: + if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned + if (!fHasMoreWork) return true; // Don't process less-work chains + if (fTooFarAhead) return true; // Block height is too high + } + + // See method docstring for why this is always disabled + auto verifier = libzcash::ProofVerifier::Disabled(); + bool fContextualCheckBlock = ContextualCheckBlock(0,block, state, pindex->pprev); + if ( (!CheckBlock(futureblockp,pindex->GetHeight(),pindex,block, state, verifier,0)) || !fContextualCheckBlock ) + { + static int32_t saplinght = -1; + CBlockIndex *tmpptr; + if ( saplinght == -1 ) + saplinght = Params().GetConsensus().vUpgrades[Consensus::UPGRADE_SAPLING].nActivationHeight; + if ( saplinght < 0 ) + *futureblockp = 1; + // the problem is when a future sapling block comes in before we detected saplinght + if ( saplinght > 0 && (tmpptr= chainActive.LastTip()) != 0 ) + { + fprintf(stderr,"saplinght.%d tipht.%d blockht.%d cmp.%d\n",saplinght,(int32_t)tmpptr->GetHeight(),pindex->GetHeight(),pindex->GetHeight() < 0 || (pindex->GetHeight() >= saplinght && pindex->GetHeight() < saplinght+50000) || (tmpptr->GetHeight() > saplinght-720 && tmpptr->GetHeight() < saplinght+720)); + if ( pindex->GetHeight() < 0 || (pindex->GetHeight() >= saplinght && pindex->GetHeight() < saplinght+50000) || (tmpptr->GetHeight() > saplinght-720 && tmpptr->GetHeight() < saplinght+720) ) + *futureblockp = 1; + if ( ASSETCHAINS_CBOPRET != 0 ) + { + CValidationState tmpstate; CBlockIndex *tmpindex; int32_t ht,longest; + ht = (int32_t)pindex->GetHeight(); + longest = hush_longestchain(); + if ( (longest == 0 || ht < longest-6) && (tmpindex=hush_chainactive(ht)) != 0 ) + { + fprintf(stderr,"reconsider 
height.%d, longest.%d\n",(int32_t)ht,longest); + if ( Queued_reconsiderblock == zeroid ) + Queued_reconsiderblock = pindex->GetBlockHash(); + } + } + } + if ( *futureblockp == 0 ) + { + if (state.IsInvalid() && !state.CorruptionPossible()) { + pindex->nStatus |= BLOCK_FAILED_VALID; + setDirtyBlockIndex.insert(pindex); + } + LogPrintf("AcceptBlock CheckBlock or ContextualCheckBlock error\n"); + return false; + } + } + if ( fContextualCheckBlock ) + pindex->nStatus |= BLOCK_VALID_CONTEXT; + + int nHeight = pindex->GetHeight(); + // Temp File fix. LABS has been using this for ages with no bad effects. + // Disabled here. Set use tmp to whatever you need to use this for. + int32_t usetmp = 0; + if ( IsInitialBlockDownload() ) + usetmp = 0; + + // Write block to history file + try { + unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); + CDiskBlockPos blockPos; + if (dbp != NULL) + blockPos = *dbp; + if (!FindBlockPos(usetmp,state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL)) + return error("AcceptBlock(): FindBlockPos failed"); + if (dbp == NULL) + if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) + AbortNode(state, "Failed to write block"); + if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) + return error("AcceptBlock(): ReceivedBlockTransactions failed"); + if ( usetmp != 0 ) // not during initialdownload or if futureflag==0 and contextchecks ok + pindex->nStatus |= BLOCK_IN_TMPFILE; + } catch (const std::runtime_error& e) { + return AbortNode(state, std::string("System error: ") + e.what()); + } + + if (fCheckForPruning) + FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files + if ( *futureblockp == 0 ) + return true; + LogPrintf("AcceptBlock block from future error\n"); + return false; +} + +bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams) +{ + unsigned int nFound = 
0; + for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++) + { + if (pstart->nVersion >= minVersion) + ++nFound; + pstart = pstart->pprev; + } + return (nFound >= nRequired); +} + +bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot) +{ + AssertLockHeld(cs_main); + assert(pindexPrev == chainActive.Tip()); + + CCoinsViewCache viewNew(pcoinsTip); + CBlockIndex indexDummy(block); + indexDummy.pprev = pindexPrev; + indexDummy.SetHeight(pindexPrev->GetHeight() + 1); + // zk proofs are verified in ConnectBlock + auto verifier = libzcash::ProofVerifier::Disabled(); + // NOTE: CheckBlockHeader is called by CheckBlock + if (!ContextualCheckBlockHeader(block, state, pindexPrev)) + { + fprintf(stderr,"%s: failure A checkPOW=%d\n",__func__,fCheckPOW); + return false; + } + int32_t futureblock; + if (!CheckBlock(&futureblock,indexDummy.GetHeight(),0,block, state, verifier, fCheckPOW, fCheckMerkleRoot)) + { + fprintf(stderr,"%s: failure B checkPOW=%d\n",__func__, fCheckPOW); + return false; + } + if (!ContextualCheckBlock(0,block, state, pindexPrev)) + { + fprintf(stderr,"%s: failure C checkPOW=%d\n",__func__, fCheckPOW); + return false; + } + if (!ConnectBlock(block, state, &indexDummy, viewNew, true,fCheckPOW)) + { + fprintf(stderr,"%s: failure D checkPOW=%d\n",__func__,fCheckPOW); + return false; + } + assert(state.IsValid()); + if ( futureblock != 0 ) + return(false); + return true; +} + +// BLOCK PRUNING CODE +/* Calculate the amount of disk space the block & undo files currently use */ +uint64_t CalculateCurrentUsage() +{ + uint64_t retval = 0; + BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) { + retval += file.nSize + file.nUndoSize; + } + return retval; +} + +/* Prune a block file (modify associated database entries)*/ +bool PruneOneBlockFile(bool tempfile, const int fileNumber) +{ + uint256 notarized_hash,notarized_desttxid; 
int32_t prevMoMheight,notarized_height; + notarized_height = hush_notarized_height(&prevMoMheight,&notarized_hash,&notarized_desttxid); + //fprintf(stderr, "pruneblockfile.%i\n",fileNumber); sleep(15); + for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) + { + CBlockIndex* pindex = it->second; + if (pindex && pindex->nFile == fileNumber) + { + if ( tempfile && ((pindex->nStatus & BLOCK_IN_TMPFILE) != 0) ) + { + if ( chainActive.Contains(pindex) ) + { + // Block is in main chain so we cant clear this file! + return(false); + } + fprintf(stderr, "pindex height.%i notarized height.%i \n", pindex->GetHeight(), notarized_height); + if ( pindex->GetHeight() > notarized_height ) // Need to check this, does an invalid block have a height? + { + // This blocks height is not older than last notarization so it can be reorged into the main chain. + // We cant clear this file! + return(false); + } + else + { + // Block is not in main chain and is older than last notarized block so its safe for removal. + fprintf(stderr, "Block [%i] in tempfile.%i We can clear this block!\n",pindex->GetHeight(),fileNumber); + // Add index to list and remove after loop? + } + } + pindex->nStatus &= ~BLOCK_HAVE_DATA; + pindex->nStatus &= ~BLOCK_HAVE_UNDO; + pindex->nFile = 0; + pindex->nDataPos = 0; + pindex->nUndoPos = 0; + setDirtyBlockIndex.insert(pindex); + // Prune from mapBlocksUnlinked -- any block we prune would have + // to be downloaded again in order to consider its chain, at which + // point it would be considered as a candidate for + // mapBlocksUnlinked or setBlockIndexCandidates. 
+ std::pair<std::multimap<CBlockIndex*, CBlockIndex*>::iterator, std::multimap<CBlockIndex*, CBlockIndex*>::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); + while (range.first != range.second) + { + std::multimap<CBlockIndex*, CBlockIndex*>::iterator it = range.first; + range.first++; + if (it->second == pindex) + { + mapBlocksUnlinked.erase(it); + } + } + } + } + if (!tempfile) + vinfoBlockFile[fileNumber].SetNull(); + setDirtyFileInfo.insert(fileNumber); + return(true); +} + + +void UnlinkPrunedFiles(std::set<int>& setFilesToPrune) +{ + for (set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { + CDiskBlockPos pos(*it, 0); + boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); + boost::filesystem::remove(GetBlockPosFilename(pos, "rev")); + LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it); + } +} + +/* Calculate the block/rev files that should be deleted to remain under target*/ +void FindFilesToPrune(std::set<int>& setFilesToPrune) +{ + LOCK2(cs_main, cs_LastBlockFile); + if (chainActive.Tip() == NULL || nPruneTarget == 0) { + return; + } + if (chainActive.Tip()->GetHeight() <= Params().PruneAfterHeight()) { + return; + } + unsigned int nLastBlockWeCanPrune = chainActive.Tip()->GetHeight() - MIN_BLOCKS_TO_KEEP; + uint64_t nCurrentUsage = CalculateCurrentUsage(); + // We don't check to prune until after we've allocated new space for files + // So we should leave a buffer under our target to account for another allocation + // before the next pruning. + uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; + uint64_t nBytesToPrune; + int count=0; + + if (nCurrentUsage + nBuffer >= nPruneTarget) { + for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { + nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; + + if (vinfoBlockFile[fileNumber].nSize == 0) + continue; + + if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target? 
+ break; + + // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning + if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) + continue; + + PruneOneBlockFile(false, fileNumber); + // Queue up the files for removal + setFilesToPrune.insert(fileNumber); + nCurrentUsage -= nBytesToPrune; + count++; + } + } + + LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", + nPruneTarget/1024/1024, nCurrentUsage/1024/1024, + ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024, + nLastBlockWeCanPrune, count); +} + +bool CheckDiskSpace(uint64_t nAdditionalBytes) +{ + uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available; + if(fDebug) { + fprintf(stderr,"Free bytes on disk: %lu\n", nFreeBytesAvailable); + } + // Check for nMinDiskSpace bytes (defined in main.h) + if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) + return AbortNode("Disk space is low!!!", _("Error: Disk space is low!!!")); + + return true; +} + +FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) +{ + static int32_t didinit[256]; + if (pos.IsNull()) + return NULL; + boost::filesystem::path path = GetBlockPosFilename(pos, prefix); + boost::filesystem::create_directories(path.parent_path()); + FILE* file = fopen(path.string().c_str(), "rb+"); + if (!file && !fReadOnly) + file = fopen(path.string().c_str(), "wb+"); + if (!file) { + LogPrintf("Unable to open file %s\n", path.string()); + return NULL; + } + if ( pos.nFile < sizeof(didinit)/sizeof(*didinit) && didinit[pos.nFile] == 0 && strcmp(prefix,(char *)"blk") == 0 ) + { + hush_prefetch(file); + didinit[pos.nFile] = 1; + } + if (pos.nPos) { + if (fseek(file, pos.nPos, SEEK_SET)) { + LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); + fclose(file); + return NULL; + } + } + return file; +} + +FILE* OpenBlockFile(const CDiskBlockPos &pos, bool 
fReadOnly) { + return OpenDiskFile(pos, "blk", fReadOnly); +} + +FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { + return OpenDiskFile(pos, "rev", fReadOnly); +} + +boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) +{ + return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); +} + +CBlockIndex * InsertBlockIndex(uint256 hash) +{ + if (hash.IsNull()) + return NULL; + + // Return existing + BlockMap::iterator mi = mapBlockIndex.find(hash); + if (mi != mapBlockIndex.end() && mi->second != NULL) + return (*mi).second; + + // Create new + CBlockIndex* pindexNew = new CBlockIndex(); + if (!pindexNew) + throw runtime_error("InsertBlockIndex(): new CBlockIndex failed"); + mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; + pindexNew->phashBlock = &((*mi).first); + //fprintf(stderr,"inserted to block index %s\n",hash.ToString().c_str()); + + return pindexNew; +} + +bool static LoadBlockIndexDB() +{ + const CChainParams& chainparams = Params(); + //LogPrintf("%s: start loading guts\n", __func__); + if (!pblocktree->LoadBlockIndexGuts()) + return false; + LogPrintf("%s: loaded guts\n", __func__); + boost::this_thread::interruption_point(); + + // Calculate chainPower + vector<pair<int, CBlockIndex*> > vSortedByHeight; + vSortedByHeight.reserve(mapBlockIndex.size()); + BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex) + { + CBlockIndex* pindex = item.second; + vSortedByHeight.push_back(make_pair(pindex->GetHeight(), pindex)); + } + if(fDebug) + fprintf(stderr,"load blockindexDB paired %u\n",(uint32_t)time(NULL)); + sort(vSortedByHeight.begin(), vSortedByHeight.end()); + if(fDebug) + fprintf(stderr,"load blockindexDB sorted %u\n",(uint32_t)time(NULL)); + + BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight) + { + CBlockIndex* pindex = item.second; + pindex->chainPower = (pindex->pprev ? 
CChainPower(pindex) + pindex->pprev->chainPower : CChainPower(pindex)) + GetBlockProof(*pindex); + // We can link the chain of blocks for which we've received transactions at some point. + // Pruned nodes may have deleted the block. + if (pindex->nTx > 0) { + if (pindex->pprev) { + if (pindex->pprev->nChainTx) { + pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; + if (fZindex) { + pindex->nChainNotarizations = pindex->pprev->nChainNotarizations + pindex->nNotarizations; + pindex->nChainShieldedTx = pindex->pprev->nChainShieldedTx + pindex->nShieldedTx; + pindex->nChainShieldedOutputs = pindex->pprev->nChainShieldedOutputs + pindex->nShieldedOutputs; + pindex->nChainShieldedPayments = pindex->pprev->nChainShieldedPayments + pindex->nShieldedPayments; + pindex->nChainShieldingTx = pindex->pprev->nChainShieldingTx + pindex->nShieldingTx; + + pindex->nChainPayments = pindex->pprev->nChainPayments + pindex->nPayments; + pindex->nChainShieldingPayments = pindex->pprev->nChainShieldingPayments + pindex->nShieldingPayments; + pindex->nChainDeshieldingTx = pindex->pprev->nChainShieldedTx + pindex->nShieldedTx; + pindex->nChainDeshieldingPayments = pindex->pprev->nChainShieldedPayments + pindex->nShieldedPayments; + pindex->nChainFullyShieldedTx = pindex->pprev->nChainFullyShieldedTx + pindex->nFullyShieldedTx; + pindex->nChainFullyShieldedPayments = pindex->pprev->nChainFullyShieldedPayments + pindex->nFullyShieldedPayments; + } + + if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) { + pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue; + } else { + pindex->nChainSproutValue = boost::none; + } + if (pindex->pprev->nChainSaplingValue) { + pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue; + } else { + pindex->nChainSaplingValue = boost::none; + } + } else { + pindex->nChainTx = 0; + if (fZindex) { + pindex->nChainPayments = 0; + pindex->nChainNotarizations = 0; + 
pindex->nChainShieldedTx = 0; + pindex->nChainShieldedOutputs = 0; + pindex->nChainFullyShieldedTx = 0; + pindex->nChainShieldedPayments = 0; + pindex->nChainShieldingPayments = 0; + pindex->nChainDeshieldingTx = 0; + pindex->nChainDeshieldingPayments = 0; + pindex->nChainFullyShieldedTx = 0; + pindex->nChainFullyShieldedPayments = 0; + } + pindex->nChainSproutValue = boost::none; + pindex->nChainSaplingValue = boost::none; + mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex)); + } + } else { + pindex->nChainTx = pindex->nTx; + pindex->nChainSproutValue = pindex->nSproutValue; + pindex->nChainSaplingValue = pindex->nSaplingValue; + if (fZindex) { + pindex->nChainPayments = pindex->nPayments; + pindex->nChainNotarizations = pindex->nNotarizations; + pindex->nChainShieldedTx = pindex->nShieldedTx; + pindex->nChainShieldedOutputs = pindex->nShieldedOutputs; + pindex->nChainShieldedPayments = pindex->nShieldedPayments; + pindex->nChainShieldingTx = pindex->nShieldingTx; + pindex->nChainShieldingPayments = pindex->nShieldingPayments; + pindex->nChainDeshieldingTx = pindex->nDeshieldingTx; + pindex->nChainDeshieldingPayments = pindex->nDeshieldingPayments; + pindex->nChainFullyShieldedPayments = pindex->nFullyShieldedPayments; + } + } + } + // Construct in-memory chain of branch IDs. + // Relies on invariant: a block that does not activate a network upgrade + // will always be valid under the same consensus rules as its parent. + // Genesis block has a branch ID of zero by definition, but has no + // validity status because it is side-loaded into a fresh chain. + // Activation blocks will have branch IDs set (read from disk). 
+ if (pindex->pprev) { + if (pindex->IsValid(BLOCK_VALID_CONSENSUS) && !pindex->nCachedBranchId) { + pindex->nCachedBranchId = pindex->pprev->nCachedBranchId; + } + } else { + pindex->nCachedBranchId = SPROUT_BRANCH_ID; + } + if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL)) + setBlockIndexCandidates.insert(pindex); + if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->chainPower > pindexBestInvalid->chainPower)) + pindexBestInvalid = pindex; + if (pindex->pprev) + pindex->BuildSkip(); + if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) + pindexBestHeader = pindex; + } + fprintf(stderr,"load blockindexDB chained %u\n",(uint32_t)time(NULL)); + + // Load block file info + pblocktree->ReadLastBlockFile(nLastBlockFile); + vinfoBlockFile.resize(nLastBlockFile + 1); + tmpBlockFiles.resize(nLastTmpFile + 1); + LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile); + for (int nFile = 0; nFile <= nLastBlockFile; nFile++) { + pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]); + } + LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString()); + for (int nFile = nLastBlockFile + 1; true; nFile++) { + CBlockFileInfo info; + if (pblocktree->ReadBlockFileInfo(nFile, info)) { + vinfoBlockFile.push_back(info); + } else { + break; + } + } + + // Check presence of blk files + LogPrintf("Checking all blk files are present...\n"); + set<int> setBlkDataFiles; + BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex) + { + CBlockIndex* pindex = item.second; + if (pindex->nStatus & BLOCK_HAVE_DATA) { + setBlkDataFiles.insert(pindex->nFile); + } + } + fprintf(stderr,"load blockindexDB %u\n",(uint32_t)time(NULL)); + for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) + { + CDiskBlockPos pos(*it, 0); + if (CAutoFile(OpenBlockFile(pos, true),
SER_DISK, CLIENT_VERSION).IsNull()) { + return false; + } + } + + // Check whether we have ever pruned block & undo files + pblocktree->ReadFlag("prunedblockfiles", fHavePruned); + if (fHavePruned) + LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n"); + + // Check whether we need to continue reindexing + bool fReindexing = false; + pblocktree->ReadReindexing(fReindexing); + fReindex |= fReindexing; + + // Check whether we have a transaction index + pblocktree->ReadFlag("txindex", fTxIndex); + LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled"); + + // Check whether we have an address index + pblocktree->ReadFlag("addressindex", fAddressIndex); + LogPrintf("%s: address index %s\n", __func__, fAddressIndex ? "enabled" : "disabled"); + + // Check whether we have a shielded index + pblocktree->ReadFlag("zindex", fZindex); + LogPrintf("%s: shielded index %s\n", __func__, fZindex ? "enabled" : "disabled"); + + // Check whether we have a timestamp index + pblocktree->ReadFlag("timestampindex", fTimestampIndex); + LogPrintf("%s: timestamp index %s\n", __func__, fTimestampIndex ? "enabled" : "disabled"); + + // Check whether we have a spent index + pblocktree->ReadFlag("spentindex", fSpentIndex); + LogPrintf("%s: spent index %s\n", __func__, fSpentIndex ? 
"enabled" : "disabled"); + + // Load pointer to end of best chain + BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); + if (it == mapBlockIndex.end()) + return true; + + chainActive.SetTip(it->second); + + // Try to detect if we are z2z based on height of blocks on disk + // This helps to set it correctly on startup before a new block is connected + if(ishush3 && chainActive.Height() >= 340000) { + LogPrintf("%s: enabled ac_private=1 at height=%d\n", __func__, chainActive.Height()); + ASSETCHAINS_PRIVATE = 1; + } + + // Set hashFinalSproutRoot for the end of best chain + it->second->hashFinalSproutRoot = pcoinsTip->GetBestAnchor(SPROUT); + + fprintf(stderr,"about to prune block index\n"); + + PruneBlockIndexCandidates(); + + double progress; + if ( ishush3 ) { + progress = Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.LastTip()); + } else { + int32_t longestchain = hush_longestchain(); + // TODO: hush_longestchain does not have the data it needs at the time LoadBlockIndexDB + // runs, which makes it return 0, so we guess 50% for now + progress = (longestchain > 0 ) ? 
(double) chainActive.Height() / longestchain : 0.5; + } + LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__, + chainActive.LastTip()->GetBlockHash().ToString(), chainActive.Height(), + DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.LastTip()->GetBlockTime()), + progress); + + CBlockIndex *pindex; + if ( (pindex= chainActive.LastTip()) != 0 ) + { + if ( ASSETCHAINS_SAPLING <= 0 ) + { + fprintf(stderr,"set sapling height, if possible from ht.%d %u\n",(int32_t)pindex->GetHeight(),(uint32_t)pindex->nTime); + hush_activate_sapling(pindex); + } + } + return true; +} + +CVerifyDB::CVerifyDB() +{ + uiInterface.ShowProgress(_("Verifying blocks..."), 0); +} + +CVerifyDB::~CVerifyDB() +{ + uiInterface.ShowProgress("", 100); +} + +bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) +{ + LOCK(cs_main); + if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL) + return true; + + // Verify blocks in the best chain + if (nCheckDepth <= 0) + nCheckDepth = 1000000000; // suffices until the year 19000 + if (nCheckDepth > chainActive.Height()) + nCheckDepth = chainActive.Height(); + nCheckLevel = std::max(0, std::min(4, nCheckLevel)); + LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); + CCoinsViewCache coins(coinsview); + CBlockIndex* pindexState = chainActive.Tip(); + CBlockIndex* pindexFailure = NULL; + int nGoodTransactions = 0; + CValidationState state; + // No need to verify shielded req's twice + auto verifier = libzcash::ProofVerifier::Disabled(); + //fprintf(stderr,"start VerifyDB %u\n",(uint32_t)time(NULL)); + for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) + { + boost::this_thread::interruption_point(); + uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * (nCheckLevel >= 4 ? 
50 : 100))))); + if (pindex->GetHeight() < chainActive.Height()-nCheckDepth) + break; + CBlock block; + // check level 0: read from disk + if (!ReadBlockFromDisk(block, pindex,0)) + return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + // check level 1: verify block validity + int32_t futureblock; + if (nCheckLevel >= 1 && !CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, verifier,0) ) + return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + // check level 2: verify undo validity + if (nCheckLevel >= 2 && pindex) { + CBlockUndo undo; + CDiskBlockPos pos = pindex->GetUndoPos(); + if (!pos.IsNull()) { + if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) + return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + } + } + // check level 3: check for inconsistencies during memory-only disconnect of tip blocks + if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { + bool fClean = true; + if (!DisconnectBlock(block, state, pindex, coins, &fClean)) + return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + pindexState = pindex->pprev; + if (!fClean) { + nGoodTransactions = 0; + pindexFailure = pindex; + } else + nGoodTransactions += block.vtx.size(); + } + if (ShutdownRequested()) + return true; + } + //fprintf(stderr,"end VerifyDB %u\n",(uint32_t)time(NULL)); + if (pindexFailure) + return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->GetHeight() + 1, nGoodTransactions); + + // check level 4: try reconnecting blocks + if (nCheckLevel >= 4) { + CBlockIndex 
*pindex = pindexState; + while (pindex != chainActive.Tip()) { + boost::this_thread::interruption_point(); + uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * 50)))); + pindex = chainActive.Next(pindex); + CBlock block; + if (!ReadBlockFromDisk(block, pindex,0)) + return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + if (!ConnectBlock(block, state, pindex, coins,false, true)) + return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); + } + } + + LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->GetHeight(), nGoodTransactions); + + return true; +} + +bool RewindBlockIndex(const CChainParams& params, bool& clearWitnessCaches) +{ + LOCK(cs_main); + + // RewindBlockIndex is called after LoadBlockIndex, so at this point every block + // index will have nCachedBranchId set based on the values previously persisted + // to disk. By definition, a set nCachedBranchId means that the block was + // fully-validated under the corresponding consensus rules. Thus we can quickly + // identify whether the current active chain matches our expected sequence of + // consensus rule changes, with two checks: + // + // - BLOCK_ACTIVATES_UPGRADE is set only on blocks that activate upgrades. + // - nCachedBranchId for each block matches what we expect. 
+ auto sufficientlyValidated = [&params](const CBlockIndex* pindex) { + auto consensus = params.GetConsensus(); + bool fFlagSet = pindex->nStatus & BLOCK_ACTIVATES_UPGRADE; + bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->GetHeight(), consensus); + return fFlagSet == fFlagExpected && + pindex->nCachedBranchId && + *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->GetHeight(), consensus); + }; + + int nHeight = 1; + while (nHeight <= chainActive.Height()) { + if (!sufficientlyValidated(chainActive[nHeight])) { + break; + } + nHeight++; + } + + // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1 + auto rewindLength = chainActive.Height() - nHeight; + if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH) + { + auto pindexOldTip = chainActive.Tip(); + auto pindexRewind = chainActive[nHeight - 1]; + auto msg = strprintf(_( + "A block chain rewind has been detected that would roll back %d blocks! " + "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety."
+ ), rewindLength, MAX_REORG_LENGTH) + "\n\n" + + _("Rewind details") + ":\n" + + "- " + strprintf(_("Current tip: %s, height %d"), + pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight()) + "\n" + + "- " + strprintf(_("Rewinding to: %s, height %d"), + pindexRewind->phashBlock->GetHex(), pindexRewind->GetHeight()) + "\n\n" + + _("Please help, human!"); + LogPrintf("*** %s\n", msg); + uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR); + StartShutdown(); + return false; + } + + CValidationState state; + CBlockIndex* pindex = chainActive.Tip(); + while (chainActive.Height() >= nHeight) { + if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) { + // If pruning, don't try rewinding past the HAVE_DATA point; + // since older blocks can't be served anyway, there's + // no need to walk further, and trying to DisconnectTip() + // will fail (and require a needless reindex/redownload + // of the blockchain). + break; + } + if (!DisconnectTip(state, true)) { + return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->GetHeight()); + } + // Occasionally flush state to disk. + if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) + return false; + } + + // Reduce validity flag and have-data flags. + + // Collect blocks to be removed (blocks in mapBlockIndex must be at least BLOCK_VALID_TREE). + // We do this after actual disconnecting, otherwise we'll end up writing the lack of data + // to disk before writing the chainstate, resulting in a failure to continue if interrupted. + std::vector<const CBlockIndex*> vBlocks; + for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { + CBlockIndex* pindexIter = it->second; + + // Note: If we encounter an insufficiently validated block that + // is on chainActive, it must be because we are a pruning node, and + // this block or some successor doesn't HAVE_DATA, so we were unable to + // rewind all the way.
Blocks remaining on chainActive at this point + // must not have their validity reduced. + if (pindexIter && !sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) { + // Reduce validity + pindexIter->nStatus = + std::min(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | + (pindexIter->nStatus & ~BLOCK_VALID_MASK); + // Remove have-data flags + pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO); + // Remove branch ID + pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE; + pindexIter->nCachedBranchId = boost::none; + // Remove storage location + pindexIter->nFile = 0; + pindexIter->nDataPos = 0; + pindexIter->nUndoPos = 0; + // Remove various other things + pindexIter->nTx = 0; + pindexIter->nChainTx = 0; + pindexIter->nSproutValue = boost::none; + pindexIter->nChainSproutValue = boost::none; + pindexIter->nSaplingValue = 0; + pindexIter->nChainSaplingValue = boost::none; + pindexIter->nSequenceId = 0; + + // Make sure it gets written + /* corresponds to commented out block below as an alternative to setDirtyBlockIndex + vBlocks.push_back(pindexIter); + */ + setDirtyBlockIndex.insert(pindexIter); + if (pindexIter == pindexBestInvalid) + { + //fprintf(stderr,"Reset invalid block marker if it was pointing to this block\n"); + pindexBestInvalid = NULL; + } + + // Update indices + setBlockIndexCandidates.erase(pindexIter); + auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev); + while (ret.first != ret.second) { + if (ret.first->second == pindexIter) { + mapBlocksUnlinked.erase(ret.first++); + } else { + ++ret.first; + } + } + } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) { + setBlockIndexCandidates.insert(pindexIter); + } + } + + PruneBlockIndexCandidates(); + + CheckBlockIndex(); + + if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) { + return false; + } + + return true; +} + +void UnloadBlockIndex() +{ + LOCK(cs_main); + setBlockIndexCandidates.clear(); + chainActive.SetTip(NULL); + 
pindexBestInvalid = NULL; + pindexBestHeader = NULL; + mempool.clear(); + mapOrphanTransactions.clear(); + mapOrphanTransactionsByPrev.clear(); + nSyncStarted = 0; + mapBlocksUnlinked.clear(); + vinfoBlockFile.clear(); + tmpBlockFiles.clear(); + nLastBlockFile = 0; + nBlockSequenceId = 1; + mapBlockSource.clear(); + mapBlocksInFlight.clear(); + nQueuedValidatedHeaders = 0; + nPreferredDownload = 0; + setDirtyBlockIndex.clear(); + setDirtyFileInfo.clear(); + ClearNodeState(); + recentRejects.reset(NULL); + + BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) { + delete entry.second; + } + mapBlockIndex.clear(); + fHavePruned = false; +} + +bool LoadBlockIndex() +{ + // Load block index from databases + HUSH_LOADINGBLOCKS = 1; + if (!fReindex && !LoadBlockIndexDB()) + { + HUSH_LOADINGBLOCKS = 0; + return false; + } + fprintf(stderr,"finished loading blocks %s\n",SMART_CHAIN_SYMBOL); + return true; +} + + +bool InitBlockIndex() { + const CChainParams& chainparams = Params(); + LOCK(cs_main); + tmpBlockFiles.clear(); + + // Initialize global variables that cannot be constructed at startup. 
+ recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); + // Check whether we're already initialized + if (chainActive.Genesis() != NULL) + { + return true; + } + if ( pblocktree != 0 ) + { + // Use the provided setting for -txindex in the new database + fTxIndex = GetBoolArg("-txindex", true); + pblocktree->WriteFlag("txindex", fTxIndex); + + // Use the provided setting for -addressindex in the new database + fAddressIndex = GetBoolArg("-addressindex", DEFAULT_ADDRESSINDEX); + pblocktree->WriteFlag("addressindex", fAddressIndex); + + // Use the provided setting for -zindex in the new database + fZindex = GetBoolArg("-zindex", DEFAULT_SHIELDEDINDEX); + pblocktree->WriteFlag("zindex", fZindex); + + // Use the provided setting for -timestampindex in the new database + fTimestampIndex = GetBoolArg("-timestampindex", DEFAULT_TIMESTAMPINDEX); + pblocktree->WriteFlag("timestampindex", fTimestampIndex); + + fSpentIndex = GetBoolArg("-spentindex", DEFAULT_SPENTINDEX); + pblocktree->WriteFlag("spentindex", fSpentIndex); + fprintf(stderr,"fAddressIndex.%d/%d fSpentIndex.%d/%d fZindex.%d/%d\n",fAddressIndex,DEFAULT_ADDRESSINDEX,fSpentIndex,DEFAULT_SPENTINDEX,fZindex, DEFAULT_SHIELDEDINDEX ); + LogPrintf("Initializing databases...\n"); + } + // Only add the genesis block if not reindexing (in which case we reuse the one already on disk) + if (!fReindex) { + try { + CBlock &block = const_cast<CBlock&>(Params().GenesisBlock()); + // Start new block file + unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); + CDiskBlockPos blockPos; + CValidationState state; + if (!FindBlockPos(0,state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) + return error("LoadBlockIndex(): FindBlockPos failed"); + if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) + return error("LoadBlockIndex(): writing genesis block to disk failed"); + CBlockIndex *pindex = AddToBlockIndex(block); + if ( pindex == 0 ) + return error("LoadBlockIndex(): couldnt add to
block index"); + if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) + return error("LoadBlockIndex(): genesis block not accepted"); + if (!ActivateBestChain(true, state, &block)) + return error("LoadBlockIndex(): genesis block cannot be activated"); + // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data + if ( HUSH_NSPV_FULLNODE ) + return FlushStateToDisk(state, FLUSH_STATE_ALWAYS); + else return(true); + } catch (const std::runtime_error& e) { + return error("LoadBlockIndex(): failed to initialize block database: %s", e.what()); + } + } + + return true; +} + + + +bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) +{ + const CChainParams& chainparams = Params(); + // Map of disk positions for blocks with unknown parent (only used for reindex) + static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent; + int64_t nStart = GetTimeMillis(); + + int nLoaded = 0; + try { + // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor + //CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION); + CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE(10000000), MAX_BLOCK_SIZE(10000000)+8, SER_DISK, CLIENT_VERSION); + uint64_t nRewind = blkdat.GetPos(); + while (!blkdat.eof()) { + boost::this_thread::interruption_point(); + + blkdat.SetPos(nRewind); + nRewind++; // start one byte further next time, in case of failure + blkdat.SetLimit(); // remove former limit + unsigned int nSize = 0; + try { + // locate a header + unsigned char buf[MESSAGE_START_SIZE]; + blkdat.FindByte(Params().MessageStart()[0]); + nRewind = blkdat.GetPos()+1; + blkdat >> FLATDATA(buf); + if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE)) + continue; + // read size + blkdat >> nSize; + if (nSize < 80 || nSize > MAX_BLOCK_SIZE(10000000)) + continue; + } catch (const std::exception&) { + // no valid block header found; don't complain + break; + } + try { + // read block + CBlock block; + uint64_t
nBlockPos = blkdat.GetPos(); + if (dbp) + dbp->nPos = nBlockPos; + blkdat.SetLimit(nBlockPos + nSize); + blkdat.SetPos(nBlockPos); + blkdat >> block; + + nRewind = blkdat.GetPos(); + // detect out of order blocks, and store them for later + uint256 hash = block.GetHash(); + if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) { + LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), + block.hashPrevBlock.ToString()); + if (dbp) + mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp)); + continue; + } + + // process in case the block isn't known yet + if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) { + CValidationState state; + if (ProcessNewBlock(0,0,state, NULL, &block, true, dbp)) + nLoaded++; + if (state.IsError()) + break; + } else if (hash != chainparams.GetConsensus().hashGenesisBlock && hush_blockheight(hash) % 1000 == 0) { + LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), hush_blockheight(hash)); + } + + // Recursively process earlier encountered successors of this block + deque<uint256> queue; + queue.push_back(hash); + while (!queue.empty()) { + uint256 head = queue.front(); + queue.pop_front(); + std::pair<std::multimap<uint256, CDiskBlockPos>::iterator, std::multimap<uint256, CDiskBlockPos>::iterator> range = mapBlocksUnknownParent.equal_range(head); + while (range.first != range.second) { + std::multimap<uint256, CDiskBlockPos>::iterator it = range.first; + + if (ReadBlockFromDisk(mapBlockIndex.count(hash)!=0?mapBlockIndex[hash]->GetHeight():0,block, it->second,1)) + { + LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(), + head.ToString()); + CValidationState dummy; + if (ProcessNewBlock(0,0,dummy, NULL, &block, true, &it->second)) + { + nLoaded++; + queue.push_back(block.GetHash()); + } + } + range.first++; + mapBlocksUnknownParent.erase(it); + } + } + } catch (const std::exception& e) { + 
LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what()); + } + } + } catch (const std::runtime_error& e) { + AbortNode(std::string("System error: ") + e.what()); + } + if (nLoaded > 0) + LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart); + return nLoaded > 0; +} + +void CheckBlockIndex() +{ + const Consensus::Params& consensusParams = Params().GetConsensus(); + if (!fCheckBlockIndex) { + return; + } + + LOCK(cs_main); + + // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain, + // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when + // iterating the block tree require that chainActive has been initialized.) + if (chainActive.Height() < 0) { + assert(mapBlockIndex.size() <= 1); + return; + } + + // Build forward-pointing map of the entire block tree. + std::multimap<CBlockIndex*,CBlockIndex*> forward; + for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { + if ( it->second != 0 ) + forward.insert(std::make_pair(it->second->pprev, it->second)); + } + if ( Params().NetworkIDString() != "regtest" ) + assert(forward.size() == mapBlockIndex.size()); + + std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeGenesis = forward.equal_range(NULL); + CBlockIndex *pindex = rangeGenesis.first->second; + rangeGenesis.first++; + assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL. + + // Iterate over the entire block tree, using depth-first search. + // Along the way, remember whether there are blocks on the path from genesis + // block being explored which are the first to have certain properties. + size_t nNodes = 0; + int nHeight = 0; + CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid. + CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. 
+ CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0. + CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). + CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). + CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). + CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). + while (pindex != NULL) { + nNodes++; + if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; + if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex; + if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; + if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; + if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex; + if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex; + if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex; + + // Begin: actual consistency checks. + if (pindex->pprev == NULL) { + // Genesis block checks. + assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match. 
+ assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block. + } + if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked + // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred). + // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred. + if (!fHavePruned) { + // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0 + assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); + assert(pindexFirstMissing == pindexFirstNeverProcessed); + } else { + // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0 + if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); + } + if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); + assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. + // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set. + assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned). + assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0)); + assert(pindex->GetHeight() == nHeight); // nHeight must be consistent. + assert(pindex->pprev == NULL || pindex->chainPower >= pindex->pprev->chainPower); // For every block except the genesis block, the chainwork must be larger than the parent's. + assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->GetHeight() < nHeight))); // The pskip pointer must point back for all but the first 2 blocks. 
+ assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid + if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid + if (pindexFirstInvalid == NULL) { + // Checks for not-invalid blocks. + assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. + } + if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) { + if (pindexFirstInvalid == NULL) { + // If this block sorts at least as good as the current tip and + // is valid and we have all data for its parents, it must be in + // setBlockIndexCandidates. chainActive.Tip() must also be there + // even if some data has been pruned. + if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) { + assert(setBlockIndexCandidates.count(pindex)); + } + // If some parent is missing, then it could be that this block was in + // setBlockIndexCandidates but had to be removed because of the missing data. + // In this case it must be in mapBlocksUnlinked -- see test below. + } + } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates. + assert(setBlockIndexCandidates.count(pindex) == 0); + } + // Check whether this block is in mapBlocksUnlinked. 
+ std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev); + bool foundInUnlinked = false; + while (rangeUnlinked.first != rangeUnlinked.second) { + assert(rangeUnlinked.first->first == pindex->pprev); + if (rangeUnlinked.first->second == pindex) { + foundInUnlinked = true; + break; + } + rangeUnlinked.first++; + } + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) { + // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked. + assert(foundInUnlinked); + } + if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA + if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked. + if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) { + // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent. + assert(fHavePruned); // We must have pruned. + // This block may have entered mapBlocksUnlinked if: + // - it has a descendant that at some point had more work than the + // tip, and + // - we tried switching to that descendant but were missing + // data for some intermediate block between chainActive and the + // tip. + // So if this block is itself better than chainActive.Tip() and it wasn't in + // setBlockIndexCandidates, then it must be in mapBlocksUnlinked. 
+ if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) { + if (pindexFirstInvalid == NULL) { + assert(foundInUnlinked); + } + } + } + // try { + // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow + // } catch (const runtime_error&) { + // assert(!"Failed to read index entry"); + // } + // End: actual consistency checks. + + // Try descending into the first subnode. + std::pair::iterator,std::multimap::iterator> range = forward.equal_range(pindex); + if (range.first != range.second) { + // A subnode was found. + pindex = range.first->second; + nHeight++; + continue; + } + // This is a leaf node. + // Move upwards until we reach a node of which we have not yet visited the last child. + while (pindex) { + // We are going to either move to a parent or a sibling of pindex. + // If pindex was the first with a certain property, unset the corresponding variable. + if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL; + if (pindex == pindexFirstMissing) pindexFirstMissing = NULL; + if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL; + if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL; + if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL; + if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL; + if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL; + // Find our parent. + CBlockIndex* pindexPar = pindex->pprev; + // Find which child we just visited. + std::pair::iterator,std::multimap::iterator> rangePar = forward.equal_range(pindexPar); + while (rangePar.first->second != pindex) { + assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child. + rangePar.first++; + } + // Proceed to the next one. + rangePar.first++; + if (rangePar.first != rangePar.second) { + // Move to the sibling. 
+ pindex = rangePar.first->second; + break; + } else { + // Move up further. + pindex = pindexPar; + nHeight--; + continue; + } + } + } + + // Check that we actually traversed the entire map. + assert(nNodes == forward.size()); +} + diff --git a/src/main.cpp b/src/main.cpp index 06fd58d8e..be2b1c388 100644 --- a/src/main.cpp +++ b/src/main.cpp @@ -85,7 +85,7 @@ void hush_pricesupdate(int32_t height,CBlock *pblock); BlockMap mapBlockIndex; CChain chainActive; CBlockIndex *pindexBestHeader = NULL; -static int64_t nTimeBestReceived = 0; +int64_t nTimeBestReceived = 0; CWaitableCriticalSection csBestBlock; CConditionVariable cvBlockChange; int nScriptCheckThreads = 0; @@ -107,7 +107,7 @@ size_t nCoinCacheUsage = 5000 * 300; uint64_t nPruneTarget = 0; // If the tip is older than this (in seconds), the node is considered to be in initial block download. int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE; -const bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; +extern const bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; int32_t nFirstHalvingHeight = 340000; unsigned int expiryDelta = DEFAULT_TX_EXPIRY_DELTA; @@ -132,136 +132,103 @@ struct COrphanTx { }; map mapOrphanTransactions GUARDED_BY(cs_main);; map > mapOrphanTransactionsByPrev GUARDED_BY(cs_main);; -void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Returns true if there are nRequired or more blocks of minVersion or above * in the last Consensus::Params::nMajorityWindow blocks, starting at pstart and going backwards. 
*/ -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams); -static void CheckBlockIndex(); +bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams); +void CheckBlockIndex(); +void InvalidChainFound(CBlockIndex* pindexNew); /** Constant stuff for coinbase transactions we create: */ CScript COINBASE_FLAGS; const string strMessageMagic = "Hush Signed Message:\n"; -// Internal stuff -namespace { +// Internal shared state — struct definitions and extern declarations in main_internal.h +#include "main_internal.h" - struct CBlockIndexWorkComparator - { - bool operator()(CBlockIndex *pa, const CBlockIndex *pb) const { - // First sort by most total work, ... - - if (pa->chainPower.chainWork > pb->chainPower.chainWork) return false; - if (pa->chainPower.chainWork < pb->chainPower.chainWork) return true; +CBlockIndex *pindexBestInvalid; - // ... then by earliest time received, ... - if (pa->nSequenceId < pb->nSequenceId) return false; - if (pa->nSequenceId > pb->nSequenceId) return true; +/** + * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and + * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be + * missing the data for the block. + */ +set setBlockIndexCandidates; - // Use pointer address as tie breaker (should only happen with blocks - // loaded from disk, as those all have id 0). - if (pa < pb) return false; - if (pa > pb) return true; +/** Number of nodes with fSyncStarted. */ +int nSyncStarted = 0; - // Identical blocks. - return false; - } - }; +/** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions. + * Pruned nodes may have entries where B is missing data. 
+ */ +multimap mapBlocksUnlinked; - CBlockIndex *pindexBestInvalid; +CCriticalSection cs_LastBlockFile; +std::vector vinfoBlockFile,tmpBlockFiles; +int nLastBlockFile = 0; +int nLastTmpFile = 0; +unsigned int maxTempFileSize0 = MAX_TEMPFILE_SIZE; +unsigned int maxTempFileSize1 = MAX_TEMPFILE_SIZE; +/** Global flag to indicate we should check to see if there are + * block/undo files that should be deleted. Set on startup + * or if we allocate more file space when we're in prune mode + */ +bool fCheckForPruning = false; - /** - * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself and all ancestors) and - * as good as our current tip or better. Entries may be failed, though, and pruning nodes may be - * missing the data for the block. - */ - - //set> setBlockIndexCandidates; - set setBlockIndexCandidates; - - /** Number of nodes with fSyncStarted. */ - int nSyncStarted = 0; +/** + * Every received block is assigned a unique and increasing identifier, so we + * know which one to give priority in case of a fork. + */ +CCriticalSection cs_nBlockSequenceId; +/** Blocks loaded from disk are assigned id 0, so start the counter at 1. */ +uint32_t nBlockSequenceId = 1; - /** All pairs A->B, where A (or one if its ancestors) misses transactions, but B has transactions. - * Pruned nodes may have entries where B is missing data. - */ - multimap mapBlocksUnlinked; +/** + * Sources of received blocks, saved to be able to send them reject + * messages or ban them when processing happens afterwards. Protected by + * cs_main. + */ +map mapBlockSource; - CCriticalSection cs_LastBlockFile; - std::vector vinfoBlockFile,tmpBlockFiles; - int nLastBlockFile = 0; - int nLastTmpFile = 0; - unsigned int maxTempFileSize0 = MAX_TEMPFILE_SIZE; - unsigned int maxTempFileSize1 = MAX_TEMPFILE_SIZE; - /** Global flag to indicate we should check to see if there are - * block/undo files that should be deleted. 
Set on startup - * or if we allocate more file space when we're in prune mode - */ - bool fCheckForPruning = false; +/** + * Filter for transactions that were recently rejected by + * AcceptToMemoryPool. These are not rerequested until the chain tip + * changes, at which point the entire filter is reset. Protected by + * cs_main. + * + * Without this filter we'd be re-requesting txs from each of our peers, + * increasing bandwidth consumption considerably. For instance, with 100 + * peers, half of which relay a tx we don't accept, that might be a 50x + * bandwidth increase. A flooding attacker attempting to roll-over the + * filter using minimum-sized, 60byte, transactions might manage to send + * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a + * two minute window to send invs to us. + * + * Decreasing the false positive rate is fairly cheap, so we pick one in a + * million to make it highly unlikely for users to have issues with this + * filter. + * + * Memory used: 1.7MB + */ +boost::scoped_ptr recentRejects; +uint256 hashRecentRejectsChainTip; - /** - * Every received block is assigned a unique and increasing identifier, so we - * know which one to give priority in case of a fork. - */ - CCriticalSection cs_nBlockSequenceId; - /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */ - uint32_t nBlockSequenceId = 1; +map::iterator> > mapBlocksInFlight; - /** - * Sources of received blocks, saved to be able to send them reject - * messages or ban them when processing happens afterwards. Protected by - * cs_main. - */ - map mapBlockSource; +/** Number of blocks in flight with validated headers. */ +int nQueuedValidatedHeaders = 0; - /** - * Filter for transactions that were recently rejected by - * AcceptToMemoryPool. These are not rerequested until the chain tip - * changes, at which point the entire filter is reset. Protected by - * cs_main. 
- * - * Without this filter we'd be re-requesting txs from each of our peers, - * increasing bandwidth consumption considerably. For instance, with 100 - * peers, half of which relay a tx we don't accept, that might be a 50x - * bandwidth increase. A flooding attacker attempting to roll-over the - * filter using minimum-sized, 60byte, transactions might manage to send - * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a - * two minute window to send invs to us. - * - * Decreasing the false positive rate is fairly cheap, so we pick one in a - * million to make it highly unlikely for users to have issues with this - * filter. - * - * Memory used: 1.7MB - */ - boost::scoped_ptr recentRejects; - uint256 hashRecentRejectsChainTip; +/** Number of preferable block download peers. */ +int nPreferredDownload = 0; - /** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */ - struct QueuedBlock { - uint256 hash; - CBlockIndex *pindex; //! Optional. - int64_t nTime; //! Time of "getdata" request in microseconds. - bool fValidatedHeaders; //! Whether this block has validated headers at the time of request. - int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer) - }; - map::iterator> > mapBlocksInFlight; +/** Dirty block index entries. */ +set setDirtyBlockIndex; - /** Number of blocks in flight with validated headers. */ - int nQueuedValidatedHeaders = 0; - - /** Number of preferable block download peers. */ - int nPreferredDownload = 0; - - /** Dirty block index entries. */ - set setDirtyBlockIndex; - - /** Dirty block file entries. */ - set setDirtyFileInfo; -} // anon namespace +/** Dirty block file entries. */ +set setDirtyFileInfo; // Registration of network node signals. 
namespace { @@ -571,6 +538,11 @@ namespace { } // anon namespace +// Helper for block_processing.cpp to clear NET internal state +void ClearNodeState() { + mapNodeState.clear(); +} + // CZindexDB CZindexDB::CZindexDB() { @@ -850,1195 +822,12 @@ bool hush_dailysnapshot(int32_t height) // mapOrphanTransactions // -bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - uint256 hash = tx.GetHash(); - if (mapOrphanTransactions.count(hash)) - return false; +// [mempool_accept.cpp] Orphan tx functions (AddOrphanTx, EraseOrphanTx, EraseOrphansFor, LimitOrphanTxSize) extracted - // Ignore big transactions, to avoid a - // send-big-orphans memory exhaustion attack. If a peer has a legitimate - // large transaction with a missing parent then we assume - // it will rebroadcast it later, after the parent transaction(s) - // have been mined or received. - // 10,000 orphans, each of which is at most 5,000 bytes big is - // at most 500 megabytes of orphans: - unsigned int sz = GetSerializeSize(tx, SER_NETWORK, tx.nVersion); - if (sz > 5000) - { - LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString()); - return false; - } - mapOrphanTransactions[hash].tx = tx; - mapOrphanTransactions[hash].fromPeer = peer; - BOOST_FOREACH(const CTxIn& txin, tx.vin) - mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash); +// [tx_validation.cpp] IsStandardTx through CheckTransactionWithoutProofVerification extracted - LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(), - mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); - return true; -} - -void static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - map::iterator it = mapOrphanTransactions.find(hash); - if (it == mapOrphanTransactions.end()) - return; - BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin) - { - map >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash); - if (itPrev == 
mapOrphanTransactionsByPrev.end()) - continue; - itPrev->second.erase(hash); - if (itPrev->second.empty()) - mapOrphanTransactionsByPrev.erase(itPrev); - } - mapOrphanTransactions.erase(it); -} - -void EraseOrphansFor(NodeId peer) -{ - int nErased = 0; - map::iterator iter = mapOrphanTransactions.begin(); - while (iter != mapOrphanTransactions.end()) - { - map::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid - if (maybeErase->second.fromPeer == peer) - { - EraseOrphanTx(maybeErase->second.tx.GetHash()); - ++nErased; - } - } - if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer); -} - - -unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) -{ - unsigned int nEvicted = 0; - while (mapOrphanTransactions.size() > nMaxOrphans) - { - // Evict a random orphan: - uint256 randomhash = GetRandHash(); - map::iterator it = mapOrphanTransactions.lower_bound(randomhash); - if (it == mapOrphanTransactions.end()) - it = mapOrphanTransactions.begin(); - EraseOrphanTx(it->first); - ++nEvicted; - } - return nEvicted; -} - - -bool IsStandardTx(const CTransaction& tx, string& reason, const int nHeight) -{ - const bool overwinterActive = nHeight>=1 ? true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER); - const bool saplingActive = nHeight>=1 ? 
true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING); - - if (saplingActive) { - // Sapling standard rules apply - if (tx.nVersion > CTransaction::SAPLING_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SAPLING_MIN_CURRENT_VERSION) { - reason = "sapling-version"; - return false; - } - } else if (overwinterActive) { - // Overwinter standard rules apply - if (tx.nVersion > CTransaction::OVERWINTER_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::OVERWINTER_MIN_CURRENT_VERSION) { - reason = "overwinter-version"; - return false; - } - } else { - // Sprout standard rules apply - if (tx.nVersion > CTransaction::SPROUT_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SPROUT_MIN_CURRENT_VERSION) { - reason = "version"; - return false; - } - } - - BOOST_FOREACH(const CTxIn& txin, tx.vin) - { - // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed - // keys. (remember the 520 byte limit on redeemScript size) That works - // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627 - // bytes of scriptSig, which we round off to 1650 bytes for some minor - // future-proofing. 
That's also enough to spend a 20-of-20 - // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not - // considered standard) - if (txin.scriptSig.size() > 1650) { - reason = "scriptsig-size"; - return false; - } - if (!txin.scriptSig.IsPushOnly()) { - reason = "scriptsig-not-pushonly"; - return false; - } - } - - unsigned int v=0,nDataOut = 0; - txnouttype whichType; - BOOST_FOREACH(const CTxOut& txout, tx.vout) - { - if (!::IsStandard(txout.scriptPubKey, whichType)) - { - reason = "scriptpubkey"; - //fprintf(stderr," vout.%d nDataout.%d\n",v,nDataOut); - return false; - } - - if (whichType == TX_NULL_DATA) - { - if ( txout.scriptPubKey.size() > DRAGON_MAXSCRIPTSIZE ) - { - reason = "opreturn too big"; - return(false); - } - nDataOut++; - //fprintf(stderr,"is OP_RETURN\n"); - } else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) { - reason = "bare-multisig"; - return false; - } else if (txout.IsDust(::minRelayTxFee)) { - reason = "dust"; - return false; - } - v++; - } - - // only one OP_RETURN txout is permitted - if (nDataOut > 1) { - reason = "multi-op-return"; - return false; - } - - return true; -} - -bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) -{ - if (tx.nLockTime == 0) - return true; - if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? 
(int64_t)nBlockHeight : nBlockTime)) - return true; - BOOST_FOREACH(const CTxIn& txin, tx.vin) - { - if ( !hush_hardfork_active(nBlockTime) && txin.nSequence == 0xfffffffe && - //if ( (nBlockTime <= ASSETCHAINS_STAKED_HF_TIMESTAMP ) && txin.nSequence == 0xfffffffe && - ( - ((int64_t)tx.nLockTime >= LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockTime) || - ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockHeight) - ) - ) - { - - } - //else if ( nBlockTime > ASSETCHAINS_STAKED_HF_TIMESTAMP && txin.nSequence == 0xfffffffe && - else if ( hush_hardfork_active(nBlockTime) && txin.nSequence == 0xfffffffe && - ( - ((int64_t)tx.nLockTime >= LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime <= nBlockTime) || - ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime <= nBlockHeight)) - ) - { - - } - else if (!txin.IsFinal()) - { - LogPrintf("non-final txin txid.%s seq.%x locktime.%u vs nTime.%u\n",tx.GetHash().ToString().c_str(),txin.nSequence,(uint32_t)tx.nLockTime,(uint32_t)nBlockTime); - return false; - } - } - return true; -} - -bool IsExpiredTx(const CTransaction &tx, int nBlockHeight) -{ - if (tx.nExpiryHeight == 0 || tx.IsCoinBase()) { - return false; - } - return static_cast(nBlockHeight) > tx.nExpiryHeight; -} - -bool CheckFinalTx(const CTransaction &tx, int flags) -{ - AssertLockHeld(cs_main); - - // By convention a negative value for flags indicates that the - // current network-enforced consensus rules should be used. In - // a future soft-fork scenario that would mean checking which - // rules would be enforced for the next block and setting the - // appropriate flags. At the present time no soft-forks are - // scheduled, so no flags are set. - flags = std::max(flags, 0); - - // CheckFinalTx() uses chainActive.Height()+1 to evaluate - // nLockTime because when IsFinalTx() is called within - // CBlock::AcceptBlock(), the height of the block *being* - // evaluated is what is used. 
Thus if we want to know if a - // transaction can be part of the *next* block, we need to call - // IsFinalTx() with one more than chainActive.Height(). - const int nBlockHeight = chainActive.Height() + 1; - - // Timestamps on the other hand don't get any special treatment, - // because we can't know what timestamp the next block will have, - // and there aren't timestamp applications where it matters. - // However this changes once median past time-locks are enforced: - const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) - ? chainActive.Tip()->GetMedianTimePast() - : GetTime(); - - return IsFinalTx(tx, nBlockHeight, nBlockTime); -} - -/** - * Check transaction inputs to mitigate two - * potential denial-of-service attacks: - * - * 1. scriptSigs with extra data stuffed into them, - * not consumed by scriptPubKey (or P2SH script) - * 2. P2SH scripts with a crazy number of expensive - * CHECKSIG/CHECKMULTISIG operations - */ -bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, uint32_t consensusBranchId) -{ - if (tx.IsCoinBase()) - return true; // Coinbases don't use vin normally - - //if (tx.IsCoinImport()) - // return tx.vin[0].scriptSig.IsCoinImport(); - - for (unsigned int i = 0; i < tx.vin.size(); i++) - { - //if (tx.IsPegsImport() && i==0) continue; - const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]); - - vector > vSolutions; - txnouttype whichType; - // get the scriptPubKey corresponding to this input: - const CScript& prevScript = prev.scriptPubKey; - //printf("Previous script: %s\n", prevScript.ToString().c_str()); - - if (!Solver(prevScript, whichType, vSolutions)) - return false; - int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions); - if (nArgsExpected < 0) - return false; - - // Transactions with extra stuff in their scriptSigs are - // non-standard. 
Note that this EvalScript() call will - // be quick, because if there are any operations - // beside "push data" in the scriptSig - // IsStandardTx() will have already returned false - // and this method isn't called. - vector > stack; - //printf("Checking script: %s\n", tx.vin[i].scriptSig.ToString().c_str()); - if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), consensusBranchId)) - return false; - - if (whichType == TX_SCRIPTHASH) - { - if (stack.empty()) - return false; - CScript subscript(stack.back().begin(), stack.back().end()); - vector > vSolutions2; - txnouttype whichType2; - if (Solver(subscript, whichType2, vSolutions2)) - { - int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2); - if (tmpExpected < 0) - return false; - nArgsExpected += tmpExpected; - } - else - { - // Any other Script with less than 15 sigops OK: - unsigned int sigops = subscript.GetSigOpCount(true); - // ... extra data left on the stack after execution is OK, too: - return (sigops <= MAX_P2SH_SIGOPS); - } - } - - if (stack.size() != (unsigned int)nArgsExpected) - return false; - } - - return true; -} - -unsigned int GetLegacySigOpCount(const CTransaction& tx) -{ - unsigned int nSigOps = 0; - BOOST_FOREACH(const CTxIn& txin, tx.vin) - { - nSigOps += txin.scriptSig.GetSigOpCount(false); - } - BOOST_FOREACH(const CTxOut& txout, tx.vout) - { - nSigOps += txout.scriptPubKey.GetSigOpCount(false); - } - return nSigOps; -} - -unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs) -{ - if (tx.IsCoinBase()) - return 0; - - unsigned int nSigOps = 0; - for (unsigned int i = 0; i < tx.vin.size(); i++) - { - //if (tx.IsPegsImport() && i==0) continue; - const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]); - if (prevout.scriptPubKey.IsPayToScriptHash()) - nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig); - } - return nSigOps; -} - -// Ensure that a coinbase transaction is structured according to the 
consensus rules of the chain -bool ContextualCheckCoinbaseTransaction(int32_t slowflag,const CBlock *block,CBlockIndex * const previndex,const CTransaction& tx, const int nHeight,int32_t validateprices) -{ - if ( slowflag != 0 && ASSETCHAINS_CBOPRET != 0 && validateprices != 0 && nHeight > 0 && tx.vout.size() > 0 ) - { - if ( hush_opretvalidate(block,previndex,nHeight,tx.vout[tx.vout.size()-1].scriptPubKey) < 0 ) - return(false); - } - return(true); -} - -/** - * Check a transaction contextually against a set of consensus rules valid at a given block height. - * - * Notes: - * 1. AcceptToMemoryPool calls CheckTransaction and this function. - * 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction) - * and ContextualCheckBlock (which calls this function). - * 3. The isInitBlockDownload argument is only to assist with testing. - */ -bool ContextualCheckTransaction(int32_t slowflag,const CBlock *block, CBlockIndex * const previndex, - const CTransaction& tx, - CValidationState &state, - const int nHeight, - const int dosLevel, - bool (*isInitBlockDownload)(),int32_t validateprices) -{ - const bool overwinterActive = nHeight >=1 ? true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER); - const bool saplingActive = nHeight >=1 ? 
true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING); - - if (saplingActive) { - // Reject transactions with valid version but missing overwintered flag - if (tx.nVersion >= SAPLING_MIN_TX_VERSION && !tx.fOverwintered) { - return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwintered flag must be set"), - REJECT_INVALID, "tx-overwintered-flag-not-set"); - } - - // Reject transactions with non-Sapling version group ID - if (tx.fOverwintered && tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) - { - //return state.DoS(dosLevel, error("CheckTransaction(): invalid Sapling tx version"),REJECT_INVALID, "bad-sapling-tx-version-group-id"); - if ( 0 ) - { - string strHex = EncodeHexTx(tx); - fprintf(stderr,"invalid Sapling rawtx.%s\n",strHex.c_str()); - } - return state.DoS(isInitBlockDownload() ? 0 : dosLevel, - error("CheckTransaction(): invalid Sapling tx version"), - REJECT_INVALID, "bad-sapling-tx-version-group-id"); - } - - // Reject transactions with invalid version - if (tx.fOverwintered && tx.nVersion < SAPLING_MIN_TX_VERSION ) { - return state.DoS(100, error("CheckTransaction(): Sapling version too low"), - REJECT_INVALID, "bad-tx-sapling-version-too-low"); - } - - // Reject transactions with invalid version - if (tx.fOverwintered && tx.nVersion > SAPLING_MAX_TX_VERSION ) { - return state.DoS(100, error("CheckTransaction(): Sapling version too high"), - REJECT_INVALID, "bad-tx-sapling-version-too-high"); - } - } else if (overwinterActive) { - // Reject transactions with valid version but missing overwinter flag - if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) { - return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"), - REJECT_INVALID, "tx-overwinter-flag-not-set"); - } - - // Reject transactions with non-Overwinter version group ID - if (tx.fOverwintered && tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) - { - //return state.DoS(dosLevel, 
error("CheckTransaction(): invalid Overwinter tx version"),REJECT_INVALID, "bad-overwinter-tx-version-group-id"); - return state.DoS(isInitBlockDownload() ? 0 : dosLevel, - error("CheckTransaction(): invalid Overwinter tx version"), - REJECT_INVALID, "bad-overwinter-tx-version-group-id"); - } - - // Reject transactions with invalid version - if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) { - return state.DoS(100, error("CheckTransaction(): overwinter version too high"), - REJECT_INVALID, "bad-tx-overwinter-version-too-high"); - } - } - - // Rules that apply to Overwinter or later: - //fprintf(stderr,"ht.%d overwinterActive.%d tx.overwintered.%d\n",nHeight,overwinterActive,overwinterActive); - if (overwinterActive) - { - // Reject transactions intended for Sprout - if (!tx.fOverwintered) - { - int32_t ht = Params().GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nActivationHeight; - fprintf(stderr,"overwinter is active tx.%s not, ht.%d vs %d\n",tx.GetHash().ToString().c_str(),nHeight,ht); - return state.DoS((ASSETCHAINS_PRIVATE != 0 || ht < 0 || nHeight < ht) ? 0 : dosLevel, error("ContextualCheckTransaction: overwinter is active"),REJECT_INVALID, "tx-overwinter-active"); - } - - // Check that all transactions are unexpired - if (IsExpiredTx(tx, nHeight)) { - // Don't increase banscore if the transaction only just expired - //int expiredDosLevel = IsExpiredTx(tx, nHeight - 1) ? (dosLevel > 10 ? 
dosLevel : 10) : 0; - //string strHex = EncodeHexTx(tx); - //fprintf(stderr, "transaction expired.%s\n",strHex.c_str()); - - // Do not ban nodes which relay expired tx's, it's a bug not an attack - return state.DoS(0, error("ContextualCheckTransaction(): transaction %s is expired, expiry block %i vs current block %i\n",tx.GetHash().ToString(),tx.nExpiryHeight,nHeight), REJECT_INVALID, "tx-overwinter-expired"); - } - } - - // Rules that apply before Sapling: - if (!saplingActive) { - // Size limits - //BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE(chainActive.LastTip()->GetHeight()+1) > MAX_TX_SIZE_BEFORE_SAPLING); // sanity - if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_BEFORE_SAPLING) - return state.DoS(100, error("ContextualCheckTransaction(): size limits failed"), - REJECT_INVALID, "bad-txns-oversize"); - } - - uint256 dataToBeSigned; - - if (!tx.IsMint() && - (!tx.vjoinsplit.empty() || - !tx.vShieldedSpend.empty() || - !tx.vShieldedOutput.empty())) - { - auto consensusBranchId = CurrentEpochBranchId(nHeight, Params().GetConsensus()); - // Empty output script. 
- CScript scriptCode; - try { - dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId); - } catch (std::logic_error ex) { - return state.DoS(100, error("CheckTransaction(): error computing signature hash"), - REJECT_INVALID, "error-computing-signature-hash"); - } - - } - - if (tx.IsCoinBase()) - { - if (!ContextualCheckCoinbaseTransaction(slowflag,block,previndex,tx, nHeight,validateprices)) - return state.DoS(100, error("CheckTransaction(): invalid script data for coinbase time lock"), - REJECT_INVALID, "bad-txns-invalid-script-data-for-coinbase-time-lock"); - } - - // Avoid ztx validation during IBD if height is less than latest checkpoint - if (fCheckpointsEnabled && (nHeight < Checkpoints::GetTotalBlocksEstimate(Params().Checkpoints())) ) { - return true; - } - - if (!tx.vShieldedSpend.empty() || - !tx.vShieldedOutput.empty()) - { - auto ctx = librustzcash_sapling_verification_ctx_init(); - - for (const SpendDescription &spend : tx.vShieldedSpend) { - if (!librustzcash_sapling_check_spend( - ctx, - spend.cv.begin(), - spend.anchor.begin(), - spend.nullifier.begin(), - spend.rk.begin(), - spend.zkproof.begin(), - spend.spendAuthSig.begin(), - dataToBeSigned.begin() - )) - { - librustzcash_sapling_verification_ctx_free(ctx); - return state.DoS(100, error("ContextualCheckTransaction(): Sapling spend description invalid"), - REJECT_INVALID, "bad-txns-sapling-spend-description-invalid"); - } - } - - for (const OutputDescription &output : tx.vShieldedOutput) { - if (!librustzcash_sapling_check_output( - ctx, - output.cv.begin(), - output.cm.begin(), - output.ephemeralKey.begin(), - output.zkproof.begin() - )) - { - librustzcash_sapling_verification_ctx_free(ctx); - return state.DoS(100, error("ContextualCheckTransaction(): Sapling output description invalid"), - REJECT_INVALID, "bad-txns-sapling-output-description-invalid"); - } - } - - if (!librustzcash_sapling_final_check( - ctx, - tx.valueBalance, - tx.bindingSig.begin(), - 
dataToBeSigned.begin() - )) - { - librustzcash_sapling_verification_ctx_free(ctx); - fprintf(stderr,"%s: Invalid sapling binding sig! tx=%s valueBalance=%li, bindingSig.size=%li\n", __func__, tx.GetHash().ToString().c_str(), tx.valueBalance, tx.bindingSig.size() ); - return state.DoS(100, error("ContextualCheckTransaction(): Sapling binding signature invalid"), - REJECT_INVALID, "bad-txns-sapling-binding-signature-invalid"); - } - - librustzcash_sapling_verification_ctx_free(ctx); - } - return true; -} - -bool CheckTransaction(uint32_t tiptime,const CTransaction& tx, CValidationState &state, - libzcash::ProofVerifier& verifier,int32_t txIndex, int32_t numTxs) -{ - // Don't count coinbase transactions because mining skews the count - if (!tx.IsCoinBase()) { - transactionsValidated.increment(); - } - - if (!CheckTransactionWithoutProofVerification(tiptime,tx, state)) { - return false; - } - return true; -} - -// This is and hush_notaries()/gethushseason/getacseason are all consensus code -int32_t hush_isnotaryvout(char *coinaddr,uint32_t tiptime) { - bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; - bool istush = strncmp(SMART_CHAIN_SYMBOL, "TUSH",4) == 0 ? true : false; - int32_t height = chainActive.LastTip()->GetHeight(); - int32_t season = (ishush3 || istush) ? 
gethushseason(height) : getacseason(tiptime); - fprintf(stderr,"%s: coinaddr=%s season=%d, tiptime=%d\n", __func__, coinaddr, season,tiptime); - if ( NOTARY_ADDRESSES[season-1][0][0] == 0 ) { - uint8_t pubkeys[64][33]; - hush_notaries(pubkeys,0,tiptime); - } - if ( strcmp(coinaddr,CRYPTO555_HUSHADDR) == 0 ) - return(1); - for (int32_t i = 0; i < NUM_HUSH_NOTARIES; i++) { - if ( strcmp(coinaddr,NOTARY_ADDRESSES[season-1][i]) == 0 ) { - if(fDebug) { - fprintf(stderr, "%s: coinaddr.%s notaryaddress[%i].%s\n",__func__, coinaddr,i,NOTARY_ADDRESSES[season-1][i]); - } - return(1); - } - } - return(0); -} - -int32_t hush_scpublic(uint32_t tiptime); - -bool CheckTransactionWithoutProofVerification(uint32_t tiptime,const CTransaction& tx, CValidationState &state) -{ - // Basic checks that don't depend on any context - int32_t invalid_private_taddr=0,z_z=0,z_t=0,t_z=0,acpublic = hush_scpublic(tiptime); - /** - * Previously: - * 1. The consensus rule below was: - * if (tx.nVersion < SPROUT_MIN_TX_VERSION) { ... } - * which checked if tx.nVersion fell within the range: - * INT32_MIN <= tx.nVersion < SPROUT_MIN_TX_VERSION - * 2. The parser allowed tx.nVersion to be negative - * - * Now: - * 1. The consensus rule checks to see if tx.Version falls within the range: - * 0 <= tx.nVersion < SPROUT_MIN_TX_VERSION - * 2. The previous consensus rule checked for negative values within the range: - * INT32_MIN <= tx.nVersion < 0 - * This is unnecessary for Overwinter transactions since the parser now - * interprets the sign bit as fOverwintered, so tx.nVersion is always >=0, - * and when Overwinter is not active ContextualCheckTransaction rejects - * transactions with fOverwintered set. 
When fOverwintered is set, - * this function and ContextualCheckTransaction will together check to - * ensure tx.nVersion avoids the following ranges: - * 0 <= tx.nVersion < OVERWINTER_MIN_TX_VERSION - * OVERWINTER_MAX_TX_VERSION < tx.nVersion <= INT32_MAX - */ - if (!tx.fOverwintered && tx.nVersion < SPROUT_MIN_TX_VERSION) { - return state.DoS(100, error("CheckTransaction(): version too low"), - REJECT_INVALID, "bad-txns-version-too-low"); - } else if (tx.fOverwintered) { - if (tx.nVersion < OVERWINTER_MIN_TX_VERSION) { - return state.DoS(100, error("CheckTransaction(): overwinter version too low"), - REJECT_INVALID, "bad-tx-overwinter-version-too-low"); - } - if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID && - tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) { - return state.DoS(100, error("CheckTransaction(): unknown tx version group id"), - REJECT_INVALID, "bad-tx-version-group-id"); - } - if (tx.nExpiryHeight >= TX_EXPIRY_HEIGHT_THRESHOLD) { - return state.DoS(100, error("CheckTransaction(): expiry height is too high"), - REJECT_INVALID, "bad-tx-expiry-height-too-high"); - } - } - - // Transactions containing empty `vin` must have non-empty `vShieldedSpend`. - if (tx.vin.empty() && tx.vShieldedSpend.empty()) - return state.DoS(10, error("CheckTransaction(): vin empty"), - REJECT_INVALID, "bad-txns-vin-empty"); - - // Transactions containing empty `vout` must have non-empty `vShieldedOutput`. 
- if (tx.vout.empty() && tx.vShieldedOutput.empty()) - return state.DoS(10, error("CheckTransaction(): vout empty"), - REJECT_INVALID, "bad-txns-vout-empty"); - - // Size limits - //BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE(chainActive.LastTip()->GetHeight()+1) >= MAX_TX_SIZE_AFTER_SAPLING); // sanity - BOOST_STATIC_ASSERT(MAX_TX_SIZE_AFTER_SAPLING > MAX_TX_SIZE_BEFORE_SAPLING); // sanity - if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_AFTER_SAPLING) - return state.DoS(100, error("CheckTransaction(): size limits failed"), - REJECT_INVALID, "bad-txns-oversize"); - - // Check for negative or overflow output values - CAmount nValueOut = 0; - int32_t iscoinbase = tx.IsCoinBase(); - BOOST_FOREACH(const CTxOut& txout, tx.vout) - { - if (txout.nValue < 0) - return state.DoS(100, error("CheckTransaction(): txout.nValue negative"), - REJECT_INVALID, "bad-txns-vout-negative"); - if (txout.nValue > MAX_MONEY) - { - fprintf(stderr,"%.8f > max %.8f\n",(double)txout.nValue/COIN,(double)MAX_MONEY/COIN); - return state.DoS(100, error("CheckTransaction(): txout.nValue too high"),REJECT_INVALID, "bad-txns-vout-toolarge"); - } - if ( ASSETCHAINS_PRIVATE != 0 ) - { - //fprintf(stderr,"private chain nValue %.8f iscoinbase.%d\n",(double)txout.nValue/COIN,iscoinbase); - if (iscoinbase == 0 && txout.nValue > 0) - { - char destaddr[65]; - Getscriptaddress(destaddr,txout.scriptPubKey); - - if ( hush_isnotaryvout(destaddr,tiptime) == 0 ) - { - const bool isburn = (strcmp(destaddr,BURN_ADDRESS) == 0); - if ((ASSETCHAINS_BURN == 1) && isburn && tx.vin.empty()) { - // -ac_burn=1 means only zaddrs can send to the burn address - fprintf(stderr,"%s: allowing zaddr to send to burn address %s on private chain because ac_burn=1\n", __func__, destaddr); - } else if ((ASSETCHAINS_BURN == 2) && isburn) { - // -ac_burn=2 allows notary taddrs to send directly to the burn address - fprintf(stderr,"%s: allowing burn address %s on private chain because ac_burn=2\n", __func__, destaddr); - 
} else { - invalid_private_taddr = 1; - fprintf(stderr,"%s: invalid taddr %s on private chain!\n", __func__, destaddr); - } - } - } - } - if ( txout.scriptPubKey.size() > DRAGON_MAXSCRIPTSIZE ) - return state.DoS(100, error("CheckTransaction(): txout.scriptPubKey.size() too big"),REJECT_INVALID, "bad-txns-opret-too-big"); - nValueOut += txout.nValue; - if (!MoneyRange(nValueOut)) - return state.DoS(100, error("CheckTransaction(): txout total out of range"), - REJECT_INVALID, "bad-txns-txouttotal-toolarge"); - } - - // Check for non-zero valueBalance when there are no Sapling inputs or outputs - if (tx.vShieldedSpend.empty() && tx.vShieldedOutput.empty() && tx.valueBalance != 0) { - return state.DoS(100, error("CheckTransaction(): tx.valueBalance has no sources or sinks"), - REJECT_INVALID, "bad-txns-valuebalance-nonzero"); - } - if ( acpublic != 0 && (tx.vShieldedSpend.empty() == 0 || tx.vShieldedOutput.empty() == 0) ) - { - return state.DoS(100, error("CheckTransaction(): this is a public chain, no sapling allowed"), - REJECT_INVALID, "bad-txns-acpublic-chain"); - } - if ( ASSETCHAINS_PRIVATE != 0 && invalid_private_taddr != 0 && tx.vShieldedSpend.empty() == 0 ) - { - return state.DoS(100, error("CheckTransaction(): this is a private chain, no sapling -> taddr"), - REJECT_INVALID, "bad-txns-acprivate-chain"); - } - // Check for overflow valueBalance - if (tx.valueBalance > MAX_MONEY || tx.valueBalance < -MAX_MONEY) { - return state.DoS(100, error("CheckTransaction(): abs(tx.valueBalance) too large"), - REJECT_INVALID, "bad-txns-valuebalance-toolarge"); - } - - if (tx.valueBalance <= 0) { - // NB: negative valueBalance "takes" money from the transparent value pool just as outputs do - nValueOut += -tx.valueBalance; - - if (!MoneyRange(nValueOut)) { - return state.DoS(100, error("CheckTransaction(): txout total out of range"), - REJECT_INVALID, "bad-txns-txouttotal-toolarge"); - } - } - - if ( ASSETCHAINS_PRIVATE != 0 && invalid_private_taddr != 0 ) - { - static 
uint32_t counter; - if ( counter++ < 10 ) - fprintf(stderr,"found taddr in private chain: z_z.%d z_t.%d t_z.%d vinsize.%d\n",z_z,z_t,t_z,(int32_t)tx.vin.size()); - if ( z_t == 0 || z_z != 0 || t_z != 0 || tx.vin.size() != 0 ) - return state.DoS(100, error("CheckTransaction(): this is a private chain, sending to taddrs not allowed"),REJECT_INVALID, "bad-txns-acprivacy-chain"); - } - if ( ASSETCHAINS_TXPOW != 0 ) - { - // BTC genesis coinbase 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b - uint256 txid = tx.GetHash(); - if ( ((ASSETCHAINS_TXPOW & 2) != 0 && iscoinbase != 0) || ((ASSETCHAINS_TXPOW & 1) != 0 && iscoinbase == 0) ) - { - if ( ((uint8_t *)&txid)[0] != 0 || ((uint8_t *)&txid)[31] != 0 ) - { - uint256 genesistxid = uint256S("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"); - if ( txid != genesistxid ) - { - fprintf(stderr,"private chain iscoinbase.%d invalid txpow.%d txid.%s\n",iscoinbase,ASSETCHAINS_TXPOW,txid.GetHex().c_str()); - return state.DoS(100, error("CheckTransaction(): this is a txpow chain, must have 0x00 ends"),REJECT_INVALID, "bad-txns-actxpow-chain"); - } - } - } - } - - // Ensure input values do not exceed MAX_MONEY - // We have not resolved the txin values at this stage, - // but we do know what the joinsplits claim to add - // to the value pool. 
- { - CAmount nValueIn = 0; - - // Also check for Sapling - if (tx.valueBalance >= 0) { - // NB: positive valueBalance "adds" money to the transparent value pool, just as inputs do - nValueIn += tx.valueBalance; - - if (!MoneyRange(nValueIn)) { - return state.DoS(100, error("CheckTransaction(): txin total out of range"), - REJECT_INVALID, "bad-txns-txintotal-toolarge"); - } - } - } - - // Check for duplicate inputs - set vInOutPoints; - BOOST_FOREACH(const CTxIn& txin, tx.vin) - { - if (vInOutPoints.count(txin.prevout)) - return state.DoS(100, error("CheckTransaction(): duplicate inputs"), - REJECT_INVALID, "bad-txns-inputs-duplicate"); - vInOutPoints.insert(txin.prevout); - } - - // Check for duplicate sapling nullifiers in this transaction - { - set vSaplingNullifiers; - BOOST_FOREACH(const SpendDescription& spend_desc, tx.vShieldedSpend) - { - if (vSaplingNullifiers.count(spend_desc.nullifier)) - return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"), - REJECT_INVALID, "bad-spend-description-nullifiers-duplicate"); - - vSaplingNullifiers.insert(spend_desc.nullifier); - } - } - - if (tx.IsMint()) { - // There should be no joinsplits in a coinbase transaction - if (tx.vjoinsplit.size() > 0) - return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"), - REJECT_INVALID, "bad-cb-has-joinsplits"); - - // A coinbase transaction cannot have spend descriptions or output descriptions - if (tx.vShieldedSpend.size() > 0) - return state.DoS(100, error("CheckTransaction(): coinbase has spend descriptions"), - REJECT_INVALID, "bad-cb-has-spend-description"); - if (tx.vShieldedOutput.size() > 0) - return state.DoS(100, error("CheckTransaction(): coinbase has output descriptions"), - REJECT_INVALID, "bad-cb-has-output-description"); - - if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) - return state.DoS(100, error("CheckTransaction(): coinbase script size"), - REJECT_INVALID, "bad-cb-length"); - } else { - 
BOOST_FOREACH(const CTxIn& txin, tx.vin) - if (txin.prevout.IsNull()) - return state.DoS(10, error("CheckTransaction(): prevout is null"), - REJECT_INVALID, "bad-txns-prevout-null"); - } - - return true; -} - -CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree) -{ - { - LOCK(mempool.cs); - uint256 hash = tx.GetHash(); - double dPriorityDelta = 0; - CAmount nFeeDelta = 0; - mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta); - if (dPriorityDelta > 0 || nFeeDelta > 0) - return 0; - } - - CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes); - - if (fAllowFree) - { - // There is a free transaction area in blocks created by most miners, - // * If we are relaying we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000 - // to be considered to fall into this category. We don't want to encourage sending - // multiple transactions instead of one big transaction to avoid fees. - if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000)) - nMinFee = 0; - } - - if (!MoneyRange(nMinFee)) - nMinFee = MAX_MONEY; - return nMinFee; -} - - -bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,bool* pfMissingInputs, bool fRejectAbsurdFee, int dosLevel) -{ - AssertLockHeld(cs_main); - const uint32_t z2zTransitionWindow = 10; - const uint32_t z2zTransitionStart = 340000 - z2zTransitionWindow; - const uint32_t nHeight = chainActive.Height(); - - // This only applies to HUSH3, other chains can start off z2z via ac_private=1 - if(ishush3) { - if((nHeight >= z2zTransitionStart) || (nHeight <= 340000)) { - // During the z2z transition window, only coinbase tx's as part of blocks are allowed - // Theory: We want an empty mempool at our fork block height, and the only way to assure that - // is to have an empty mempool for a few previous blocks, to take care of potential re-orgs - // and edge cases. 
This empty mempool assures there will be no transactions involving taddrs - // stuck in the mempool, when the z2z rule takes effect. - // Thanks to jl777 for helping design this - fprintf(stderr,"%s: rejecting all tx's during z2z transition window. Please retry after Block %d !!!\n", __func__,nHeight); - return false; - } - } - if (pfMissingInputs) - *pfMissingInputs = false; - uint32_t tiptime; - int flag=0,nextBlockHeight = chainActive.Height() + 1; - auto consensusBranchId = CurrentEpochBranchId(nextBlockHeight, Params().GetConsensus()); - if ( nextBlockHeight <= 1 || chainActive.LastTip() == 0 ) - tiptime = (uint32_t)time(NULL); - else tiptime = (uint32_t)chainActive.LastTip()->nTime; - - auto verifier = libzcash::ProofVerifier::Strict(); - - if (!CheckTransaction(tiptime,tx, state, verifier, 0, 0)) - { - return error("AcceptToMemoryPool: CheckTransaction failed"); - } - - // Reject duplicate output proofs in a single ztx in mempool - // Migrate this to CheckTransaction() to make it a consensus requirement - { - set vSaplingOutputProof; - BOOST_FOREACH(const OutputDescription& output, tx.vShieldedOutput) - { - if (vSaplingOutputProof.count(output.zkproof)) - return state.Invalid(error("AcceptToMemoryPool: duplicate output proof"),REJECT_DUPLICATE_OUTPUT_PROOF, "bad-txns-duplicate-output-proof"); - vSaplingOutputProof.insert(output.zkproof); - } - } - - // Reject duplicate spend proofs in a single ztx in mempool - // Migrate this to CheckTransaction() to make it a consensus requirement - { - set vSaplingSpendProof; - BOOST_FOREACH(const SpendDescription& spend, tx.vShieldedSpend) - { - if (vSaplingSpendProof.count(spend.zkproof)) - return state.Invalid(error("AcceptToMemoryPool: duplicate spend proof"),REJECT_DUPLICATE_SPEND_PROOF, "bad-txns-duplicate-spend-proof"); - vSaplingSpendProof.insert(spend.zkproof); - } - } - - // DoS level set to 10 to be more forgiving. 
- // Check transaction contextually against the set of consensus rules which apply in the next block to be mined. - if (!ContextualCheckTransaction(0,0,0,tx, state, nextBlockHeight, (dosLevel == -1) ? 10 : dosLevel)) - { - return error("AcceptToMemoryPool: ContextualCheckTransaction failed"); - } -//fprintf(stderr,"addmempool 2\n"); - // Coinbase is only valid in a block, not as a loose transaction - if (tx.IsCoinBase()) - { - fprintf(stderr,"AcceptToMemoryPool coinbase as individual tx\n"); - return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"),REJECT_INVALID, "coinbase"); - } - - // Rather not work on nonstandard transactions (unless -testnet/-regtest) - string reason; - if (Params().RequireStandard() && !IsStandardTx(tx, reason, nextBlockHeight)) - { - // - //fprintf(stderr,"AcceptToMemoryPool reject nonstandard transaction: %s\nscriptPubKey: %s\n",reason.c_str(),tx.vout[0].scriptPubKey.ToString().c_str()); - return state.DoS(0,error("AcceptToMemoryPool: nonstandard transaction: %s", reason),REJECT_NONSTANDARD, reason); - } - - // Only accept nLockTime-using transactions that can be mined in the next - // block; we don't want our mempool filled up with transactions that can't - // be mined yet. - if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) - { - //fprintf(stderr,"AcceptToMemoryPool reject non-final\n"); - return state.DoS(0, false, REJECT_NONSTANDARD, "non-final"); - } - // is it already in the memory pool? 
- uint256 hash = tx.GetHash(); - if (pool.exists(hash)) - { - //fprintf(stderr,"already in mempool\n"); - return state.Invalid(false, REJECT_DUPLICATE, "already in mempool"); - } - - // Check for conflicts with in-memory transactions - { - LOCK(pool.cs); // protect pool.mapNextTx - for (unsigned int i = 0; i < tx.vin.size(); i++) - { - COutPoint outpoint = tx.vin[i].prevout; - if (pool.mapNextTx.count(outpoint)) - { - // Disable replacement feature for now - return false; - } - } - - for (const SpendDescription &spendDescription : tx.vShieldedSpend) { - if (pool.nullifierExists(spendDescription.nullifier, SAPLING)) { - return false; - } - } - } - - { - CCoinsView dummy; - CCoinsViewCache view(&dummy); - int64_t interest; - CAmount nValueIn = 0; - { - LOCK(pool.cs); - CCoinsViewMemPool viewMemPool(pcoinsTip, pool); - view.SetBackend(viewMemPool); - - // do we already have it? - if (view.HaveCoins(hash)) { - //fprintf(stderr,"view.HaveCoins(hash) error\n"); - return state.Invalid(false, REJECT_DUPLICATE, "already have coins"); - } - - { - // do all inputs exist? - // Note that this does not check for the presence of actual outputs (see the next check for that), - // and only helps with filling in pfMissingInputs (to determine missing vs spent). - BOOST_FOREACH(const CTxIn txin, tx.vin) - { - if (!view.HaveCoins(txin.prevout.hash)) { - if (pfMissingInputs) - *pfMissingInputs = true; - //fprintf(stderr,"missing inputs\n"); - return false; - // https://github.com/zcash/zcash/blob/master/src/main.cpp#L1490 - // state.DoS(0, error("AcceptToMemoryPool: tx inputs not found"),REJECT_INVALID, "bad-txns-inputs-missing"); - } - } - // are the actual inputs available? - if (!view.HaveInputs(tx)) { - //fprintf(stderr,"accept failure. inputs-spent\n"); - return state.Invalid(error("AcceptToMemoryPool: inputs already spent"),REJECT_DUPLICATE, "bad-txns-inputs-spent"); - } - } - - // are the zaddr requirements met? 
- if (!view.HaveShieldedRequirements(tx)) { - //fprintf(stderr,"accept failure. ztx reqs not met\n"); - return state.Invalid(error("AcceptToMemoryPool: shielded requirements not met"),REJECT_DUPLICATE, "bad-txns-shielded-requirements-not-met"); - } - - // Bring the best block into scope - view.GetBestBlock(); - - nValueIn = view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime); - // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool - view.SetBackend(dummy); - } - // Check for non-standard pay-to-script-hash in inputs - if (Params().RequireStandard() && !AreInputsStandard(tx, view, consensusBranchId)) - return error("AcceptToMemoryPool: reject nonstandard transaction input"); - - // Check that the transaction doesn't have an excessive number of - // sigops, making it impossible to mine. Since the coinbase transaction - // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than - // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than - // merely non-standard transaction. - unsigned int nSigOps = GetLegacySigOpCount(tx); - nSigOps += GetP2SHSigOpCount(tx, view); - if (nSigOps > MAX_STANDARD_TX_SIGOPS) - { - fprintf(stderr,"accept failure.4\n"); - return state.DoS(1, error("AcceptToMemoryPool: too many sigops %s, %d > %d", hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),REJECT_NONSTANDARD, "bad-txns-too-many-sigops"); - } - - CAmount nValueOut = tx.GetValueOut(); - CAmount nFees = nValueIn-nValueOut; - double dPriority = view.GetPriority(tx, chainActive.Height()); - if ( nValueOut > 777777*COIN && HUSH_VALUETOOBIG(nValueOut - 777777*COIN) != 0 ) // some room for blockreward and txfees - return state.DoS(100, error("AcceptToMemoryPool: GetValueOut too big"),REJECT_INVALID,"tx valueout is too big"); - - // Keep track of transactions that spend a coinbase, which we re-scan - // during reorgs to ensure COINBASE_MATURITY is still met. 
- bool fSpendsCoinbase = false; - BOOST_FOREACH(const CTxIn &txin, tx.vin) { - const CCoins *coins = view.AccessCoins(txin.prevout.hash); - if (coins->IsCoinBase()) { - fSpendsCoinbase = true; - break; - } - } - // Grab the branch ID we expect this transaction to commit to. We don't - // yet know if it does, but if the entry gets added to the mempool, then - // it has passed ContextualCheckInputs and therefore this is correct. - auto consensusBranchId = CurrentEpochBranchId(chainActive.Height() + 1, Params().GetConsensus()); - - CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx), fSpendsCoinbase, consensusBranchId); - unsigned int nSize = entry.GetTxSize(); - - // Accept a tx if it contains zspends and has at least the default fee specified by z_sendmany. - if (tx.vShieldedSpend.size() > 0 && nFees >= ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE) { - // In future we will we have more accurate and dynamic computation of fees, derpz - } else { - // Don't accept it if it can't get into a block, yallz - CAmount txMinFee = GetMinRelayFee(tx, nSize, true); - if (fLimitFree && nFees < txMinFee) { - //fprintf(stderr,"accept failure.5\n"); - return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d",hash.ToString(), nFees, txMinFee),REJECT_INSUFFICIENTFEE, "insufficient fee"); - } - } - - // Require that free transactions have sufficient priority to be mined in the next block. - if (GetBoolArg("-relaypriority", false) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) { - fprintf(stderr,"accept failure.6\n"); - return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); - } - - // Continuously rate-limit free (really, very-low-fee) transactions - // This mitigates 'penny-flooding' -- sending thousands of free transactions just to - // be annoying or make others' transactions take longer to confirm. 
- if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize) ) - { - static CCriticalSection csFreeLimiter; - static double dFreeCount; - static int64_t nLastTime; - int64_t nNow = GetTime(); - - LOCK(csFreeLimiter); - - // Use an exponentially decaying ~10-minute window: - dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime)); - nLastTime = nNow; - // -limitfreerelay unit is thousand-bytes-per-minute - // At default rate it would take over a month to fill 1GB - if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000) - { - fprintf(stderr,"accept failure.7\n"); - return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"), REJECT_INSUFFICIENTFEE, "rate limited free transaction"); - } - LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize); - dFreeCount += nSize; - } - - fRejectAbsurdFee = false; - - if ( fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000 && nFees > nValueOut/19) - // Disable checks for absurd fees when adding to the mempool. Instead, this check is done - // when a user attempts to make a transaction with an absurd fee and only rejects absurd - // fees when OP_RETURN data is NOT being used. This means users making normal financial - // transactions (z2z) are protected from absurd fees, it is only users who are storing - // arbitrary data via a z2t transaction are allowed to (or potentially required) to pay high fees - // It would be nice to detect the use of OP_RETURN right here but it seems to only be known - // inside of IsStandard() inside of IsStandardTx() and we want to avoid doing expensive checks - // multiple times. 
- { - string errmsg = strprintf("absurdly high fees %s, %d > %d", - hash.ToString(), - nFees, ::minRelayTxFee.GetFee(nSize) * 10000); - LogPrint("mempool", errmsg.c_str()); - return state.Error("AcceptToMemoryPool: " + errmsg); - } - //fprintf(stderr,"addmempool 6\n"); - - // Check against previous transactions - // This is done last to help prevent CPU exhaustion denial-of-service attacks. - PrecomputedTransactionData txdata(tx); - if (!ContextualCheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId)) - { - //fprintf(stderr,"accept failure.9\n"); - return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString()); - } - - // Check again against just the consensus-critical mandatory script - // verification flags, in case of bugs in the standard flags that cause - // transactions to pass as valid when they're actually invalid. For - // instance the STRICTENC flag was incorrectly allowing certain - // CHECKSIG NOT scripts to pass, even though they were invalid. - // - // There is a similar check in CreateNewBlock() to prevent creating - // invalid blocks, however allowing such transactions into the mempool - // can be exploited as a DoS attack. - // XXX: is this neccesary for CryptoConditions? - if ( HUSH_CONNECTING <= 0 && chainActive.LastTip() != 0 ) - { - flag = 1; - HUSH_CONNECTING = (1<<30) + (int32_t)chainActive.LastTip()->GetHeight() + 1; - } - - if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId)) - { - if ( flag != 0 ) - HUSH_CONNECTING = -1; - return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! 
ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString()); - } - if ( flag != 0 ) - HUSH_CONNECTING = -1; - - { - LOCK(pool.cs); - // Store transaction in memory - pool.addUnchecked(hash, entry, !IsInitialBlockDownload()); - - // Add memory address index - if (fAddressIndex) { - pool.addAddressIndex(entry, view); - } - - // Add memory spent index - if (fSpentIndex) { - pool.addSpentIndex(entry, view); - } - } - } - return true; -} - -bool CCTxFixAcceptToMemPoolUnchecked(CTxMemPool& pool, const CTransaction &tx) -{ - // called from CheckBlock which is in cs_main and mempool.cs locks already. - auto consensusBranchId = CurrentEpochBranchId(chainActive.Height() + 1, Params().GetConsensus()); - CTxMemPoolEntry entry(tx, 0, GetTime(), 0, chainActive.Height(), mempool.HasNoInputsOf(tx), false, consensusBranchId); - //fprintf(stderr, "adding %s to mempool from block %d\n",tx.GetHash().ToString().c_str(),chainActive.GetHeight()); - pool.addUnchecked(tx.GetHash(), entry, false); - return true; -} +// [mempool_accept.cpp] GetMinRelayFee, AcceptToMemoryPool, CCTxFixAcceptToMemPoolUnchecked extracted bool GetTimestampIndex(const unsigned int &high, const unsigned int &low, const bool fActiveOnly, std::vector > &hashes) { @@ -2115,21 +904,7 @@ struct CompareBlocksByHeightMain else return(coins.vout[n].nValue); }*/ -bool myAddtomempool(CTransaction &tx, CValidationState *pstate, bool fSkipExpiry) -{ - CValidationState state; - if (!pstate) - pstate = &state; - CTransaction Ltx; bool fMissingInputs,fOverrideFees = false; - if ( mempool.lookup(tx.GetHash(),Ltx) == 0 ) - { - if ( !fSkipExpiry ) - return(AcceptToMemoryPool(mempool, *pstate, tx, false, &fMissingInputs, !fOverrideFees, -1)); - else - return(CCTxFixAcceptToMemPoolUnchecked(mempool,tx)); - } - else return(true); -} +// [mempool_accept.cpp] myAddtomempool extracted bool myGetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock) { @@ -2267,81 +1042,8 @@ bool 
GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock // CBlock and CBlockIndex -bool WriteBlockToDisk(const CBlock& block, CDiskBlockPos& pos, const CMessageHeader::MessageStartChars& messageStart) -{ - // Open history file to append - CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION); - if (fileout.IsNull()) - return error("WriteBlockToDisk: OpenBlockFile failed"); +// [block_processing.cpp] Lines 1072-1146 extracted - // Write index header - unsigned int nSize = GetSerializeSize(fileout, block); - fileout << FLATDATA(messageStart) << nSize; - - // Write block - long fileOutPos = ftell(fileout.Get()); - if (fileOutPos < 0) - return error("WriteBlockToDisk: ftell failed"); - pos.nPos = (unsigned int)fileOutPos; - fileout << block; - - return true; -} - -bool ReadBlockFromDisk(int32_t height,CBlock& block, const CDiskBlockPos& pos,bool checkPOW) -{ - uint8_t pubkey33[33]; - block.SetNull(); - - // Open history file to read - CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION); - if (filein.IsNull()) - { - //fprintf(stderr,"readblockfromdisk err A\n"); - return error("ReadBlockFromDisk: OpenBlockFile failed for %s", pos.ToString()); - } - - // Read block - try { - filein >> block; - } - catch (const std::exception& e) { - fprintf(stderr,"readblockfromdisk err B\n"); - return error("%s: Deserialize or I/O error - %s at %s", __func__, e.what(), pos.ToString()); - } - // Check the header - if ( 0 && checkPOW != 0 ) - { - hush_block2pubkey33(pubkey33,(CBlock *)&block); - if (!(CheckEquihashSolution(&block, Params()) && CheckProofOfWork(block, pubkey33, height, Params().GetConsensus()))) - { - int32_t i; for (i=0; i<33; i++) - fprintf(stderr,"%02x",pubkey33[i]); - fprintf(stderr," warning unexpected diff at ht.%d\n",height); - - return error("ReadBlockFromDisk: Errors in block header at %s", pos.ToString()); - } - } - return true; -} - -bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex,bool checkPOW) -{ - 
if ( pindex == 0 ) - return false; - if (!ReadBlockFromDisk(pindex->GetHeight(),block, pindex->GetBlockPos(),checkPOW)) - return false; - if (block.GetHash() != pindex->GetBlockHash()) - return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s", - pindex->ToString(), pindex->GetBlockPos().ToString()); - return true; -} - -CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams) -{ - // fprintf(stderr,"%s: nHeight=%d\n", __func__, nHeight); - return hush_sc_block_subsidy(nHeight); -} bool IsInitialBlockDownload() { @@ -2424,82 +1126,13 @@ int IsNotInSync() return false; } -static bool fLargeWorkForkFound = false; -static bool fLargeWorkInvalidChainFound = false; -static CBlockIndex *pindexBestForkTip = NULL; -static CBlockIndex *pindexBestForkBase = NULL; +bool fLargeWorkForkFound = false; +bool fLargeWorkInvalidChainFound = false; +CBlockIndex *pindexBestForkTip = NULL; +CBlockIndex *pindexBestForkBase = NULL; -void CheckForkWarningConditions() -{ - //fprintf(stderr,"%s checking for IBD\n", __func__); - AssertLockHeld(cs_main); - // Before we get past initial download, we cannot reliably alert about forks - // (we assume we don't get stuck on a fork before finishing our initial sync) - if (IsInitialBlockDownload()) - return; +// [block_processing.cpp] Lines 1234-1304 extracted - //fprintf(stderr,"%s not in IBD\n", __func__); - // If our best fork is no longer within 288 blocks (+/- 12 hours if no one mines it) - // of our head, drop it - if (pindexBestForkTip && chainActive.Height() - pindexBestForkTip->GetHeight() >= 288) - pindexBestForkTip = NULL; - - if (pindexBestForkTip || (pindexBestInvalid && pindexBestInvalid->chainPower > (chainActive.LastTip()->chainPower + (GetBlockProof(*chainActive.LastTip()) * 6)))) - { - if (!fLargeWorkForkFound && pindexBestForkBase) - { - std::string warning = std::string("'Warning: Large-work fork detected, forking after block ") + 
pindexBestForkBase->phashBlock->ToString() + std::string("'"); - LogPrintf("%s: %s\n", __func__, warning.c_str()); - } - if (pindexBestForkTip && pindexBestForkBase) - { - LogPrintf("%s: Warning: Large valid fork found\n forking the chain at height %d (%s)\n lasting to height %d (%s).\nChain state database corruption likely.\n", __func__, - pindexBestForkBase->GetHeight(), pindexBestForkBase->phashBlock->ToString(), - pindexBestForkTip->GetHeight(), pindexBestForkTip->phashBlock->ToString()); - fLargeWorkForkFound = true; - } else { - std::string warning = std::string("Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely."); - LogPrintf("%s: %s\n", __func__, warning.c_str()); - fLargeWorkInvalidChainFound = true; - } - } else { - fLargeWorkForkFound = false; - fLargeWorkInvalidChainFound = false; - } -} - -void CheckForkWarningConditionsOnNewFork(CBlockIndex* pindexNewForkTip) -{ - AssertLockHeld(cs_main); - // If we are on a fork that is sufficiently large, set a warning flag - CBlockIndex* pfork = pindexNewForkTip; - CBlockIndex* plonger = chainActive.LastTip(); - while (pfork && pfork != plonger) - { - while (plonger && plonger->GetHeight() > pfork->GetHeight()) - plonger = plonger->pprev; - if (pfork == plonger) - break; - pfork = pfork->pprev; - } - - // We define a condition where we should warn the user about as a fork of at least 7 blocks - // with a tip within 72 blocks (+/- 3 hours if no one mines it) of ours - // We use 7 blocks rather arbitrarily as it represents just under 10% of sustained network - // hash rate operating on the fork. 
- // or a chain that is entirely longer than ours and invalid (note that this should be detected by both) - // We define it this way because it allows us to only store the highest fork tip (+ base) which meets - // the 7-block condition and from this always have the most-likely-to-cause-warning fork - if (pfork && (!pindexBestForkTip || (pindexBestForkTip && pindexNewForkTip->GetHeight() > pindexBestForkTip->GetHeight())) && - pindexNewForkTip->chainPower - pfork->chainPower > (GetBlockProof(*pfork) * 7) && - chainActive.Height() - pindexNewForkTip->GetHeight() < 72) - { - pindexBestForkTip = pindexNewForkTip; - pindexBestForkBase = pfork; - } - - CheckForkWarningConditions(); -} // Requires cs_main. void Misbehaving(NodeId pnode, int howmuch) @@ -2521,25 +1154,10 @@ void Misbehaving(NodeId pnode, int howmuch) LogPrintf("%s: %s (%d -> %d)\n", __func__, state->name, state->nMisbehavior-howmuch, state->nMisbehavior); } -void static InvalidChainFound(CBlockIndex* pindexNew) -{ - if (!pindexBestInvalid || pindexNew->chainPower > pindexBestInvalid->chainPower) - pindexBestInvalid = pindexNew; +// [block_processing.cpp] Lines 1326-1342 extracted - LogPrintf("%s: invalid block=%s height=%d log2_work=%.8g date=%s\n", __func__, - pindexNew->GetBlockHash().ToString(), pindexNew->GetHeight(), - log(pindexNew->chainPower.chainWork.getdouble())/log(2.0), - DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime())); - CBlockIndex *tip = chainActive.LastTip(); - assert (tip); - LogPrintf("%s: current best=%s height=%d log2_work=%.8g date=%s\n", __func__, - tip->GetBlockHash().ToString(), chainActive.Height(), - log(tip->chainPower.chainWork.getdouble())/log(2.0), - DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime())); - CheckForkWarningConditions(); -} -void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { +void InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state) { int nDoS = 0; if (state.IsInvalid(nDoS)) { 
std::map::iterator it = mapBlockSource.find(pindex->GetBlockHash()); @@ -2558,2903 +1176,12 @@ void static InvalidBlockFound(CBlockIndex *pindex, const CValidationState &state } } -void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight) -{ - if (!tx.IsMint()) // mark inputs spent - { - txundo.vprevout.reserve(tx.vin.size()); - BOOST_FOREACH(const CTxIn &txin, tx.vin) { - //if (tx.IsPegsImport() && txin.prevout.n==10e8) continue; - CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash); - unsigned nPos = txin.prevout.n; +// [tx_validation.cpp] UpdateCoins through ContextualCheckInputs extracted - if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull()) - assert(false); - // mark an outpoint spent, and construct undo information - txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos])); - coins->Spend(nPos); - if (coins->vout.size() == 0) { - CTxInUndo& undo = txundo.vprevout.back(); - undo.nHeight = coins->nHeight; - undo.fCoinBase = coins->fCoinBase; - undo.nVersion = coins->nVersion; - } - } - } - - // spend nullifiers - inputs.SetNullifiers(tx, true); - - inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight); // add outputs -} - -void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight) -{ - CTxUndo txundo; - UpdateCoins(tx, inputs, txundo, nHeight); -} - -bool CScriptCheck::operator()() { - const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; - ServerTransactionSignatureChecker checker(ptxTo, nIn, amount, cacheStore, *txdata); - if (!VerifyScript(scriptSig, scriptPubKey, nFlags, checker, consensusBranchId, &error)) { - return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error)); - } - return true; -} - -int GetSpendHeight(const CCoinsViewCache& inputs) -{ - LOCK(cs_main); - CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; - return pindexPrev->GetHeight() + 1; -} - -namespace Consensus { - bool 
CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, const Consensus::Params& consensusParams) - { - // This doesn't trigger the DoS code on purpose; if it did, it would make it easier - // for an attacker to attempt to split the network. - if (!inputs.HaveInputs(tx)) - return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString())); - - // are the shielded requirements met? - if (!inputs.HaveShieldedRequirements(tx)) - return state.Invalid(error("CheckInputs(): %s shielded requirements not met", tx.GetHash().ToString())); - - CAmount nValueIn = 0; - CAmount nFees = 0; - for (unsigned int i = 0; i < tx.vin.size(); i++) - { - const COutPoint &prevout = tx.vin[i].prevout; - const CCoins *coins = inputs.AccessCoins(prevout.hash); - assert(coins); - - if (coins->IsCoinBase()) { - // ensure that output of coinbases are not still time locked - if (coins->TotalTxValue() >= ASSETCHAINS_TIMELOCKGTE) - { - uint64_t unlockTime = hush_block_unlocktime(coins->nHeight); - if (nSpendHeight < unlockTime) { - return state.DoS(10, - error("CheckInputs(): tried to spend coinbase that is timelocked until block %d", unlockTime), - REJECT_INVALID, "bad-txns-premature-spend-of-coinbase"); - } - } - - // Ensure that coinbases are matured, no DoS as retry may work later - if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) { - return state.Invalid( - error("CheckInputs(): tried to spend coinbase at depth %d/%d", nSpendHeight - coins->nHeight, (int32_t)COINBASE_MATURITY), - REJECT_INVALID, "bad-txns-premature-spend-of-coinbase"); - } - - } - - // Check for negative or overflow input values - nValueIn += coins->vout[prevout.n].nValue; - if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn)) - return state.DoS(100, error("CheckInputs(): txin values out of range"), - REJECT_INVALID, "bad-txns-inputvalues-outofrange"); - - } - - nValueIn += tx.GetShieldedValueIn(); - if 
(!MoneyRange(nValueIn)) - return state.DoS(100, error("CheckInputs(): shielded input to transparent value pool out of range"), - REJECT_INVALID, "bad-txns-inputvalues-outofrange"); - - if (nValueIn < tx.GetValueOut()) - { - fprintf(stderr,"spentheight.%d valuein %s vs %s error\n",nSpendHeight,FormatMoney(nValueIn).c_str(), FormatMoney(tx.GetValueOut()).c_str()); - return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s) diff %.8f", - tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut()),((double)nValueIn - tx.GetValueOut())/COIN),REJECT_INVALID, "bad-txns-in-belowout"); - } - // Tally transaction fees - CAmount nTxFee = nValueIn - tx.GetValueOut(); - if (nTxFee < 0) - return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()), - REJECT_INVALID, "bad-txns-fee-negative"); - nFees += nTxFee; - if (!MoneyRange(nFees)) - return state.DoS(100, error("CheckInputs(): nFees out of range"), - REJECT_INVALID, "bad-txns-fee-outofrange"); - - //NOTE: Since we have access to fee here, verify that opreturn pays - //required minimum fee, even though this is a check on outputs not - //inputs. If we don't do it here we would need to duplicate already - //done work somewhere else - - if ( ASSETCHAINS_MINOPRETURNFEE > 0 ) { - BOOST_FOREACH(const CTxOut& txout, tx.vout) { - const bool isopret = txout.scriptPubKey.IsOpReturn(); - - // HUSH+DRGX do not use -ac_minopreturnfee so this does not (yet) - // affect those chains, they will need a height activated consensus - // change - - if ( isopret ) { - // Is there any difference between nTxFee and nFees ? 
- // They seem to be 2 vars with the same value - fprintf(stderr,"%s: opreturn=1 nFees=%ld nTxFee=%ld\n", __func__, nFees, nTxFee); - if (nTxFee < ASSETCHAINS_MINOPRETURNFEE) { - return state.DoS(100,error("CheckInputs(): tx does not have required mininum fee for OP_RETURN"), REJECT_INVALID, "bad-txns-minopreturnfee"); - } - } - } - } - - return true; - } -}// namespace Consensus - -bool ContextualCheckInputs( - const CTransaction& tx, - CValidationState &state, - const CCoinsViewCache &inputs, - bool fScriptChecks, - unsigned int flags, - bool cacheStore, - PrecomputedTransactionData& txdata, - const Consensus::Params& consensusParams, - uint32_t consensusBranchId, - std::vector *pvChecks) -{ - if (!tx.IsMint()) - { - if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs), consensusParams)) { - return false; - } - - if (pvChecks) - pvChecks->reserve(tx.vin.size()); - - // The first loop above does all the inexpensive checks. - // Only if ALL inputs pass do we perform expensive ECDSA signature checks. - // Helps prevent CPU exhaustion attacks. - - // Skip ECDSA signature verification when connecting blocks - // before the last block chain checkpoint. This is safe because block merkle hashes are - // still computed and checked, and any change will be caught at the next checkpoint. 
- if (fScriptChecks) { - for (unsigned int i = 0; i < tx.vin.size(); i++) { - //if (tx.IsPegsImport() && i==0) continue; - const COutPoint &prevout = tx.vin[i].prevout; - const CCoins* coins = inputs.AccessCoins(prevout.hash); - assert(coins); - - // Verify signature - CScriptCheck check(*coins, tx, i, flags, cacheStore, consensusBranchId, &txdata); - if (pvChecks) { - pvChecks->push_back(CScriptCheck()); - check.swap(pvChecks->back()); - } else if (!check()) { - if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { - // Check whether the failure was caused by a - // non-mandatory script verification check, such as - // non-standard DER encodings or non-null dummy - // arguments; if so, don't trigger DoS protection to - // avoid splitting the network between upgraded and - // non-upgraded nodes. - CScriptCheck check2(*coins, tx, i, - flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, consensusBranchId, &txdata); - if (check2()) - return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError()))); - } - // Failures of other flags indicate a transaction that is - // invalid in new blocks, e.g. a invalid P2SH. We DoS ban - // such nodes as they are not following the protocol. That - // said during an upgrade careful thought should be taken - // as to the correct behavior - we may want to continue - // peering with non-upgraded nodes even after a soft-fork - // super-majority vote has passed. 
- return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError()))); - } - } - } - } - - return true; -} - -namespace { - - bool UndoWriteToDisk(const CBlockUndo& blockundo, CDiskBlockPos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart) - { - // Open history file to append - CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION); - if (fileout.IsNull()) - return error("%s: OpenUndoFile failed", __func__); - - // Write index header - unsigned int nSize = GetSerializeSize(fileout, blockundo); - fileout << FLATDATA(messageStart) << nSize; - - // Write undo data - long fileOutPos = ftell(fileout.Get()); - if (fileOutPos < 0) - return error("%s: ftell failed", __func__); - pos.nPos = (unsigned int)fileOutPos; - fileout << blockundo; - - // calculate & write checksum - CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); - hasher << hashBlock; - hasher << blockundo; - fileout << hasher.GetHash(); -//fprintf(stderr,"hashBlock.%s hasher.%s\n",hashBlock.GetHex().c_str(),hasher.GetHash().GetHex().c_str()); - return true; - } - - bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uint256& hashBlock) - { - // Open history file to read - CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION); - if (filein.IsNull()) - return error("%s: OpenBlockFile failed", __func__); - - // Read block - uint256 hashChecksum; - try { - filein >> blockundo; - filein >> hashChecksum; - } - catch (const std::exception& e) { - return error("%s: Deserialize or I/O error - %s", __func__, e.what()); - } - // Verify checksum - CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION); - hasher << hashBlock; - hasher << blockundo; - if (hashChecksum != hasher.GetHash()) - return error("%s: %s Checksum mismatch %s vs %s", __func__,hashBlock.GetHex().c_str(),hashChecksum.GetHex().c_str(),hasher.GetHash().GetHex().c_str()); - - return true; - } - - /** Abort 
with a message */ - bool AbortNode(const std::string& strMessage, const std::string& userMessage="") - { - strMiscWarning = strMessage; - LogPrintf("*** %s\n", strMessage); - uiInterface.ThreadSafeMessageBox( - userMessage.empty() ? _("Error: A fatal internal error occurred, see debug.log for details") : userMessage, - "", CClientUIInterface::MSG_ERROR); - StartShutdown(); - return false; - } - - bool AbortNode(CValidationState& state, const std::string& strMessage, const std::string& userMessage="") - { - AbortNode(strMessage, userMessage); - return state.Error(strMessage); - } - -} // anon namespace - -/** - * Apply the undo operation of a CTxInUndo to the given chain state. - * @param undo The undo object. - * @param view The coins view to which to apply the changes. - * @param out The out point that corresponds to the tx input. - * @return True on success. - */ -static bool ApplyTxInUndo(const CTxInUndo& undo, CCoinsViewCache& view, const COutPoint& out) -{ - bool fClean = true; - - CCoinsModifier coins = view.ModifyCoins(out.hash); - if (undo.nHeight != 0) { - // undo data contains height: this is the last output of the prevout tx being spent - if (!coins->IsPruned()) - fClean = fClean && error("%s: undo data overwriting existing transaction", __func__); - coins->Clear(); - coins->fCoinBase = undo.fCoinBase; - coins->nHeight = undo.nHeight; - coins->nVersion = undo.nVersion; - } else { - if (coins->IsPruned()) - fClean = fClean && error("%s: undo data adding output to missing transaction", __func__); - } - if (coins->IsAvailable(out.n)) - fClean = fClean && error("%s: undo data overwriting existing output", __func__); - if (coins->vout.size() < out.n+1) - coins->vout.resize(out.n+1); - coins->vout[out.n] = undo.txout; - - return fClean; -} - -void ConnectNotarizations(const CBlock &block, int height) -{ - NotarizationsInBlock notarizations = ScanBlockNotarizations(block, height); - if (notarizations.size() > 0) { - CDBBatch batch = CDBBatch(*pnotarizations); - 
batch.Write(block.GetHash(), notarizations); - WriteBackNotarizations(notarizations, batch); - pnotarizations->WriteBatch(batch, true); - LogPrintf("ConnectBlock: wrote %i block notarizations in block: %s\n", notarizations.size(), block.GetHash().GetHex().data()); - } -} - -void DisconnectNotarizations(const CBlock &block) -{ - NotarizationsInBlock nibs; - if (GetBlockNotarizations(block.GetHash(), nibs)) { - CDBBatch batch = CDBBatch(*pnotarizations); - batch.Erase(block.GetHash()); - EraseBackNotarizations(nibs, batch); - pnotarizations->WriteBatch(batch, true); - LogPrintf("DisconnectTip: deleted %i block notarizations in block: %s\n", nibs.size(), block.GetHash().GetHex().data()); - } -} - -int8_t GetAddressType(const CScript &scriptPubKey, CTxDestination &vDest, txnouttype &txType, vector> &vSols) -{ - int8_t keyType = 0; - // some non-standard types, like time lock coinbases, don't solve, but do extract - if ( (Solver(scriptPubKey, txType, vSols) || ExtractDestination(scriptPubKey, vDest)) ) - { - keyType = 1; - if (vDest.which()) - { - // if we failed to solve, and got a vDest, assume P2PKH or P2PK address returned - CKeyID kid; - if (CBitcoinAddress(vDest).GetKeyID(kid)) - { - vSols.push_back(vector(kid.begin(), kid.end())); - } - } - else if (txType == TX_SCRIPTHASH) - { - keyType = 2; - } - } - return keyType; -} - -bool DisconnectBlock(CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean) -{ - assert(pindex->GetBlockHash() == view.GetBestBlock()); - - if (pfClean) - *pfClean = false; - - bool fClean = true; - - CBlockUndo blockUndo; - CDiskBlockPos pos = pindex->GetUndoPos(); - if (pos.IsNull()) - return error("DisconnectBlock(): no undo data available"); - if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) - return error("DisconnectBlock(): failure reading undo data"); - - if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) - return error("DisconnectBlock(): block and undo data 
inconsistent"); - std::vector > addressIndex; - std::vector > addressUnspentIndex; - std::vector > spentIndex; - - // undo transactions in reverse order - for (int i = block.vtx.size() - 1; i >= 0; i--) { - const CTransaction &tx = block.vtx[i]; - uint256 hash = tx.GetHash(); - if (fAddressIndex) { - - for (unsigned int k = tx.vout.size(); k-- > 0;) { - const CTxOut &out = tx.vout[k]; - - vector> vSols; - CTxDestination vDest; - txnouttype txType = TX_PUBKEYHASH; - int keyType = GetAddressType(out.scriptPubKey, vDest, txType, vSols); - if ( keyType != 0 ) - { - for (auto addr : vSols) - { - uint160 addrHash = addr.size() == 20 ? uint160(addr) : Hash160(addr); - addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, hash, k, false), out.nValue)); - addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, hash, k), CAddressUnspentValue())); - } - } - } - } - - // Check that all outputs are available and match the outputs in the block itself - // exactly. - { - CCoinsModifier outs = view.ModifyCoins(hash); - outs->ClearUnspendable(); - - CCoins outsBlock(tx, pindex->GetHeight()); - // The CCoins serialization does not serialize negative numbers. - // No network rules currently depend on the version here, so an inconsistency is harmless - // but it must be corrected before txout nversion ever influences a network rule. - if (outsBlock.nVersion < 0) - outs->nVersion = outsBlock.nVersion; - if (*outs != outsBlock) - fClean = fClean && error("DisconnectBlock(): added transaction mismatch? 
database corrupted"); - - // remove outputs - outs->Clear(); - } - - // unspend nullifiers - view.SetNullifiers(tx, false); - - // restore inputs - if (!tx.IsMint()) { - CTxUndo &txundo = blockUndo.vtxundo[i-1]; - //if (tx.IsPegsImport()) txundo.vprevout.insert(txundo.vprevout.begin(),CTxInUndo()); - if (txundo.vprevout.size() != tx.vin.size()) - return error("DisconnectBlock(): transaction and undo data inconsistent"); - for (unsigned int j = tx.vin.size(); j-- > 0;) { - //if (tx.IsPegsImport() && j==0) continue; - const COutPoint &out = tx.vin[j].prevout; - const CTxInUndo &undo = txundo.vprevout[j]; - if (!ApplyTxInUndo(undo, view, out)) - fClean = false; - - const CTxIn input = tx.vin[j]; - - if (fSpentIndex) { - // undo and delete the spent index - spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue())); - } - - if (fAddressIndex) { - const CTxOut &prevout = view.GetOutputFor(tx.vin[j]); - - vector> vSols; - CTxDestination vDest; - txnouttype txType = TX_PUBKEYHASH; - int keyType = GetAddressType(prevout.scriptPubKey, vDest, txType, vSols); - if ( keyType != 0 ) - { - for (auto addr : vSols) - { - uint160 addrHash = addr.size() == 20 ? uint160(addr) : Hash160(addr); - // undo spending activity - addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, hash, j, true), prevout.nValue * -1)); - // restore unspent index - addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, input.prevout.hash, input.prevout.n), CAddressUnspentValue(prevout.nValue, prevout.scriptPubKey, undo.nHeight))); - } - } - } - } - } - } - - // set the old best Sprout anchor back - view.PopAnchor(blockUndo.old_sprout_tree_root, SPROUT); - - // set the old best Sapling anchor back - // We can get this from the `hashFinalSaplingRoot` of the last block - // However, this is only reliable if the last block was on or after - // the Sapling activation height. 
Otherwise, the last anchor was the - // empty root. - const bool sapling = pindex->pprev->GetHeight() >= 1 ? true : false; // NetworkUpgradeActive(pindex->pprev->GetHeight(), Params().GetConsensus(), Consensus::UPGRADE_SAPLING); - if (sapling) { - view.PopAnchor(pindex->pprev->hashFinalSaplingRoot, SAPLING); - } else { - view.PopAnchor(SaplingMerkleTree::empty_root(), SAPLING); - } - - // move best block pointer to prevout block - view.SetBestBlock(pindex->pprev->GetBlockHash()); - - // If disconnecting a block brings us back before our blocktime halving height, go back - // to our original blocktime so our DAA has the correct target for that height - int nHeight = pindex->pprev->GetHeight(); - nFirstHalvingHeight = GetArg("-z2zheight",340000); - if (ishush3 && (ASSETCHAINS_BLOCKTIME != 150) && (nHeight < nFirstHalvingHeight)) { - LogPrintf("%s: Setting blocktime to 150s at height %d!\n",__func__,nHeight); - ASSETCHAINS_BLOCKTIME = 150; - hush_changeblocktime(); - } - - - if (pfClean) { - *pfClean = fClean; - return true; - } - - if (fAddressIndex) { - if (!pblocktree->EraseAddressIndex(addressIndex)) { - return AbortNode(state, "Failed to delete address index"); - } - if (!pblocktree->UpdateAddressUnspentIndex(addressUnspentIndex)) { - return AbortNode(state, "Failed to write address unspent index"); - } - } - - return fClean; -} - -void static FlushBlockFile(bool fFinalize = false) -{ - LOCK(cs_LastBlockFile); - - CDiskBlockPos posOld(nLastBlockFile, 0); - - FILE *fileOld = OpenBlockFile(posOld); - if (fileOld) { - if (fFinalize) - TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize); - FileCommit(fileOld); - fclose(fileOld); - } - - fileOld = OpenUndoFile(posOld); - if (fileOld) { - if (fFinalize) - TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize); - FileCommit(fileOld); - fclose(fileOld); - } -} - -bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize); - -static CCheckQueue scriptcheckqueue(128); - 
-void ThreadScriptCheck() { - RenameThread("hush-scriptch"); - scriptcheckqueue.Thread(); -} - - -static int64_t nTimeVerify = 0; -static int64_t nTimeConnect = 0; -static int64_t nTimeIndex = 0; -static int64_t nTimeCallbacks = 0; -static int64_t nTimeTotal = 0; -bool FindBlockPos(int32_t tmpflag,CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown = false); -bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos); - -bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pindex, CCoinsViewCache& view, bool fJustCheck,bool fCheckPOW) -{ - CDiskBlockPos blockPos; - const CChainParams& chainparams = Params(); - if ( HUSH_NSPV_SUPERLITE ) - return(true); - if ( HUSH_STOPAT != 0 && pindex->GetHeight() > HUSH_STOPAT ) - return(false); - //fprintf(stderr,"connectblock ht.%d\n",(int32_t)pindex->GetHeight()); - AssertLockHeld(cs_main); - - const bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? 
true : false; - - // At startup, HUSH3 doesn't know a block height yet and so we must wait until - // connecting a block to set our private/blocktime flags, which are height-dependent - nFirstHalvingHeight = GetArg("-z2zheight",340000); - if(!ASSETCHAINS_PRIVATE && ishush3) { - unsigned int nHeight = pindex->GetHeight(); - if(nHeight >= nFirstHalvingHeight) { - fprintf(stderr, "%s: Going full z2z at height %d!\n",__func__,pindex->GetHeight()); - ASSETCHAINS_PRIVATE = 1; - } - } - if (ishush3 && (ASSETCHAINS_BLOCKTIME != 75) && (chainActive.Height() >= nFirstHalvingHeight)) { - LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,pindex->GetHeight()); - ASSETCHAINS_BLOCKTIME = 75; - hush_changeblocktime(); - } - - bool fExpensiveChecks = true; - if (fCheckpointsEnabled) { - CBlockIndex *pindexLastCheckpoint = Checkpoints::GetLastCheckpoint(chainparams.Checkpoints()); - if (pindexLastCheckpoint && pindexLastCheckpoint->GetAncestor(pindex->GetHeight()) == pindex) { - // This block is an ancestor of a checkpoint: disable script checks - fExpensiveChecks = false; - } - } - auto verifier = libzcash::ProofVerifier::Strict(); - auto disabledVerifier = libzcash::ProofVerifier::Disabled(); - int32_t futureblock; - CAmount blockReward = GetBlockSubsidy(pindex->GetHeight(), chainparams.GetConsensus()); - uint64_t notarypaycheque = 0; - - // Check it again to verify ztx proofs, and in case a previous version let a bad block in - if ( !CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, fExpensiveChecks ? 
verifier : disabledVerifier, fCheckPOW, !fJustCheck) || futureblock != 0 ) - { - //fprintf(stderr,"checkblock failure in connectblock futureblock.%d\n",futureblock); - return false; - } - if ( fCheckPOW != 0 && (pindex->nStatus & BLOCK_VALID_CONTEXT) != BLOCK_VALID_CONTEXT ) // Activate Jan 15th, 2019 - { - if ( !ContextualCheckBlock(1,block, state, pindex->pprev) ) - { - fprintf(stderr,"ContextualCheckBlock failed ht.%d\n",(int32_t)pindex->GetHeight()); - if ( pindex->nTime > 1547510400 ) - return false; - fprintf(stderr,"grandfathered exception, until jan 15th 2019\n"); - } else pindex->nStatus |= BLOCK_VALID_CONTEXT; - } - - // Do this here before the block is moved to the main block files. - if ( ASSETCHAINS_NOTARY_PAY[0] != 0 && pindex->GetHeight() > 10 ) - { - // do a full block scan to get ntz position and to enforce a valid notarization is in position 1. - // if ntz in the block, must be position 1 and the coinbase must pay notaries. - int32_t notarizationTx = hush_connectblock(true,pindex,*(CBlock *)&block); - // -1 means that the valid notarization isnt in position 1 or there are too many notarizations in this block. - if ( notarizationTx == -1 ) - return state.DoS(100, error("ConnectBlock(): Notarization is not in TX position 1 or block contains more than 1 notarization! Invalid Block!"), - REJECT_INVALID, "bad-notarization-position"); - // 1 means this block contains a valid notarization and its in position 1. - // its no longer possible for any attempted notarization to be in a block with a valid one! - // if notaries create a notarization even if its not in this chain it will need to be mined inside its own block! - if ( notarizationTx == 1 ) - { - // Check if the notaries have been paid. - if ( block.vtx[0].vout.size() == 1 ) - return state.DoS(100, error("ConnectBlock(): Notaries have not been paid!"), REJECT_INVALID, "bad-cb-amount"); - // calculate the notaries compensation and validate the amounts and pubkeys are correct. 
- notarypaycheque = hush_checknotarypay((CBlock *)&block,(int32_t)pindex->GetHeight()); - //fprintf(stderr, "notarypaycheque.%lu\n", notarypaycheque); - if ( notarypaycheque > 0 ) - blockReward += notarypaycheque; - else - return state.DoS(100, error("ConnectBlock(): Notary pay validation failed!"), - REJECT_INVALID, "bad-cb-amount"); - } - } - - // Move the block to the main block file, we need this to create the TxIndex in the following loop. - if ( (pindex->nStatus & BLOCK_IN_TMPFILE) != 0 ) - { - unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); - if (!FindBlockPos(0,state, blockPos, nBlockSize+8, pindex->GetHeight(), block.GetBlockTime(),false)) - return error("ConnectBlock(): FindBlockPos failed"); - if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) - return error("ConnectBlock(): FindBlockPos failed"); - pindex->nStatus &= (~BLOCK_IN_TMPFILE); - pindex->nFile = blockPos.nFile; - pindex->nDataPos = blockPos.nPos; - if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) - return error("AcceptBlock(): ReceivedBlockTransactions failed"); - setDirtyFileInfo.insert(blockPos.nFile); - //fprintf(stderr,"added ht.%d copy of tmpfile to %d.%d\n",pindex->GetHeight(),blockPos.nFile,blockPos.nPos); - } - // verify that the view's current state corresponds to the previous block - uint256 hashPrevBlock = pindex->pprev == NULL ? 
uint256() : pindex->pprev->GetBlockHash(); - if ( hashPrevBlock != view.GetBestBlock() ) - { - fprintf(stderr,"ConnectBlock(): hashPrevBlock != view.GetBestBlock() %s != %s\n", hashPrevBlock.ToString().c_str(), view.GetBestBlock().ToString().c_str() ); - - return state.DoS(1, error("ConnectBlock(): hashPrevBlock != view.GetBestBlock()"), - REJECT_INVALID, "hashPrevBlock-not-bestblock"); - } - assert(hashPrevBlock == view.GetBestBlock()); - - // Special case for the genesis block, skipping connection of its transactions - // (its coinbase is unspendable) - if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) { - if (!fJustCheck) { - view.SetBestBlock(pindex->GetBlockHash()); - // Before the genesis block, there was an empty tree - SproutMerkleTree tree; - pindex->hashSproutAnchor = tree.root(); - // The genesis block contained no JoinSplits, lulz - pindex->hashFinalSproutRoot = pindex->hashSproutAnchor; - } - return true; - } - - bool fScriptChecks = (!fCheckpointsEnabled || pindex->GetHeight() >= Checkpoints::GetTotalBlocksEstimate(chainparams.Checkpoints())); - // Do not allow blocks that contain transactions which 'overwrite' older transactions, - // unless those are already completely spent. - BOOST_FOREACH(const CTransaction& tx, block.vtx) { - const CCoins* coins = view.AccessCoins(tx.GetHash()); - if (coins && !coins->IsPruned()) - return state.DoS(100, error("ConnectBlock(): tried to overwrite transaction"), - REJECT_INVALID, "bad-txns-BIP30"); - } - - unsigned int flags = SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY; - - // DERSIG (BIP66) is also always enforced, but does not have a flag. - - CBlockUndo blockundo; - - if ( ASSETCHAINS_CC != 0 ) - { - if ( scriptcheckqueue.IsIdle() == 0 ) - { - fprintf(stderr,"scriptcheckqueue isnt idle\n"); - sleep(1); - } - } - CCheckQueueControl control(fExpensiveChecks && nScriptCheckThreads ? 
&scriptcheckqueue : NULL); - - int64_t nTimeStart = GetTimeMicros(); - CAmount nFees = 0; - int nInputs = 0; - uint64_t valueout; - int64_t voutsum = 0, prevsum = 0, interest, sum = 0; - unsigned int nSigOps = 0; - CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size())); - std::vector > vPos; - vPos.reserve(block.vtx.size()); - blockundo.vtxundo.reserve(block.vtx.size() - 1); - std::vector > addressIndex; - std::vector > addressUnspentIndex; - std::vector > spentIndex; - // Construct the incremental merkle tree at the current - // block position, - auto old_sprout_tree_root = view.GetBestAnchor(SPROUT); - // saving the top anchor in the block index as we go. - if (!fJustCheck) { - pindex->hashSproutAnchor = old_sprout_tree_root; - } - - SproutMerkleTree sprout_tree; - - // This should never fail: we should always be able to get the root - // that is on the tip of our chain - //assert(view.GetSproutAnchorAt(old_sprout_tree_root, sprout_tree)); - - - SaplingMerkleTree sapling_tree; - assert(view.GetSaplingAnchorAt(view.GetBestAnchor(SAPLING), sapling_tree)); - - // Grab the consensus branch ID for the block's height - auto consensusBranchId = CurrentEpochBranchId(pindex->GetHeight(), Params().GetConsensus()); - - std::vector txdata; - txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated - for (unsigned int i = 0; i < block.vtx.size(); i++) - { - const CTransaction &tx = block.vtx[i]; - const uint256 txhash = tx.GetHash(); - nInputs += tx.vin.size(); - nSigOps += GetLegacySigOpCount(tx); - if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("ConnectBlock(): too many sigops"), - REJECT_INVALID, "bad-blk-sigops"); - //fprintf(stderr,"ht.%d vout0 t%u\n",pindex->GetHeight(),tx.nLockTime); - if (!tx.IsMint()) - { - if (!view.HaveInputs(tx)) - { - fprintf(stderr, "Connect Block missing inputs tx_number.%d \nvin txid.%s vout.%d 
\n",i,tx.vin[0].prevout.hash.ToString().c_str(),tx.vin[0].prevout.n); - return state.DoS(100, error("ConnectBlock(): inputs missing/spent"), - REJECT_INVALID, "bad-txns-inputs-missingorspent"); - } - // are the shielded requirements met? - if (!view.HaveShieldedRequirements(tx)) - return state.DoS(100, error("ConnectBlock(): shielded requirements not met"), REJECT_INVALID, "bad-txns-joinsplit-requirements-not-met"); - - if (fAddressIndex || fSpentIndex) - { - for (size_t j = 0; j < tx.vin.size(); j++) - { - //if (tx.IsPegsImport() && j==0) continue; - const CTxIn input = tx.vin[j]; - const CTxOut &prevout = view.GetOutputFor(tx.vin[j]); - - vector> vSols; - CTxDestination vDest; - txnouttype txType = TX_PUBKEYHASH; - uint160 addrHash; - int keyType = GetAddressType(prevout.scriptPubKey, vDest, txType, vSols); - if ( keyType != 0 ) - { - for (auto addr : vSols) - { - addrHash = addr.size() == 20 ? uint160(addr) : Hash160(addr); - // record spending activity - addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, txhash, j, true), prevout.nValue * -1)); - - // remove address from unspent index - addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, input.prevout.hash, input.prevout.n), CAddressUnspentValue())); - } - - if (fSpentIndex) { - // add the spent index to determine the txid and input that spent an output - // and to find the amount and address from an input - spentIndex.push_back(make_pair(CSpentIndexKey(input.prevout.hash, input.prevout.n), CSpentIndexValue(txhash, j, pindex->GetHeight(), prevout.nValue, keyType, addrHash))); - } - } - } - } - // Add in sigops done by pay-to-script-hash inputs; - // this is to prevent a "rogue miner" from creating - // an incredibly-expensive-to-validate block. 
- nSigOps += GetP2SHSigOpCount(tx, view); - if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("ConnectBlock(): too many sigops"), - REJECT_INVALID, "bad-blk-sigops"); - } - - txdata.emplace_back(tx); - - valueout = tx.GetValueOut(); - if ( HUSH_VALUETOOBIG(valueout) != 0 ) - { - fprintf(stderr,"valueout %.8f too big\n",(double)valueout/COIN); - return state.DoS(100, error("ConnectBlock(): GetValueOut too big"),REJECT_INVALID,"tx valueout is too big"); - } - //prevsum = voutsum; - //voutsum += valueout; - /*if ( HUSH_VALUETOOBIG(voutsum) != 0 ) - { - fprintf(stderr,"voutsum %.8f too big\n",(double)voutsum/COIN); - return state.DoS(100, error("ConnectBlock(): voutsum too big"),REJECT_INVALID,"tx valueout is too big"); - } - else - if ( voutsum < prevsum ) // PRLPAY overflows this and it isnt a conclusive test anyway - return state.DoS(100, error("ConnectBlock(): voutsum less after adding valueout"),REJECT_INVALID,"tx valueout is too big");*/ - if (!tx.IsCoinBase()) - { - nFees += view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime) - valueout; - sum += interest; - - std::vector vChecks; - if (!ContextualCheckInputs(tx, state, view, fExpensiveChecks, flags, false, txdata[i], chainparams.GetConsensus(), consensusBranchId, nScriptCheckThreads ? &vChecks : NULL)) - return false; - control.Add(vChecks); - } - - if (fAddressIndex) { - for (unsigned int k = 0; k < tx.vout.size(); k++) { - const CTxOut &out = tx.vout[k]; - - uint160 addrHash; - - vector> vSols; - CTxDestination vDest; - txnouttype txType = TX_PUBKEYHASH; - int keyType = GetAddressType(out.scriptPubKey, vDest, txType, vSols); - if ( keyType != 0 ) - { - for (auto addr : vSols) - { - addrHash = addr.size() == 20 ? 
uint160(addr) : Hash160(addr); - // record receiving activity - addressIndex.push_back(make_pair(CAddressIndexKey(keyType, addrHash, pindex->GetHeight(), i, txhash, k, false), out.nValue)); - - // record unspent output - addressUnspentIndex.push_back(make_pair(CAddressUnspentKey(keyType, addrHash, txhash, k), CAddressUnspentValue(out.nValue, out.scriptPubKey, pindex->GetHeight()))); - } - } - } - } - - CTxUndo undoDummy; - if (i > 0) { - blockundo.vtxundo.push_back(CTxUndo()); - } - UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(), pindex->GetHeight()); - - - BOOST_FOREACH(const OutputDescription &outputDescription, tx.vShieldedOutput) { - sapling_tree.append(outputDescription.cm); - } - - vPos.push_back(std::make_pair(tx.GetHash(), pos)); - pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION); - } - - //view.PushAnchor(sprout_tree); - view.PushAnchor(sapling_tree); - if (!fJustCheck) { - pindex->hashFinalSproutRoot = sprout_tree.root(); - } - blockundo.old_sprout_tree_root = old_sprout_tree_root; - - // If Sapling is active, block.hashFinalSaplingRoot must be the - // same as the root of the Sapling tree - const bool sapling = pindex->GetHeight()>=1 ? true : false; //NetworkUpgradeActive(pindex->GetHeight(), chainparams.GetConsensus(), Consensus::UPGRADE_SAPLING); - if (sapling) { - if (block.hashFinalSaplingRoot != sapling_tree.root()) { - return state.DoS(100, - error("ConnectBlock(): block's hashFinalSaplingRoot is incorrect"), - REJECT_INVALID, "bad-sapling-root-in-block"); - } - } - int64_t nTime1 = GetTimeMicros(); nTimeConnect += nTime1 - nTimeStart; - LogPrint("bench", " - Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin) [%.2fs]\n", (unsigned)block.vtx.size(), 0.001 * (nTime1 - nTimeStart), 0.001 * (nTime1 - nTimeStart) / block.vtx.size(), nInputs <= 1 ? 
0 : 0.001 * (nTime1 - nTimeStart) / (nInputs-1), nTimeConnect * 0.000001); - - blockReward += nFees + sum; - if ( ASSETCHAINS_COMMISSION != 0 || ASSETCHAINS_FOUNDERS_REWARD != 0 ) //ASSETCHAINS_OVERRIDE_PUBKEY33[0] != 0 && - { - uint64_t checktoshis; - if ( (checktoshis= the_commission((CBlock *)&block,(int32_t)pindex->GetHeight())) != 0 ) - { - if ( block.vtx[0].vout.size() >= 2 && block.vtx[0].vout[1].nValue == checktoshis ) - blockReward += checktoshis; - else if ( pindex->GetHeight() > 1 ) - fprintf(stderr,"checktoshis %.8f vs %.8f numvouts %d\n",dstr(checktoshis),dstr(block.vtx[0].vout[1].nValue),(int32_t)block.vtx[0].vout.size()); - } - } - if (SMART_CHAIN_SYMBOL[0] != 0 && pindex->GetHeight() == 1 && block.vtx[0].GetValueOut() != blockReward) - { - return state.DoS(100, error("ConnectBlock(): coinbase for block 1 pays wrong amount (actual=%d vs correct=%d)", block.vtx[0].GetValueOut(), blockReward), - REJECT_INVALID, "bad-cb-amount"); - } - if ( block.vtx[0].GetValueOut() > blockReward+HUSH_EXTRASATOSHI ) - { - if ( SMART_CHAIN_SYMBOL[0] != 0 || pindex->GetHeight() >= HUSH_NOTARIES_HEIGHT1 || block.vtx[0].vout[0].nValue > blockReward ) - { - //fprintf(stderr, "coinbase pays too much\n"); - //sleepflag = true; - return state.DoS(100, - error("ConnectBlock(): coinbase pays too much (actual=%d vs limit=%d)", - block.vtx[0].GetValueOut(), blockReward), - REJECT_INVALID, "bad-cb-amount"); - } else if ( IS_HUSH_NOTARY != 0 ) - fprintf(stderr,"allow nHeight.%d coinbase %.8f vs %.8f interest %.8f\n",(int32_t)pindex->GetHeight(),dstr(block.vtx[0].GetValueOut()),dstr(blockReward),dstr(sum)); - } - if (!control.Wait()) - return state.DoS(100, false); - int64_t nTime2 = GetTimeMicros(); nTimeVerify += nTime2 - nTimeStart; - LogPrint("bench", " - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n", nInputs - 1, 0.001 * (nTime2 - nTimeStart), nInputs <= 1 ? 
0 : 0.001 * (nTime2 - nTimeStart) / (nInputs-1), nTimeVerify * 0.000001); - - if (fJustCheck) - return true; - - // Write undo information to disk - //fprintf(stderr,"nFile.%d isNull %d vs isvalid %d nStatus %x\n",(int32_t)pindex->nFile,pindex->GetUndoPos().IsNull(),pindex->IsValid(BLOCK_VALID_SCRIPTS),(uint32_t)pindex->nStatus); - if (pindex->GetUndoPos().IsNull() || !pindex->IsValid(BLOCK_VALID_SCRIPTS)) - { - if (pindex->GetUndoPos().IsNull()) - { - CDiskBlockPos pos; - if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40)) - return error("ConnectBlock(): FindUndoPos failed"); - if ( pindex->pprev == 0 ) - fprintf(stderr,"ConnectBlock: unexpected null pprev\n"); - if (!UndoWriteToDisk(blockundo, pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) - return AbortNode(state, "Failed to write undo data"); - // update nUndoPos in block index - pindex->nUndoPos = pos.nPos; - pindex->nStatus |= BLOCK_HAVE_UNDO; - } - - // Now that all consensus rules have been validated, set nCachedBranchId. - // Move this if BLOCK_VALID_CONSENSUS is ever altered. - static_assert(BLOCK_VALID_CONSENSUS == BLOCK_VALID_SCRIPTS, - "nCachedBranchId must be set after all consensus rules have been validated."); - if (IsActivationHeightForAnyUpgrade(pindex->GetHeight(), Params().GetConsensus())) { - pindex->nStatus |= BLOCK_ACTIVATES_UPGRADE; - pindex->nCachedBranchId = CurrentEpochBranchId(pindex->GetHeight(), chainparams.GetConsensus()); - } else if (pindex->pprev) { - pindex->nCachedBranchId = pindex->pprev->nCachedBranchId; - } - - pindex->RaiseValidity(BLOCK_VALID_SCRIPTS); - setDirtyBlockIndex.insert(pindex); - } - - ConnectNotarizations(block, pindex->GetHeight()); // MoMoM notarization DB. 
- - if (fTxIndex) - if (!pblocktree->WriteTxIndex(vPos)) - return AbortNode(state, "Failed to write transaction index"); - if (fAddressIndex) { - if (!pblocktree->WriteAddressIndex(addressIndex)) { - return AbortNode(state, "Failed to write address index"); - } - - if (!pblocktree->UpdateAddressUnspentIndex(addressUnspentIndex)) { - return AbortNode(state, "Failed to write address unspent index"); - } - } - - if (fSpentIndex) - if (!pblocktree->UpdateSpentIndex(spentIndex)) - return AbortNode(state, "Failed to write transaction index"); - - if (fTimestampIndex) - { - unsigned int logicalTS = pindex->nTime; - unsigned int prevLogicalTS = 0; - - // retrieve logical timestamp of the previous block - if (pindex->pprev) - if (!pblocktree->ReadTimestampBlockIndex(pindex->pprev->GetBlockHash(), prevLogicalTS)) - LogPrintf("%s: Failed to read previous block's logical timestamp\n", __func__); - - if (logicalTS <= prevLogicalTS) { - logicalTS = prevLogicalTS + 1; - LogPrintf("%s: Previous logical timestamp is newer Actual[%d] prevLogical[%d] Logical[%d]\n", __func__, pindex->nTime, prevLogicalTS, logicalTS); - } - - if (!pblocktree->WriteTimestampIndex(CTimestampIndexKey(logicalTS, pindex->GetBlockHash()))) - return AbortNode(state, "Failed to write timestamp index"); - - if (!pblocktree->WriteTimestampBlockIndex(CTimestampBlockIndexKey(pindex->GetBlockHash()), CTimestampBlockIndexValue(logicalTS))) - return AbortNode(state, "Failed to write blockhash index"); - } - - // add this block to the view's block chain - view.SetBestBlock(pindex->GetBlockHash()); - - int64_t nTime3 = GetTimeMicros(); nTimeIndex += nTime3 - nTime2; - LogPrint("bench", " - Index writing: %.2fms [%.2fs]\n", 0.001 * (nTime3 - nTime2), nTimeIndex * 0.000001); - - // Watch for changes to the previous coinbase transaction. 
- static uint256 hashPrevBestCoinBase; - GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase); - hashPrevBestCoinBase = block.vtx[0].GetHash(); - - int64_t nTime4 = GetTimeMicros(); nTimeCallbacks += nTime4 - nTime3; - LogPrint("bench", " - Callbacks: %.2fms [%.2fs]\n", 0.001 * (nTime4 - nTime3), nTimeCallbacks * 0.000001); - - //FlushStateToDisk(); - hush_connectblock(false,pindex,*(CBlock *)&block); // dPoW state update. - if ( ASSETCHAINS_NOTARY_PAY[0] != 0 ) - { - // Update the notary pay with the latest payment. - pindex->nNotaryPay = pindex->pprev->nNotaryPay + notarypaycheque; - //fprintf(stderr, "total notary pay.%li\n", pindex->nNotaryPay); - } - return true; -} - -enum FlushStateMode { - FLUSH_STATE_NONE, - FLUSH_STATE_IF_NEEDED, - FLUSH_STATE_PERIODIC, - FLUSH_STATE_ALWAYS -}; - -/** - * Update the on-disk chain state. - * The caches and indexes are flushed depending on the mode we're called with - * if they're too large, if it's been a while since the last write, - * or always and in all cases if we're in prune mode and are deleting files. - */ -bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) { - LOCK2(cs_main, cs_LastBlockFile); - static int64_t nLastWrite = 0; - static int64_t nLastFlush = 0; - static int64_t nLastSetChain = 0; - std::set setFilesToPrune; - bool fFlushForPrune = false; - try { - if (fPruneMode && fCheckForPruning && !fReindex) { - FindFilesToPrune(setFilesToPrune); - fCheckForPruning = false; - if (!setFilesToPrune.empty()) { - fFlushForPrune = true; - if (!fHavePruned) { - pblocktree->WriteFlag("prunedblockfiles", true); - fHavePruned = true; - } - } - } - int64_t nNow = GetTimeMicros(); - // Avoid writing/flushing immediately after startup. 
- if (nLastWrite == 0) { - nLastWrite = nNow; - } - if (nLastFlush == 0) { - nLastFlush = nNow; - } - if (nLastSetChain == 0) { - nLastSetChain = nNow; - } - size_t cacheSize = pcoinsTip->DynamicMemoryUsage(); - // The cache is large and close to the limit, but we have time now (not in the middle of a block processing). - bool fCacheLarge = mode == FLUSH_STATE_PERIODIC && cacheSize * (10.0/9) > nCoinCacheUsage; - // The cache is over the limit, we have to write now. - bool fCacheCritical = mode == FLUSH_STATE_IF_NEEDED && cacheSize > nCoinCacheUsage; - // It's been a while since we wrote the block index to disk. Do this frequently, so we don't need to redownload after a crash. - bool fPeriodicWrite = mode == FLUSH_STATE_PERIODIC && nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000; - // It's been very long since we flushed the cache. Do this infrequently, to optimize cache usage. - bool fPeriodicFlush = mode == FLUSH_STATE_PERIODIC && nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000; - // Combine all conditions that result in a full cache flush. - bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge || fCacheCritical || fPeriodicFlush || fFlushForPrune; - // Write blocks and block index to disk. - if (fDoFullFlush || fPeriodicWrite) { - // Depend on nMinDiskSpace to ensure we can write block index - if (!CheckDiskSpace(0)) - return state.Error("out of disk space"); - // First make sure all block and undo data is flushed to disk. - FlushBlockFile(); - // Then update all block file information (which may refer to block and undo files). 
- { - std::vector > vFiles; - vFiles.reserve(setDirtyFileInfo.size()); - for (set::iterator it = setDirtyFileInfo.begin(); it != setDirtyFileInfo.end(); ) { - if ( *it < TMPFILE_START ) - vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it])); - setDirtyFileInfo.erase(it++); - } - std::vector vBlocks; - vBlocks.reserve(setDirtyBlockIndex.size()); - for (set::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) { - vBlocks.push_back(*it); - setDirtyBlockIndex.erase(it++); - } - if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) { - return AbortNode(state, "Files to write to block index database"); - } - // Now that we have written the block indices to the database, we do not - // need to store solutions for these CBlockIndex objects in memory. - // cs_main must be held here. - uint32_t nTrimmed = 0; - for (CBlockIndex *pblockindex : vBlocks) { - pblockindex->TrimSolution(); - ++nTrimmed; - } - LogPrintf("%s: trimmed %d solutions from block index mode=%d\n", __func__, nTrimmed, mode); - } - // Finally remove any pruned files - if (fFlushForPrune) - UnlinkPrunedFiles(setFilesToPrune); - nLastWrite = nNow; - } - // Flush best chain related state. This can only be done if the blocks / block index write was also done. - if (fDoFullFlush) { - // Typical CCoins structures on disk are around 128 bytes in size. - // Pushing a new one to the database can cause it to be written - // twice (once in the log, and once in the tables). This is already - // an overestimation, as most will delete an existing entry or - // overwrite one. Still, use a conservative safety factor of 2. - if (!CheckDiskSpace(128 * 2 * 2 * pcoinsTip->GetCacheSize())) - return state.Error("out of disk space"); - // Flush the chainstate (which may refer to block index entries). 
- if (!pcoinsTip->Flush()) - return AbortNode(state, "Failed to write to coin database"); - nLastFlush = nNow; - } - if ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) && nNow > nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000) { - // Update best block in wallet (so we can detect restored wallets). - GetMainSignals().SetBestChain(chainActive.GetLocator()); - nLastSetChain = nNow; - } - } catch (const std::runtime_error& e) { - return AbortNode(state, std::string("System error while flushing: ") + e.what()); - } - return true; -} - -void FlushStateToDisk() { - CValidationState state; - if ( HUSH_NSPV_FULLNODE ) - FlushStateToDisk(state, FLUSH_STATE_ALWAYS); -} - -void PruneAndFlush() { - CValidationState state; - fCheckForPruning = true; - FlushStateToDisk(state, FLUSH_STATE_NONE); -} - -/** Update chainActive and related internal data structures. */ -void static UpdateTip(CBlockIndex *pindexNew) { - const CChainParams& chainParams = Params(); - chainActive.SetTip(pindexNew); - - // New best block - nTimeBestReceived = GetTime(); - mempool.AddTransactionsUpdated(1); - HUSH_NEWBLOCKS++; - double progress; - if ( ishush3 ) { - progress = Checkpoints::GuessVerificationProgress(chainParams.Checkpoints(), chainActive.LastTip()); - } else { - int32_t longestchain = hush_longestchain(); - progress = (longestchain > 0 ) ? 
(double) chainActive.Height() / longestchain : 1.0; - } - - nFirstHalvingHeight = GetArg("-z2zheight",340000); - if(ishush3) { - if (ASSETCHAINS_BLOCKTIME != 75 && (chainActive.Height() >= nFirstHalvingHeight)) { - LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,chainActive.Height()); - ASSETCHAINS_BLOCKTIME = 75; - hush_changeblocktime(); - } - } - - LogPrintf("%s: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f cache=%.1fMiB(%utx)\n", __func__, - chainActive.LastTip()->GetBlockHash().ToString(), chainActive.Height(), - log(chainActive.Tip()->chainPower.chainWork.getdouble())/log(2.0), - (unsigned long)chainActive.LastTip()->nChainTx, - DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.LastTip()->GetBlockTime()), progress, - pcoinsTip->DynamicMemoryUsage() * (1.0 / (1<<20)), pcoinsTip->GetCacheSize()); - - cvBlockChange.notify_all(); -} - -/** - * Disconnect chainActive's tip. You probably want to call mempool.removeForReorg and - * mempool.removeWithoutBranchId after this, with cs_main held. - */ -bool static DisconnectTip(CValidationState &state, bool fBare = false) { - CBlockIndex *pindexDelete = chainActive.Tip(); - assert(pindexDelete); - // Read block from disk. - CBlock block; - if (!ReadBlockFromDisk(block, pindexDelete,1)) - return AbortNode(state, "Failed to read block"); - //if ( SMART_CHAIN_SYMBOL[0] != 0 || pindexDelete->GetHeight() > 1400000 ) - { - int32_t notarizedht,prevMoMheight; uint256 notarizedhash,txid; - notarizedht = hush_notarized_height(&prevMoMheight,¬arizedhash,&txid); - if ( block.GetHash() == notarizedhash ) - { - fprintf(stderr,"DisconnectTip trying to disconnect notarized block at ht.%d\n",(int32_t)pindexDelete->GetHeight()); - return state.DoS(100, error("AcceptBlock(): DisconnectTip trying to disconnect notarized blockht.%d",(int32_t)pindexDelete->GetHeight()), - REJECT_INVALID, "past-notarized-height"); - } - } - // Apply the block atomically to the chain state. 
- uint256 sproutAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SPROUT); - uint256 saplingAnchorBeforeDisconnect = pcoinsTip->GetBestAnchor(SAPLING); - int64_t nStart = GetTimeMicros(); - { - CCoinsViewCache view(pcoinsTip); - if (!DisconnectBlock(block, state, pindexDelete, view)) - return error("DisconnectTip(): DisconnectBlock %s failed", pindexDelete->GetBlockHash().ToString()); - assert(view.Flush()); - DisconnectNotarizations(block); - } - pindexDelete->segid = -2; - pindexDelete->nNotaryPay = 0; - pindexDelete->newcoins = 0; - pindexDelete->zfunds = 0; - - LogPrint("bench", "- Disconnect block: %.2fms\n", (GetTimeMicros() - nStart) * 0.001); - uint256 sproutAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SPROUT); - uint256 saplingAnchorAfterDisconnect = pcoinsTip->GetBestAnchor(SAPLING); - // Write the chain state to disk, if necessary. - if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED)) - return false; - - if (!fBare) { - // resurrect mempool transactions from the disconnected block. - for (int i = 0; i < block.vtx.size(); i++) - { - // ignore validation errors in resurrected transactions - CTransaction &tx = block.vtx[i]; - list removed; - CValidationState stateDummy; - - // don't keep staking or invalid transactions - if (tx.IsCoinBase() || ((i == (block.vtx.size() - 1)) && (ASSETCHAINS_STAKED && hush_isPoS((CBlock *)&block,pindexDelete->GetHeight(),true) != 0)) || !AcceptToMemoryPool(mempool, stateDummy, tx, false, NULL)) - { - mempool.remove(tx, removed, true); - } - } - if (sproutAnchorBeforeDisconnect != sproutAnchorAfterDisconnect) { - // The anchor may not change between block disconnects, - // in which case we don't want to evict from the mempool yet! - mempool.removeWithAnchor(sproutAnchorBeforeDisconnect, SPROUT); - } - if (saplingAnchorBeforeDisconnect != saplingAnchorAfterDisconnect) { - // The anchor may not change between block disconnects, - // in which case we don't want to evict from the mempool yet! 
- mempool.removeWithAnchor(saplingAnchorBeforeDisconnect, SAPLING); - } - } - - // Update chainActive and related variables. - UpdateTip(pindexDelete->pprev); - - // Updates to connected wallets are triggered by ThreadNotifyWallets - - return true; -} - -int32_t hush_activate_sapling(CBlockIndex *pindex) -{ - uint32_t blocktime,prevtime; CBlockIndex *prev; int32_t i,transition=0,height,prevht; - int32_t activation = 0; - if ( pindex == 0 ) - { - fprintf(stderr,"hush_activate_sapling null pindex\n"); - return(0); - } - height = pindex->GetHeight(); - blocktime = (uint32_t)pindex->nTime; - //fprintf(stderr,"hush_activate_sapling.%d starting blocktime %u cmp.%d\n",height,blocktime,blocktime > HUSH_SAPING_ACTIVATION); - - // avoid trying unless we have at least 30 blocks - if (height < 30) - return(0); - - for (i=0; i<30; i++) - { - if ( (prev= pindex->pprev) == 0 ) - break; - pindex = prev; - } - if ( i != 30 ) - { - fprintf(stderr,"couldnt go backwards 30 blocks\n"); - return(0); - } - height = pindex->GetHeight(); - blocktime = (uint32_t)pindex->nTime; - //fprintf(stderr,"starting blocktime %u cmp.%d\n",blocktime,blocktime > HUSH_SAPING_ACTIVATION); - if ( blocktime > HUSH_SAPING_ACTIVATION ) // find the earliest transition - { - while ( (prev= pindex->pprev) != 0 ) - { - prevht = prev->GetHeight(); - prevtime = (uint32_t)prev->nTime; - //fprintf(stderr,"(%d, %u).%d -> (%d, %u).%d\n",prevht,prevtime,prevtime > HUSH_SAPING_ACTIVATION,height,blocktime,blocktime > HUSH_SAPING_ACTIVATION); - if ( prevht+1 != height ) - { - fprintf(stderr,"hush_activate_sapling: unexpected non-contiguous ht %d vs %d\n",prevht,height); - return(0); - } - if ( prevtime <= HUSH_SAPING_ACTIVATION && blocktime > HUSH_SAPING_ACTIVATION ) - { - activation = height + 60; - fprintf(stderr,"%s transition at %d (%d, %u) -> (%d, %u)\n",SMART_CHAIN_SYMBOL,height,prevht,prevtime,height,blocktime); - } - if ( prevtime < HUSH_SAPING_ACTIVATION-3600*24 ) - break; - pindex = prev; - height = prevht; - 
blocktime = prevtime; - } - } - if ( activation != 0 ) - { - hush_setactivation(activation); - fprintf(stderr,"%s sapling activation at %d\n",SMART_CHAIN_SYMBOL,activation); - ASSETCHAINS_SAPLING = activation; - } - return activation; -} - -static int64_t nTimeReadFromDisk = 0; -static int64_t nTimeConnectTotal = 0; -static int64_t nTimeFlush = 0; -static int64_t nTimeChainState = 0; -static int64_t nTimePostConnect = 0; - -// Protected by cs_main -std::map> recentlyConflictedTxs; -uint64_t nRecentlyConflictedSequence = 0; -uint64_t nNotifiedSequence = 0; - -/** - * Connect a new block to chainActive. pblock is either NULL or a pointer to a CBlock - * corresponding to pindexNew, to bypass loading it again from disk. - * You probably want to call mempool.removeWithoutBranchId after this, with cs_main held. - */ -bool static ConnectTip(CValidationState &state, CBlockIndex *pindexNew, CBlock *pblock) { - - //fprintf(stderr, "%s: Start\n", __FUNCTION__); - assert(pindexNew->pprev == chainActive.Tip()); - // Read block from disk. - int64_t nTime1 = GetTimeMicros(); - CBlock block; - if (!pblock) { - if (!ReadBlockFromDisk(block, pindexNew,1)) - return AbortNode(state, "Failed to read block"); - pblock = █ - } - HUSH_CONNECTING = (int32_t)pindexNew->GetHeight(); - //fprintf(stderr,"%s connecting ht.%d maxsize.%d vs %d\n",SMART_CHAIN_SYMBOL,(int32_t)pindexNew->GetHeight(),MAX_BLOCK_SIZE(pindexNew->GetHeight()),(int32_t)::GetSerializeSize(*pblock, SER_NETWORK, PROTOCOL_VERSION)); - - // Apply the block atomically to the chain state. 
- int64_t nTime2 = GetTimeMicros(); nTimeReadFromDisk += nTime2 - nTime1; - int64_t nTime3; - LogPrint("bench", " - Load block from disk: %.2fms [%.2fs]\n", (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001); - { - CCoinsViewCache view(pcoinsTip); - bool rv = ConnectBlock(*pblock, state, pindexNew, view, false, true); - HUSH_CONNECTING = -1; - GetMainSignals().BlockChecked(*pblock, state); - if (!rv) { - if (state.IsInvalid()) - { - InvalidBlockFound(pindexNew, state); - /*if ( ASSETCHAINS_CBOPRET != 0 ) - { - pindexNew->nStatus &= ~BLOCK_FAILED_MASK; - fprintf(stderr,"reconsiderblock %d\n",(int32_t)pindexNew->GetHeight()); - }*/ - } - return error("ConnectTip(): ConnectBlock %s failed", pindexNew->GetBlockHash().ToString()); - } - mapBlockSource.erase(pindexNew->GetBlockHash()); - nTime3 = GetTimeMicros(); nTimeConnectTotal += nTime3 - nTime2; - LogPrint("bench", " - Connect total: %.2fms [%.2fs]\n", (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001); - if ( HUSH_NSPV_FULLNODE ) - assert(view.Flush()); - } - int64_t nTime4 = GetTimeMicros(); nTimeFlush += nTime4 - nTime3; - LogPrint("bench", " - Flush: %.2fms [%.2fs]\n", (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001); - // Write the chain state to disk, if necessary. - if ( HUSH_NSPV_FULLNODE ) - { - if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED)) - return false; - } - int64_t nTime5 = GetTimeMicros(); nTimeChainState += nTime5 - nTime4; - LogPrint("bench", " - Writing chainstate: %.2fms [%.2fs]\n", (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001); - - // Remove conflicting transactions from the mempool. - std::list txConflicted; - mempool.removeForBlock(pblock->vtx, pindexNew->GetHeight(), txConflicted, !IsInitialBlockDownload()); - - // Remove transactions that expire at new block height from mempool - auto ids = mempool.removeExpired(pindexNew->GetHeight()); - - for (auto id : ids) { - uiInterface.NotifyTxExpiration(id); - } - - // Update chainActive & related variables. 
- UpdateTip(pindexNew); - - // Cache the conflicted transactions for subsequent notification. - // Updates to connected wallets are triggered by ThreadNotifyWallets - recentlyConflictedTxs.insert(std::make_pair(pindexNew, txConflicted)); - nRecentlyConflictedSequence += 1; - - int64_t nTime6 = GetTimeMicros(); nTimePostConnect += nTime6 - nTime5; nTimeTotal += nTime6 - nTime1; - LogPrint("bench", " - Connect postprocess: %.2fms [%.2fs]\n", (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001); - LogPrint("bench", "- Connect block: %.2fms [%.2fs]\n", (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001); - if ( HUSH_LONGESTCHAIN != 0 && (pindexNew->GetHeight() >= HUSH_LONGESTCHAIN )) - HUSH_INSYNC = (int32_t)pindexNew->GetHeight(); - else HUSH_INSYNC = 0; - //fprintf(stderr,"connect.%d insync.%d ASSETCHAINS_SAPLING.%d\n",(int32_t)pindexNew->GetHeight(),HUSH_INSYNC,ASSETCHAINS_SAPLING); - - if ( HUSH_NSPV_FULLNODE ) - { - //fprintf(stderr,"%s: HUSH_NSPV_FULLNODE\n", __FUNCTION__); - if ( ASSETCHAINS_CBOPRET != 0 ) - hush_pricesupdate(pindexNew->GetHeight(),pblock); - if ( ASSETCHAINS_SAPLING <= 0 && pindexNew->nTime > HUSH_SAPING_ACTIVATION - 24*3600 ) - hush_activate_sapling(pindexNew); - if ( ASSETCHAINS_CC != 0 && HUSH_SNAPSHOT_INTERVAL != 0 && (pindexNew->GetHeight() % HUSH_SNAPSHOT_INTERVAL) == 0 && pindexNew->GetHeight() >= HUSH_SNAPSHOT_INTERVAL ) - { - uint64_t start = time(NULL); - if ( !hush_dailysnapshot(pindexNew->GetHeight()) ) - { - fprintf(stderr, "daily snapshot failed, please reindex your chain\n"); - StartShutdown(); - } - fprintf(stderr, "snapshot completed in: %d seconds\n", (int32_t)(time(NULL)-start)); - } - } - //fprintf(stderr,"%s: returning true\n", __FUNCTION__); - return true; -} -std::pair>, uint64_t> DrainRecentlyConflicted() -{ - uint64_t recentlyConflictedSequence; - std::map> txs; - { - LOCK(cs_main); - recentlyConflictedSequence = nRecentlyConflictedSequence; - txs.swap(recentlyConflictedTxs); - } - - return std::make_pair(txs, 
recentlyConflictedSequence); -} - -void SetChainNotifiedSequence(uint64_t recentlyConflictedSequence) { - assert(Params().NetworkIDString() == "regtest"); - LOCK(cs_main); - nNotifiedSequence = recentlyConflictedSequence; -} - -bool ChainIsFullyNotified() { - assert(Params().NetworkIDString() == "regtest"); - LOCK(cs_main); - return nRecentlyConflictedSequence == nNotifiedSequence; -} - -/** - * Return the tip of the chain with the most work in it, that isn't - * known to be invalid (it's however far from certain to be valid). - */ -static CBlockIndex* FindMostWorkChain() { - do { - CBlockIndex *pindexNew = NULL; - - // Find the best candidate header. - { - std::set::reverse_iterator it = setBlockIndexCandidates.rbegin(); - if (it == setBlockIndexCandidates.rend()) - return NULL; - pindexNew = *it; - } - - // Check whether all blocks on the path between the currently active chain and the candidate are valid. - // Just going until the active chain is an optimization, as we know all blocks in it are valid already. - CBlockIndex *pindexTest = pindexNew; - bool fInvalidAncestor = false; - while (pindexTest && !chainActive.Contains(pindexTest)) { - assert(pindexTest->nChainTx || pindexTest->GetHeight() == 0); - - // Pruned nodes may have entries in setBlockIndexCandidates for - // which block files have been deleted. Remove those as candidates - // for the most work chain if we come across them; we can't switch - // to a chain unless we have all the non-active-chain parent blocks. - bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK; - bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA); - if (fFailedChain || fMissingData) { - // Candidate chain is not usable (either invalid or missing data) - if (fFailedChain && (pindexBestInvalid == NULL || pindexNew->chainPower > pindexBestInvalid->chainPower)) - pindexBestInvalid = pindexNew; - CBlockIndex *pindexFailed = pindexNew; - // Remove the entire chain from the set. 
- while (pindexTest != pindexFailed) { - if (fFailedChain) { - pindexFailed->nStatus |= BLOCK_FAILED_CHILD; - } else if (fMissingData) { - // If we're missing data, then add back to mapBlocksUnlinked, - // so that if the block arrives in the future we can try adding - // to setBlockIndexCandidates again. - mapBlocksUnlinked.insert(std::make_pair(pindexFailed->pprev, pindexFailed)); - } - setBlockIndexCandidates.erase(pindexFailed); - pindexFailed = pindexFailed->pprev; - } - setBlockIndexCandidates.erase(pindexTest); - fInvalidAncestor = true; - break; - } - pindexTest = pindexTest->pprev; - } - if (!fInvalidAncestor) - return pindexNew; - } while(true); -} - -/** Delete all entries in setBlockIndexCandidates that are worse than the current tip. */ -static void PruneBlockIndexCandidates() { - //fprintf(stderr,"%s:, setBlockIndexCandidates.size=%d\n", __FUNCTION__, setBlockIndexCandidates.size() ); - // Note that we can't delete the current block itself, as we may need to return to it later in case a - // reorganization to a better block fails. - std::set::iterator it = setBlockIndexCandidates.begin(); - while (it != setBlockIndexCandidates.end() && setBlockIndexCandidates.value_comp()(*it, chainActive.LastTip())) { - //fprintf(stderr,"%s:, erasing blockindexcandidate element height=%d, time=%d\n", __FUNCTION__, (*it)->GetHeight(), (*it)->GetBlockTime() ); - setBlockIndexCandidates.erase(it++); - //fprintf(stderr,"%s:, erased element\n", __FUNCTION__); - } - //fprintf(stderr,"%s:, setBlockIndexCandidates.size()=%d\n", __FUNCTION__, setBlockIndexCandidates.size() ); - // Either the current tip or a successor of it we're working towards is left in setBlockIndexCandidates. - assert(!setBlockIndexCandidates.empty()); -} - -/** - * Try to make some progress towards making pindexMostWork the active block. - * pblock is either NULL or a pointer to a CBlock corresponding to pindexMostWork. 
- */ -static bool ActivateBestChainStep(bool fSkipdpow, CValidationState &state, CBlockIndex *pindexMostWork, CBlock *pblock) { - AssertLockHeld(cs_main); - bool fInvalidFound = false; - const CBlockIndex *pindexOldTip = chainActive.Tip(); - const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork); - - // stop trying to reorg if the reorged chain is before last notarized height. - // stay on the same chain tip! - int32_t notarizedht,prevMoMheight; uint256 notarizedhash,txid; - notarizedht = hush_notarized_height(&prevMoMheight,¬arizedhash,&txid); - if ( !fSkipdpow && pindexFork != 0 && pindexOldTip->GetHeight() > notarizedht && pindexFork->GetHeight() < notarizedht ) - { - LogPrintf("pindexOldTip->GetHeight().%d > notarizedht %d && pindexFork->GetHeight().%d is < notarizedht %d, so ignore it\n",(int32_t)pindexOldTip->GetHeight(),notarizedht,(int32_t)pindexFork->GetHeight(),notarizedht); - // *** DEBUG *** - if (1) - { - const CBlockIndex *pindexLastNotarized = mapBlockIndex[notarizedhash]; - auto msg = "- " + strprintf(_("Current tip : %s, height %d, work %s"), - pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight(), pindexOldTip->chainPower.chainWork.GetHex()) + "\n" + - "- " + strprintf(_("New tip : %s, height %d, work %s"), - pindexMostWork->phashBlock->GetHex(), pindexMostWork->GetHeight(), pindexMostWork->chainPower.chainWork.GetHex()) + "\n" + - "- " + strprintf(_("Fork point : %s, height %d"), - pindexFork->phashBlock->GetHex(), pindexFork->GetHeight()) + "\n" + - "- " + strprintf(_("Last ntrzd : %s, height %d"), - pindexLastNotarized->phashBlock->GetHex(), pindexLastNotarized->GetHeight()); - LogPrintf("[ Debug ]\n%s\n",msg); - - int nHeight = pindexFork ? 
pindexFork->GetHeight() : -1; - int nTargetHeight = std::min(nHeight + 32, pindexMostWork->GetHeight()); - - LogPrintf("[ Debug ] nHeight = %d, nTargetHeight = %d\n", nHeight, nTargetHeight); - - CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); - while (pindexIter && pindexIter->GetHeight() != nHeight) { - LogPrintf("[ Debug -> New blocks list ] %s, height %d\n", pindexIter->phashBlock->GetHex(), pindexIter->GetHeight()); - pindexIter = pindexIter->pprev; - } - } - - CValidationState tmpstate; - InvalidateBlock(tmpstate,pindexMostWork); // trying to invalidate longest chain, which tried to reorg notarized chain (in case of fork point below last notarized block) - return state.DoS(100, error("ActivateBestChainStep(): pindexOldTip->GetHeight().%d > notarizedht %d && pindexFork->GetHeight().%d is < notarizedht %d, so ignore it",(int32_t)pindexOldTip->GetHeight(),notarizedht,(int32_t)pindexFork->GetHeight(),notarizedht), - REJECT_INVALID, "past-notarized-height"); - } - // - On ChainDB initialization, pindexOldTip will be null, so there are no removable blocks. - // - If pindexMostWork is in a chain that doesn't have the same genesis block as our chain, - // then pindexFork will be null, and we would need to remove the entire chain including - // our genesis block. In practice this (probably) won't happen because of checks elsewhere. - auto reorgLength = pindexOldTip ? pindexOldTip->GetHeight() - (pindexFork ? pindexFork->GetHeight() : -1) : 0; - assert(MAX_REORG_LENGTH > 0);//, "We must be able to reorg some distance"); - if ( reorgLength > MAX_REORG_LENGTH) - { - auto msg = strprintf(_( - "A block chain reorganization has been detected that would roll back %d blocks!!! " - "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety." 
- ), reorgLength, MAX_REORG_LENGTH) + "\n\n" + - _("Reorganization details") + ":\n" + - "- " + strprintf(_("Current tip: %s, height %d, work %s\n"), - pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight(), pindexOldTip->chainPower.chainWork.GetHex()) + "\n" + - "- " + strprintf(_("New tip: %s, height %d, work %s\n"), - pindexMostWork->phashBlock->GetHex(), pindexMostWork->GetHeight(), pindexMostWork->chainPower.chainWork.GetHex()) + "\n" + - "- " + strprintf(_("Fork point: %s %s, height %d"), - SMART_CHAIN_SYMBOL,pindexFork->phashBlock->GetHex(), pindexFork->GetHeight()) + "\n\n" + - _("Please help me, wise human!"); - LogPrintf("*** %s\nif you launch with -maxreorg=%d it might be able to resolve this automatically", msg,reorgLength+10); - fprintf(stderr,"*** %s\nif you launch with -maxreorg=%d it might be able to resolve this automatically", msg.c_str(),reorgLength+10); - uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR); - StartShutdown(); - return false; - } - - // Disconnect active blocks which are no longer in the best chain. - bool fBlocksDisconnected = false; - - while (chainActive.Tip() && chainActive.Tip() != pindexFork) { - if (!DisconnectTip(state)) - return false; - fBlocksDisconnected = true; - } - if ( HUSH_REWIND != 0 ) - { - CBlockIndex *tipindex; - fprintf(stderr,">>>>>>>>>>> rewind start ht.%d -> HUSH_REWIND.%d\n",chainActive.LastTip()->GetHeight(),HUSH_REWIND); - while ( HUSH_REWIND > 0 && (tipindex= chainActive.LastTip()) != 0 && tipindex->GetHeight() > HUSH_REWIND ) - { - fBlocksDisconnected = true; - fprintf(stderr,"%d ",(int32_t)tipindex->GetHeight()); - InvalidateBlock(state,tipindex); - if ( !DisconnectTip(state) ) - break; - } - fprintf(stderr,"reached rewind.%d, best to do: ./hush-cli -ac_name=%s stop\n",HUSH_REWIND,SMART_CHAIN_SYMBOL); - sleep(20); - fprintf(stderr,"resuming normal operations\n"); - HUSH_REWIND = 0; - //return(true); - } - // Build list of new blocks to connect. 
- std::vector vpindexToConnect; - bool fContinue = true; - int nHeight = pindexFork ? pindexFork->GetHeight() : -1; - while (fContinue && nHeight != pindexMostWork->GetHeight()) { - // Don't iterate the entire list of potential improvements toward the best tip, as we likely only need - // a few blocks along the way. - int nTargetHeight = std::min(nHeight + 32, pindexMostWork->GetHeight()); - vpindexToConnect.clear(); - vpindexToConnect.reserve(nTargetHeight - nHeight); - CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight); - while (pindexIter && pindexIter->GetHeight() != nHeight) { - vpindexToConnect.push_back(pindexIter); - pindexIter = pindexIter->pprev; - } - nHeight = nTargetHeight; - - // Connect new blocks. - BOOST_REVERSE_FOREACH(CBlockIndex *pindexConnect, vpindexToConnect) { - if (!ConnectTip(state, pindexConnect, pindexConnect == pindexMostWork ? pblock : NULL)) { - if (state.IsInvalid()) { - // The block violates a consensus rule. - if (!state.CorruptionPossible()) - InvalidChainFound(vpindexToConnect.back()); - state = CValidationState(); - fInvalidFound = true; - fContinue = false; - break; - } else { - // A system error occurred (disk space, database error, ...). - return false; - } - } else { - PruneBlockIndexCandidates(); - if (!pindexOldTip || chainActive.Tip()->chainPower > pindexOldTip->chainPower) { - // We're in a better position than we were. Return temporarily to release the lock. - fContinue = false; - break; - } - } - } - } - - if (fBlocksDisconnected) { - mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); - } - mempool.removeWithoutBranchId( - CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); - mempool.check(pcoinsTip); - - // Callbacks/notifications for a new best chain. 
- if (fInvalidFound) - CheckForkWarningConditionsOnNewFork(vpindexToConnect.back()); - else - CheckForkWarningConditions(); - - return true; -} - -/** - * Make the best chain active, in multiple steps. The result is either failure - * or an activated best chain. pblock is either NULL or a pointer to a block - * that is already loaded (to avoid loading it again from disk). - */ -bool ActivateBestChain(bool fSkipdpow, CValidationState &state, CBlock *pblock) { - CBlockIndex *pindexNewTip = NULL; - CBlockIndex *pindexMostWork = NULL; - const CChainParams& chainParams = Params(); - do { - // Sleep briefly to allow other threads a chance at grabbing cs_main if - // we are connecting a long chain of blocks and would otherwise hold the - // lock almost continuously. This helps - // the internal wallet, if it is enabled, to keep up with the connected - // blocks, reducing the overall time until the node becomes usable. - // - // This is defined to be an interruption point. - // - boost::this_thread::sleep_for(boost::chrono::microseconds(200)); - - if (ShutdownRequested()) - break; - - bool fInitialDownload; - { - LOCK(cs_main); - pindexMostWork = FindMostWorkChain(); - - // Whether we have anything to do at all. - if (pindexMostWork == NULL || pindexMostWork == chainActive.Tip()) - return true; - - if (!ActivateBestChainStep(fSkipdpow, state, pindexMostWork, pblock && pblock->GetHash() == pindexMostWork->GetBlockHash() ? pblock : NULL)) - return false; - pindexNewTip = chainActive.Tip(); - fInitialDownload = IsInitialBlockDownload(); - } - // When we reach this point, we switched to a new tip (stored in pindexNewTip). - - // Notifications/callbacks that can run without cs_main - if (!fInitialDownload) { - uint256 hashNewTip = pindexNewTip->GetBlockHash(); - // Relay inventory, but don't relay old inventory during initial block download. 
- int nBlockEstimate = 0; - if (fCheckpointsEnabled) - nBlockEstimate = Checkpoints::GetTotalBlocksEstimate(chainParams.Checkpoints()); - // Don't relay blocks if pruning -- could cause a peer to try to download, resulting - // in a stalled download if the block file is pruned before the request. - if (nLocalServices & NODE_NETWORK) { - LOCK(cs_vNodes); - BOOST_FOREACH(CNode* pnode, vNodes) - if (chainActive.Height() > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate)) - pnode->PushInventory(CInv(MSG_BLOCK, hashNewTip)); - } - // Notify external listeners about the new tip. - GetMainSignals().UpdatedBlockTip(pindexNewTip); - uiInterface.NotifyBlockTip(hashNewTip); - } //else fprintf(stderr,"initial download skips propagation\n"); - } while(pindexMostWork != chainActive.Tip()); - CheckBlockIndex(); - - // Write changes periodically to disk, after relay. - if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) { - return false; - } - - return true; -} - -bool InvalidateBlock(CValidationState& state, CBlockIndex *pindex) { - AssertLockHeld(cs_main); - - // Mark the block itself as invalid. - pindex->nStatus |= BLOCK_FAILED_VALID; - setDirtyBlockIndex.insert(pindex); - setBlockIndexCandidates.erase(pindex); - - while (chainActive.Contains(pindex)) { - CBlockIndex *pindexWalk = chainActive.Tip(); - pindexWalk->nStatus |= BLOCK_FAILED_CHILD; - setDirtyBlockIndex.insert(pindexWalk); - setBlockIndexCandidates.erase(pindexWalk); - // ActivateBestChain considers blocks already in chainActive - // unconditionally valid already, so force disconnect away from it. 
- if (!DisconnectTip(state)) { - mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); - mempool.removeWithoutBranchId( - CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); - return false; - } - } - //LimitMempoolSize(mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60); - - // The resulting new best tip may not be in setBlockIndexCandidates anymore, so - // add it again. - BlockMap::iterator it = mapBlockIndex.begin(); - while (it != mapBlockIndex.end()) { - if ((it->second != 0) && it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && !setBlockIndexCandidates.value_comp()(it->second, chainActive.Tip())) { - setBlockIndexCandidates.insert(it->second); - } - it++; - } - - InvalidChainFound(pindex); - mempool.removeForReorg(pcoinsTip, chainActive.Tip()->GetHeight() + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); - mempool.removeWithoutBranchId( - CurrentEpochBranchId(chainActive.Tip()->GetHeight() + 1, Params().GetConsensus())); - return true; -} - -bool ReconsiderBlock(CValidationState& state, CBlockIndex *pindex) { - AssertLockHeld(cs_main); - - int nHeight = pindex->GetHeight(); - - // Remove the invalidity flag from this block and all its descendants. - BlockMap::iterator it = mapBlockIndex.begin(); - while (it != mapBlockIndex.end()) { - if ((it->second != 0) && !it->second->IsValid() && it->second->GetAncestor(nHeight) == pindex) { - it->second->nStatus &= ~BLOCK_FAILED_MASK; - setDirtyBlockIndex.insert(it->second); - if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) && it->second->nChainTx && setBlockIndexCandidates.value_comp()(chainActive.Tip(), it->second)) { - setBlockIndexCandidates.insert(it->second); - } - if (it->second == pindexBestInvalid) { - // Reset invalid block marker if it was pointing to one of those. 
- pindexBestInvalid = NULL; - } - } - it++; - } - - // Remove the invalidity flag from all ancestors too. - while (pindex != NULL) { - if (pindex->nStatus & BLOCK_FAILED_MASK) { - pindex->nStatus &= ~BLOCK_FAILED_MASK; - setDirtyBlockIndex.insert(pindex); - } - pindex = pindex->pprev; - } - return true; -} - -CBlockIndex* AddToBlockIndex(const CBlockHeader& block) -{ - // Check for duplicate - uint256 hash = block.GetHash(); - BlockMap::iterator it = mapBlockIndex.find(hash); - BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock); - - // the following block is for debugging, comment when not needed - /* - std::vector vrit; - for (BlockMap::iterator bit = mapBlockIndex.begin(); bit != mapBlockIndex.end(); bit++) - { - if (bit->second == NULL) - vrit.push_back(bit); - } - if (!vrit.empty()) - { - printf("found %d NULL blocks in mapBlockIndex\n", vrit.size()); - } - */ - - if (it != mapBlockIndex.end()) - { - if ( it->second != 0 ) // vNodes.size() >= HUSH_LIMITED_NETWORKSIZE - { - // this is the strange case where somehow the hash is in the mapBlockIndex via as yet undetermined process, but the pindex for the hash is not there. Theoretically it is due to processing the block headers, but I have seen it get this case without having received it from the block headers or anywhere else... jl777 - //fprintf(stderr,"addtoblockindex already there %p\n",it->second); - return it->second; - } - if ( miPrev != mapBlockIndex.end() && (*miPrev).second == 0 ) - { - fprintf(stderr,"%s: edge case of both block and prevblock in the strange state\n", __func__); - return(0); // return here to avoid the state of pindex->GetHeight() not set and pprev NULL - } - } - // Construct new block index object - CBlockIndex* pindexNew = new CBlockIndex(block); - assert(pindexNew); - // We assign the sequence id to blocks only when the full data is available, - // to avoid miners withholding blocks but broadcasting headers, to get a - // competitive advantage. 
- pindexNew->nSequenceId = 0; - BlockMap::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; - pindexNew->phashBlock = &((*mi).first); - if (miPrev != mapBlockIndex.end()) - { - if ( (pindexNew->pprev = (*miPrev).second) != 0 ) - pindexNew->SetHeight(pindexNew->pprev->GetHeight() + 1); - else fprintf(stderr,"unexpected null pprev %s\n",hash.ToString().c_str()); - pindexNew->BuildSkip(); - } - pindexNew->chainPower = (pindexNew->pprev ? CChainPower(pindexNew) + pindexNew->pprev->chainPower : CChainPower(pindexNew)) + GetBlockProof(*pindexNew); - pindexNew->RaiseValidity(BLOCK_VALID_TREE); - if (pindexBestHeader == NULL || pindexBestHeader->chainPower < pindexNew->chainPower) - pindexBestHeader = pindexNew; - - setDirtyBlockIndex.insert(pindexNew); - //fprintf(stderr,"added to block index %s %p\n",hash.ToString().c_str(),pindexNew); - mi->second = pindexNew; - return pindexNew; -} - -/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */ -bool ReceivedBlockTransactions(const CBlock &block, CValidationState& state, CBlockIndex *pindexNew, const CDiskBlockPos& pos) -{ - pindexNew->nTx = block.vtx.size(); - pindexNew->nChainTx = 0; - CAmount sproutValue = 0; - CAmount saplingValue = 0; - bool isShieldedTx = false; - unsigned int nShieldedSpends=0,nShieldedSpendsInBlock=0,nShieldedOutputs=0,nPayments=0,nShieldedOutputsInBlock=0; - unsigned int nShieldedTx=0,nFullyShieldedTx=0,nDeshieldingTx=0,nShieldingTx=0; - unsigned int nShieldedPayments=0,nFullyShieldedPayments=0,nShieldingPayments=0,nDeshieldingPayments=0; - unsigned int nNotarizations=0; - - for (auto tx : block.vtx) { - // Negative valueBalance "takes" money from the transparent value pool - // and adds it to the Sapling value pool. Positive valueBalance "gives" - // money to the transparent value pool, removing from the Sapling value - // pool. So we invert the sign here. 
- saplingValue += -tx.valueBalance; - - // Ignore following stats unless -zindex enabled - if (!fZindex) - continue; - - nShieldedSpends = tx.vShieldedSpend.size(); - nShieldedOutputs = tx.vShieldedOutput.size(); - isShieldedTx = (nShieldedSpends + nShieldedOutputs) > 0 ? true : false; - - // We want to avoid full verification with a low false-positive rate - // TODO: A nefarious user could create xtns which meet these criteria and skew stats, what - // else can we look for which is not full validation? - // Can we filter on properties of tx.vout[0] ? - if(tx.vin.size()==13 && tx.vout.size()==2 && tx.vout[1].scriptPubKey.IsOpReturn() && tx.vout[1].nValue==0) { - nNotarizations++; - } - - //NOTE: These are at best heuristics. Improve them as much as possible. - // You cannot compare stats generated from different sets of heuristics, so - // if you change this code, you must reindex or delete datadir + resync from scratch, or you - // will be mixing together data from two set of heuristics. - if(isShieldedTx) { - nShieldedTx++; - // NOTE: It's possible for very complex transactions to be both shielding and deshielding, - // such as (t,z)=>(t,z) Since these transactions cannot be made via RPCs currently, they - // would currently need to be made via raw transactions - if(tx.vin.size()==0 && tx.vout.size()==0) { - nFullyShieldedTx++; - } else if(tx.vin.size()>0) { - nShieldingTx++; - } else if(tx.vout.size()>0) { - nDeshieldingTx++; - } - - if (nShieldedOutputs >= 1) { - // If there are shielded outputs, count each as a payment - // By default, if there is more than 1 output, we assume 1 zaddr change output which is not a payment. - // In the case of multiple outputs which spend inputs exactly, there is no change output and this - // heuristic will undercount payments. Since this edge case is rare, this seems acceptable. 
- // t->(t,t,z) = 1 shielded payment - // z->(z,z) = 1 shielded payment + shielded change - // t->(z,z) = 1 shielded payment + shielded change - // t->(t,z) = 1 shielded payment + transparent change - // (z,z)->z = 1 shielded payment (has this xtn ever occurred?) - // z->(z,z,z) = 2 shielded payments + shielded change - // Assume that there is always 1 change output when there are more than one output - nShieldedPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; - // since we have at least 1 zoutput, all transparent outputs are payments, not change - nShieldedPayments += tx.vout.size(); - - // Fully shielded do not count toward shielding/deshielding - if(tx.vin.size()==0 && tx.vout.size()==0) { - nFullyShieldedPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; - } else { - nShieldingPayments += nShieldedOutputs > 1 ? (nShieldedOutputs-1) : 1; - // Also count remaining taddr outputs as payments - nShieldedPayments += tx.vout.size(); - } - } else if (nShieldedSpends >=1) { - // Shielded inputs with no shielded outputs. We know none are change output because - // change would flow back to the zaddr - // z->t = 1 shielded payment - // z->(t,t) = 2 shielded payments - // z->(t,t,t) = 3 shielded payments - nShieldedPayments += tx.vout.size(); - nDeshieldingPayments += tx.vout.size() > 1 ? tx.vout.size()-1 : tx.vout.size(); - } - nPayments += nShieldedPayments; - } else { - // No shielded payments, add transparent payments minus a change address - nPayments += tx.vout.size() > 1 ? tx.vout.size()-1 : tx.vout.size(); - } - // To calculate the anonset we must track the sum of spends and zouts in every tx, in every block. 
-- Duke - nShieldedOutputsInBlock += nShieldedOutputs; - nShieldedSpendsInBlock += nShieldedSpends; - if (fZdebug) { - fprintf(stderr,"%s: tx=%s has zspends=%d zouts=%d\n", __FUNCTION__, tx.GetHash().ToString().c_str(), nShieldedSpends, nShieldedOutputs ); - } - } - if (fDebug) { - fprintf(stderr,"%s: block %s has total zspends=%d zouts=%d\n", __FUNCTION__, block.GetHash().ToString().c_str(), nShieldedSpendsInBlock, nShieldedOutputsInBlock ); - } - - pindexNew->nSproutValue = sproutValue; - pindexNew->nChainSproutValue = boost::none; - pindexNew->nSaplingValue = saplingValue; - pindexNew->nChainSaplingValue = boost::none; - pindexNew->nFile = pos.nFile; - pindexNew->nDataPos = pos.nPos; - pindexNew->nUndoPos = 0; - pindexNew->nStatus |= BLOCK_HAVE_DATA; - pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS); - - if (fZindex) { - pindexNew->nPayments = nPayments; - pindexNew->nShieldedTx = nShieldedTx; - pindexNew->nShieldedOutputs = nShieldedOutputsInBlock; - pindexNew->nShieldedSpends = nShieldedSpendsInBlock; - pindexNew->nFullyShieldedTx = nFullyShieldedTx; - pindexNew->nDeshieldingTx = nDeshieldingTx; - pindexNew->nShieldingTx = nShieldingTx; - pindexNew->nShieldedPayments = nShieldedPayments; - pindexNew->nFullyShieldedPayments = nFullyShieldedPayments; - pindexNew->nDeshieldingPayments = nDeshieldingPayments; - pindexNew->nShieldingPayments = nShieldingPayments; - pindexNew->nNotarizations = nNotarizations; - } - setDirtyBlockIndex.insert(pindexNew); - - if (pindexNew->pprev == NULL || pindexNew->pprev->nChainTx) { - // If pindexNew is the genesis block or all parents are BLOCK_VALID_TRANSACTIONS. - deque queue; - queue.push_back(pindexNew); - - // Recursively process any descendant blocks that now may be eligible to be connected. - while (!queue.empty()) { - CBlockIndex *pindex = queue.front(); - queue.pop_front(); - pindex->nChainTx = (pindex->pprev ? 
pindex->pprev->nChainTx : 0) + pindex->nTx; - - // Update -zindex stats - if (fZindex) { - if (fZdebug) { - //fprintf(stderr,"%s: setting blockchain zstats with zspends=%d, zouts=%d\n", __FUNCTION__, nShieldedSpendsInBlock, nShieldedOutputsInBlock ); - } - if (pindex->pprev) { - // If chain stats are zero (such as after restart), load data from zindex.dat - if (pindex->pprev->nChainNotarizations == 0) - pindex->pprev->nChainNotarizations = zstats.nChainNotarizations; - if (pindex->pprev->nChainShieldedTx == 0) - pindex->pprev->nChainShieldedTx = zstats.nChainShieldedTx; - if (pindex->pprev->nChainShieldedOutputs == 0) - pindex->pprev->nChainShieldedOutputs = zstats.nChainShieldedOutputs; - if (pindex->pprev->nChainShieldedSpends == 0) { - pindex->pprev->nChainShieldedSpends = zstats.nChainShieldedSpends; - // TODO: if zstats.nHeight != chainActive.Height() the stats will be off - fprintf(stderr, "%s: loaded anonymity set of %li at stats height=%li vs local height=%d from disk\n", __func__, zstats.nChainShieldedOutputs - zstats.nChainShieldedSpends, zstats.nHeight, chainActive.Height() ); - } - if (pindex->pprev->nChainFullyShieldedTx == 0) - pindex->pprev->nChainFullyShieldedTx = zstats.nChainFullyShieldedTx; - if (pindex->pprev->nChainShieldingTx == 0) - pindex->pprev->nChainShieldingTx = zstats.nChainShieldingTx; - if (pindex->pprev->nChainDeshieldingTx == 0) - pindex->pprev->nChainDeshieldingTx = zstats.nChainDeshieldingTx; - if (pindex->pprev->nChainPayments == 0) { - fprintf(stderr, "%s: setting nChainPayments=%li at height %d\n", __func__, zstats.nChainPayments, chainActive.Height() ); - pindex->pprev->nChainPayments = zstats.nChainPayments; - } - if (pindex->pprev->nChainShieldedPayments == 0) - pindex->pprev->nChainShieldedPayments = zstats.nChainShieldedPayments; - if (pindex->pprev->nChainFullyShieldedPayments == 0) - pindex->pprev->nChainFullyShieldedPayments = zstats.nChainFullyShieldedPayments; - if (pindex->pprev->nChainShieldingPayments == 0) - 
pindex->pprev->nChainShieldingPayments = zstats.nChainShieldingPayments; - if (pindex->pprev->nChainDeshieldingPayments == 0) - pindex->pprev->nChainDeshieldingPayments = zstats.nChainDeshieldingPayments; - } - - pindex->nChainNotarizations = (pindex->pprev ? pindex->pprev->nChainNotarizations : 0) + pindex->nNotarizations; - pindex->nChainShieldedTx = (pindex->pprev ? pindex->pprev->nChainShieldedTx : 0) + pindex->nShieldedTx; - pindex->nChainShieldedOutputs = (pindex->pprev ? pindex->pprev->nChainShieldedOutputs : 0) + pindex->nShieldedOutputs; - pindex->nChainShieldedSpends = (pindex->pprev ? pindex->pprev->nChainShieldedSpends : 0) + pindex->nShieldedSpends; - pindex->nChainFullyShieldedTx = (pindex->pprev ? pindex->pprev->nChainFullyShieldedTx : 0) + pindex->nFullyShieldedTx; - pindex->nChainShieldingTx = (pindex->pprev ? pindex->pprev->nChainShieldingTx : 0) + pindex->nShieldingTx; - pindex->nChainDeshieldingTx = (pindex->pprev ? pindex->pprev->nChainDeshieldingTx : 0) + pindex->nDeshieldingTx; - pindex->nChainPayments = (pindex->pprev ? pindex->pprev->nChainPayments : 0) + pindex->nPayments; - pindex->nChainShieldedPayments = (pindex->pprev ? pindex->pprev->nChainShieldedPayments : 0) + pindex->nShieldedPayments; - pindex->nChainFullyShieldedPayments = (pindex->pprev ? pindex->pprev->nChainFullyShieldedPayments : 0) + pindex->nFullyShieldedPayments; - pindex->nChainShieldingPayments = (pindex->pprev ? pindex->pprev->nChainShieldingPayments : 0) + pindex->nShieldingPayments; - pindex->nChainDeshieldingPayments = (pindex->pprev ? 
pindex->pprev->nChainDeshieldingPayments : 0) + pindex->nDeshieldingPayments; - - // Update in-memory structure that gets serialized to zindex.dat - zstats.nHeight = pindex->GetHeight(); - zstats.nChainNotarizations = pindex->nChainNotarizations ; - zstats.nChainShieldedTx = pindex->nChainShieldedTx ; - zstats.nChainShieldedOutputs = pindex->nChainShieldedOutputs ; - zstats.nChainShieldedSpends = pindex->nChainShieldedSpends ; - zstats.nChainFullyShieldedTx = pindex->nChainFullyShieldedTx ; - zstats.nChainShieldingTx = pindex->nChainShieldingTx ; - zstats.nChainDeshieldingTx = pindex->nChainDeshieldingTx ; - zstats.nChainPayments = pindex->nChainPayments ; - zstats.nChainShieldedPayments = pindex->nChainShieldedPayments ; - zstats.nChainFullyShieldedPayments = pindex->nChainFullyShieldedPayments ; - zstats.nChainShieldingPayments = pindex->nChainShieldingPayments ; - zstats.nChainDeshieldingPayments = pindex->nChainDeshieldingPayments ; - fprintf(stderr,"%s: setting zstats with height,zouts,zspends,anonset=%li,%li,%li,%li\n", __FUNCTION__, zstats.nHeight, zstats.nChainShieldedOutputs, zstats.nChainShieldedSpends, zstats.nChainShieldedOutputs - zstats.nChainShieldedSpends); - - } - - if (pindex->pprev) { - if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) { - pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue; - } else { - pindex->nChainSproutValue = boost::none; - } - if (pindex->pprev->nChainSaplingValue) { - pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue; - } else { - pindex->nChainSaplingValue = boost::none; - } - } else { - pindex->nChainSproutValue = pindex->nSproutValue; - pindex->nChainSaplingValue = pindex->nSaplingValue; - } - { - LOCK(cs_nBlockSequenceId); - pindex->nSequenceId = nBlockSequenceId++; - } - if (chainActive.Tip() == NULL || !setBlockIndexCandidates.value_comp()(pindex, chainActive.Tip())) { - setBlockIndexCandidates.insert(pindex); - } - 
std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex); - while (range.first != range.second) { - std::multimap::iterator it = range.first; - queue.push_back(it->second); - range.first++; - mapBlocksUnlinked.erase(it); - } - } - } else { - if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) { - mapBlocksUnlinked.insert(std::make_pair(pindexNew->pprev, pindexNew)); - } - } - - - if (fZindex) - fprintf(stderr, "ht.%d, ShieldedPayments=%d, ShieldedTx=%d, ShieldedOutputs=%d, FullyShieldedTx=%d, ntz=%d\n", - pindexNew->GetHeight(), nShieldedPayments, nShieldedTx, nShieldedOutputs, nFullyShieldedTx, nNotarizations ); - - return true; -} - -bool FindBlockPos(int32_t tmpflag,CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64_t nTime, bool fKnown) -{ - std::vector *ptr; int *lastfilep; - LOCK(cs_LastBlockFile); - - unsigned int nFile,maxTempFileSize; - - if ( tmpflag != 0 ) - { - ptr = &tmpBlockFiles; - nFile = nLastTmpFile; - lastfilep = &nLastTmpFile; - if (tmpBlockFiles.size() <= nFile) { - tmpBlockFiles.resize(nFile + 1); - } - if ( nFile == 0 ) - maxTempFileSize = maxTempFileSize0; - else if ( nFile == 1 ) - maxTempFileSize = maxTempFileSize1; - } - else - { - ptr = &vinfoBlockFile; - lastfilep = &nLastBlockFile; - nFile = fKnown ? pos.nFile : nLastBlockFile; - if (vinfoBlockFile.size() <= nFile) { - vinfoBlockFile.resize(nFile + 1); - } - } - - if (!fKnown) { - bool tmpfileflag = false; - while ( (*ptr)[nFile].nSize + nAddSize >= ((tmpflag != 0) ? 
maxTempFileSize : MAX_BLOCKFILE_SIZE) ) { - if ( tmpflag != 0 && tmpfileflag ) - break; - nFile++; - if ((*ptr).size() <= nFile) { - (*ptr).resize(nFile + 1); - } - tmpfileflag = true; - } - pos.nFile = nFile + tmpflag*TMPFILE_START; - pos.nPos = (*ptr)[nFile].nSize; - } - if (nFile != *lastfilep) { - if (!fKnown) { - LogPrintf("Leaving block file %i: %s\n", nFile, (*ptr)[nFile].ToString()); - } - FlushBlockFile(!fKnown); - //fprintf(stderr, "nFile = %i size.%li maxTempFileSize0.%u maxTempFileSize1.%u\n",nFile,tmpBlockFiles.size(),maxTempFileSize0,maxTempFileSize1); - if ( tmpflag != 0 && tmpBlockFiles.size() >= 3 ) - { - if ( nFile == 1 ) // Trying to get to second temp file. - { - if (!PruneOneBlockFile(true,TMPFILE_START+1)) - { - // file 1 is not ready to be used yet increase file 0's size. - fprintf(stderr, "Cant clear file 1!\n"); - // We will reset the position to the end of the first file, even if its over max size. - nFile = 0; - pos.nFile = TMPFILE_START; - pos.nPos = (*ptr)[0].nSize; - // Increase temp file one's max size by a chunk, so we wait a reasonable time to recheck the other file. - maxTempFileSize0 += BLOCKFILE_CHUNK_SIZE; - } - else - { - // The file 1 is able to be used now. Reset max size, and set nfile to use file 1. - fprintf(stderr, "CLEARED file 1!\n"); - maxTempFileSize0 = MAX_TEMPFILE_SIZE; - nFile = 1; - tmpBlockFiles[1].SetNull(); - pos.nFile = TMPFILE_START+1; - pos.nPos = (*ptr)[1].nSize; - boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); - LogPrintf("Prune: deleted temp blk (%05u)\n",nFile); - } - if ( 0 && tmpflag != 0 ) - fprintf(stderr,"pos.nFile %d nPos %u\n",pos.nFile,pos.nPos); - } - else if ( nFile == 2 ) // Trying to get to third temp file. - { - if (!PruneOneBlockFile(true,TMPFILE_START)) - { - fprintf(stderr, "Cant clear file 0!\n"); - // We will reset the position to the end of the second block file, even if its over max size. 
- nFile = 1; - pos.nFile = TMPFILE_START+1; - pos.nPos = (*ptr)[1].nSize; - // Increase temp file one's max size by a chunk, so we wait a reasonable time to recheck the other file. - maxTempFileSize1 += BLOCKFILE_CHUNK_SIZE; - } - else - { - // The file 0 is able to be used now. Reset max size, and set nfile to use file 0. - fprintf(stderr, "CLEARED file 0!\n"); - maxTempFileSize1 = MAX_TEMPFILE_SIZE; - nFile = 0; - tmpBlockFiles[0].SetNull(); - pos.nFile = TMPFILE_START; - pos.nPos = (*ptr)[0].nSize; - boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); - LogPrintf("Prune: deleted temp blk (%05u)\n",nFile); - } - if ( 0 && tmpflag != 0 ) - fprintf(stderr,"pos.nFile %d nPos %u\n",pos.nFile,pos.nPos); - } - //sleep(30); - } - //fprintf(stderr, "nFile = %i size.%li maxTempFileSize0.%u maxTempFileSize1.%u\n",nFile,tmpBlockFiles.size(),maxTempFileSize0,maxTempFileSize1); sleep(30); - *lastfilep = nFile; - //fprintf(stderr, "*lastfilep = %i\n",*lastfilep); - } - - (*ptr)[nFile].AddBlock(nHeight, nTime); - if (fKnown) - (*ptr)[nFile].nSize = std::max(pos.nPos + nAddSize, (*ptr)[nFile].nSize); - else - (*ptr)[nFile].nSize += nAddSize; - - if (!fKnown) { - unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; - unsigned int nNewChunks = ((*ptr)[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE; - if (nNewChunks > nOldChunks) { - if (fPruneMode) - fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) { - FILE *file = OpenBlockFile(pos); - if (file) { - LogPrintf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile); - AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos); - fclose(file); - } - } - else - return state.Error("out of disk space"); - } - } - - setDirtyFileInfo.insert(nFile + tmpflag*TMPFILE_START); - return true; -} - -bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, 
unsigned int nAddSize) -{ - std::vector *ptr; int *lastfilep; - LOCK(cs_LastBlockFile); - pos.nFile = nFile; - if ( nFile >= TMPFILE_START ) - { - fprintf(stderr,"skip tmp undo\n"); - return(false); - nFile %= TMPFILE_START; - ptr = &tmpBlockFiles; - } else ptr = &vinfoBlockFile; - - unsigned int nNewSize; - pos.nPos = (*ptr)[nFile].nUndoSize; - nNewSize = (*ptr)[nFile].nUndoSize += nAddSize; - setDirtyFileInfo.insert(nFile); - - unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; - unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE; - if (nNewChunks > nOldChunks) { - if (fPruneMode) - fCheckForPruning = true; - if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) { - FILE *file = OpenUndoFile(pos); - if (file) { - LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile); - AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos); - fclose(file); - } - } - else - return state.Error("out of disk space"); - } - - return true; -} - -bool CheckBlockHeader(int32_t *futureblockp,int32_t height,CBlockIndex *pindex, const CBlockHeader& blockhdr, CValidationState& state, bool fCheckPOW) -{ - // Check timestamp - if ( 0 ) - { - uint256 hash; int32_t i; - hash = blockhdr.GetHash(); - for (i=31; i>=0; i--) - fprintf(stderr,"%02x",((uint8_t *)&hash)[i]); - fprintf(stderr," <- CheckBlockHeader\n"); - if ( chainActive.LastTip() != 0 ) - { - hash = chainActive.LastTip()->GetBlockHash(); - for (i=31; i>=0; i--) - fprintf(stderr,"%02x",((uint8_t *)&hash)[i]); - fprintf(stderr," <- chainTip\n"); - } - } - *futureblockp = 0; - if ( ASSETCHAINS_ADAPTIVEPOW > 0 ) - { - if (blockhdr.GetBlockTime() > GetTime() + 4) - { - //LogPrintf("CheckBlockHeader block from future %d error",blockhdr.GetBlockTime() - GetTime()); - return false; - } - } - else if (blockhdr.GetBlockTime() > GetTime() + 60) - { - /*CBlockIndex *tipindex; - 
//fprintf(stderr,"ht.%d future block %u vs time.%u + 60\n",height,(uint32_t)blockhdr.GetBlockTime(),(uint32_t)GetTime()); - if ( (tipindex= chainActive.Tip()) != 0 && tipindex->GetBlockHash() == blockhdr.hashPrevBlock && blockhdr.GetBlockTime() < GetTime() + 60 + 5 ) - { - //fprintf(stderr,"it is the next block, let's wait for %d seconds\n",GetTime() + 60 - blockhdr.GetBlockTime()); - while ( blockhdr.GetBlockTime() > GetTime() + 60 ) - sleep(1); - //fprintf(stderr,"now its valid\n"); - } - else*/ - { - if (blockhdr.GetBlockTime() < GetTime() + 300) - *futureblockp = 1; - //LogPrintf("CheckBlockHeader block from future %d error",blockhdr.GetBlockTime() - GetTime()); - return false; //state.Invalid(error("CheckBlockHeader(): block timestamp too far in the future"),REJECT_INVALID, "time-too-new"); - } - } - // Check block version - if (height > 0 && blockhdr.nVersion < MIN_BLOCK_VERSION) - return state.DoS(100, error("CheckBlockHeader(): block version too low"),REJECT_INVALID, "version-too-low"); - - // Check Equihash solution is valid - if ( fCheckPOW ) - { - if ( !CheckEquihashSolution(&blockhdr, Params()) ) - return state.DoS(100, error("CheckBlockHeader(): Equihash solution invalid"),REJECT_INVALID, "invalid-solution"); - if ( !CheckRandomXSolution(&blockhdr, height) ) - return state.DoS(100, error("CheckBlockHeader(): RandomX solution invalid"),REJECT_INVALID, "invalid-randomx-solution"); - } - // Check proof of work matches claimed amount - /*hush_index2pubkey33(pubkey33,pindex,height); - if ( fCheckPOW && !CheckProofOfWork(height,pubkey33,blockhdr.GetHash(), blockhdr.nBits, Params().GetConsensus(),blockhdr.nTime) ) - return state.DoS(50, error("CheckBlockHeader(): proof of work failed"),REJECT_INVALID, "high-hash");*/ - return true; -} +// [block_processing.cpp] Lines 1365-4041 extracted int32_t hush_checkPOW(int32_t slowflag,CBlock *pblock,int32_t height); -bool CheckBlock(int32_t *futureblockp,int32_t height,CBlockIndex *pindex,const CBlock& block, 
CValidationState& state, - libzcash::ProofVerifier& verifier, - bool fCheckPOW, bool fCheckMerkleRoot) -{ - uint8_t pubkey33[33]; uint256 hash; uint32_t tiptime = (uint32_t)block.nTime; - // These are checks that are independent of context. - hash = block.GetHash(); - // Check that the header is valid (particularly PoW). This is mostly redundant with the call in AcceptBlockHeader. - if (!CheckBlockHeader(futureblockp,height,pindex,block,state,fCheckPOW)) - { - if ( *futureblockp == 0 ) - { - LogPrintf("CheckBlock header error"); - return false; - } - } - if ( pindex != 0 && pindex->pprev != 0 ) - tiptime = (uint32_t)pindex->pprev->nTime; - if ( fCheckPOW ) - { - //if ( !CheckEquihashSolution(&block, Params()) ) - // return state.DoS(100, error("CheckBlock: Equihash solution invalid"),REJECT_INVALID, "invalid-solution"); - hush_block2pubkey33(pubkey33,(CBlock *)&block); - if ( !CheckProofOfWork(block,pubkey33,height,Params().GetConsensus()) ) - { - int32_t z; for (z=31; z>=0; z--) - fprintf(stderr,"%02x",((uint8_t *)&hash)[z]); - fprintf(stderr," failed hash ht.%d\n",height); - return state.DoS(50, error("CheckBlock: proof of work failed"),REJECT_INVALID, "high-hash"); - } - if ( ASSETCHAINS_STAKED == 0 && hush_checkPOW(1,(CBlock *)&block,height) < 0 ) // checks Equihash - return state.DoS(100, error("CheckBlock: failed slow_checkPOW"),REJECT_INVALID, "failed-slow_checkPOW"); - } - - // Check the merkle root. - if (fCheckMerkleRoot) { - bool mutated; - uint256 hashMerkleRoot2 = block.BuildMerkleTree(&mutated); - if (block.hashMerkleRoot != hashMerkleRoot2) - return state.DoS(100, error("CheckBlock: hashMerkleRoot mismatch"), - REJECT_INVALID, "bad-txnmrklroot", true); - - // Check for merkle tree malleability (CVE-2012-2459): repeating sequences - // of transactions in a block without affecting the merkle root of a block, - // while still invalidating it. 
- if (mutated) - return state.DoS(100, error("CheckBlock: duplicate transaction"), - REJECT_INVALID, "bad-txns-duplicate", true); - } - - // All potential-corruption validation must be done before we do any - // transaction validation, as otherwise we may mark the header as invalid - // because we receive the wrong transactions for it. - - // Size limits - //fprintf(stderr,"%s checkblock %d -> %d vs blocksize.%d\n",SMART_CHAIN_SYMBOL,height,MAX_BLOCK_SIZE(height),(int32_t)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION)); - if (block.vtx.empty() || block.vtx.size() > MAX_BLOCK_SIZE(height) || ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE(height)) - return state.DoS(100, error("CheckBlock: size limits failed"), REJECT_INVALID, "bad-blk-length"); - - // First transaction must be coinbase, the rest must not be - if (block.vtx.empty() || !block.vtx[0].IsCoinBase()) - return state.DoS(100, error("CheckBlock: first tx is not coinbase"), REJECT_INVALID, "bad-cb-missing"); - - for (unsigned int i = 1; i < block.vtx.size(); i++) - if (block.vtx[i].IsCoinBase()) - return state.DoS(100, error("CheckBlock: more than one coinbase"), REJECT_INVALID, "bad-cb-multiple"); - - // Check transactions - CTransaction sTx; - CTransaction *ptx = NULL; - if ( ASSETCHAINS_CC != 0 && !fCheckPOW ) - return true; - - - for (uint32_t i = 0; i < block.vtx.size(); i++) - { - const CTransaction& tx = block.vtx[i]; - if (!CheckTransaction(tiptime,tx, state, verifier, i, (int32_t)block.vtx.size())) - return error("CheckBlock: CheckTransaction failed"); - } - - unsigned int nSigOps = 0; - BOOST_FOREACH(const CTransaction& tx, block.vtx) - { - nSigOps += GetLegacySigOpCount(tx); - } - if (nSigOps > MAX_BLOCK_SIGOPS) - return state.DoS(100, error("CheckBlock: out-of-bounds SigOpCount"), - REJECT_INVALID, "bad-blk-sigops", true); - return true; -} - -bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, CBlockIndex * const pindexPrev) -{ 
- const CChainParams& chainParams = Params(); - const Consensus::Params& consensusParams = chainParams.GetConsensus(); - uint256 hash = block.GetHash(); - if (hash == consensusParams.hashGenesisBlock) - return true; - - assert(pindexPrev); - - int daaForkHeight = GetArg("-daaforkheight", 450000); - int nHeight = pindexPrev->GetHeight()+1; - bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; - // Check Proof-of-Work difficulty - if (ishush3) { - - // Difficulty (nBits) relies on the current blocktime of this block - if ((ASSETCHAINS_BLOCKTIME != 75) && (nHeight >= nFirstHalvingHeight)) { - LogPrintf("%s: Blocktime halving to 75s at height %d!\n",__func__,nHeight); - ASSETCHAINS_BLOCKTIME = 75; - hush_changeblocktime(); - } - // The change of blocktime from 150s to 75s caused incorrect AWT of 34 blocks instead of 17 - // caused by the fact that Difficulty Adjustment Algorithms do not take into account blocktime - // changing at run-time, from Consensus::Params being a const struct - unsigned int nNextWork = GetNextWorkRequired(pindexPrev, &block, consensusParams); - - if (fDebug) { - LogPrintf("%s: nbits ,%d,%lu,%lu,%d\n",__func__, nHeight, nNextWork, block.nBits, nNextWork - block.nBits ); - } - if (block.nBits != nNextWork) { - // Enforce correct nbits at DAA fork height, before that, ignore - if (nHeight > daaForkHeight) { - //cout << "Incorrect HUSH diffbits at height " << nHeight << - // " " << block.nBits << " block.nBits vs. 
calc " << nNextWork << - // " " << block.GetHash().ToString() << " @ " << block.GetBlockTime() << endl; - return state.DoS(100, error("%s: Incorrect diffbits at height %d: %lu vs %lu ", __func__, nHeight, nNextWork, block.nBits), REJECT_INVALID, "bad-diffbits"); - } else { - // cout << "Ignoring nbits for height=" << nHeight << endl; - } - } - } - - // Check timestamp against prev - if (ASSETCHAINS_ADAPTIVEPOW <= 0 || nHeight < 30) { - if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast() ) - { - fprintf(stderr,"ht.%d too early %u vs %u\n",(int32_t)nHeight,(uint32_t)block.GetBlockTime(),(uint32_t)pindexPrev->GetMedianTimePast()); - return state.Invalid(error("%s: block's timestamp is too early based on median time", __func__), REJECT_INVALID, "time-too-old-median"); - } - } else { - if ( block.GetBlockTime() <= pindexPrev->nTime ) - { - fprintf(stderr,"ht.%d too early2 %u vs %u\n",(int32_t)nHeight,(uint32_t)block.GetBlockTime(),(uint32_t)pindexPrev->nTime); - return state.Invalid(error("%s: block's timestamp is too early based on previous block", __func__), REJECT_INVALID, "time-too-old-prevblock"); - } - } - - // Check that timestamp is not too far in the future - if (block.GetBlockTime() > GetTime() + consensusParams.nMaxFutureBlockTime) { - return state.Invalid(error("%s: block timestamp too far in the future", __func__), REJECT_INVALID, "time-too-new"); - } - - if (fCheckpointsEnabled) { - // Check that the block chain matches the known block chain up to a checkpoint - if (!Checkpoints::CheckBlock(chainParams.Checkpoints(), nHeight, hash)) - { - return state.DoS(100, error("%s: rejected by checkpoint lock-in at %d", __func__, nHeight),REJECT_CHECKPOINT, "checkpoint mismatch"); - } - // Don't accept any forks from the main chain prior to last checkpoint - CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(chainParams.Checkpoints()); - int32_t notarized_height; - if ( nHeight == 1 && chainActive.LastTip() != 0 && chainActive.LastTip()->GetHeight() > 
1 ) - { - CBlockIndex *heightblock = chainActive[nHeight]; - if ( heightblock != 0 && heightblock->GetBlockHash() == hash ) - return true; - return state.DoS(1, error("%s: trying to change height 1 forbidden", __func__)); - } - if ( nHeight != 0 ) - { - if ( pcheckpoint != 0 && nHeight < pcheckpoint->GetHeight() ) - return state.DoS(1, error("%s: forked chain older than last checkpoint (height %d) vs %d", __func__, nHeight,pcheckpoint->GetHeight())); - if ( hush_checkpoint(¬arized_height,nHeight,hash) < 0 ) - { - CBlockIndex *heightblock = chainActive[nHeight]; - if ( heightblock != 0 && heightblock->GetBlockHash() == hash ) - { - //fprintf(stderr,"got a pre notarization block that matches height.%d\n",(int32_t)nHeight); - return true; - } else return state.DoS(1, error("%s: forked chain %d older than last notarized (height %d) vs %d", __func__,nHeight, notarized_height)); - } - } - } - // Reject block.nVersion < 4 blocks - if (block.nVersion < 4) - return state.Invalid(error("%s : rejected nVersion<4 block", __func__), REJECT_OBSOLETE, "bad-version"); - - return true; -} - -bool ContextualCheckBlock(int32_t slowflag,const CBlock& block, CValidationState& state, CBlockIndex * const pindexPrev) -{ - const int nHeight = pindexPrev == NULL ? 0 : pindexPrev->GetHeight() + 1; - //const Consensus::Params& consensusParams = Params().GetConsensus(); - //bool sapling = true; //NetworkUpgradeActive(nHeight, consensusParams, Consensus::UPGRADE_SAPLING); - - // Check that all transactions are finalized - for (uint32_t i = 0; i < block.vtx.size(); i++) { - const CTransaction& tx = block.vtx[i]; - - // Check transaction contextually against consensus rules at block height - if (!ContextualCheckTransaction(slowflag,&block,pindexPrev,tx, state, nHeight, 100)) { - return false; // Failure reason has been set in validation state object - } - - int nLockTimeFlags = 0; - int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST) - ? 
pindexPrev->GetMedianTimePast() - : block.GetBlockTime(); - if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) { - return state.DoS(10, error("%s: contains a non-final transaction", __func__), REJECT_INVALID, "bad-txns-nonfinal"); - } - } - - // Enforce BIP 34 rule that the coinbase starts with serialized block height. - // In Hush this has been enforced since launch, except that the genesis - // block didn't include the height in the coinbase (see Zcash protocol spec - // section '6.8 Bitcoin Improvement Proposals'). - if (nHeight > 0) - { - CScript expect = CScript() << nHeight; - if (block.vtx[0].vin[0].scriptSig.size() < expect.size() || - !std::equal(expect.begin(), expect.end(), block.vtx[0].vin[0].scriptSig.begin())) { - return state.DoS(100, error("%s: block height mismatch in coinbase", __func__), REJECT_INVALID, "bad-cb-height"); - } - } - return true; -} - -bool AcceptBlockHeader(int32_t *futureblockp,const CBlockHeader& block, CValidationState& state, CBlockIndex** ppindex) -{ - static uint256 zero; - const CChainParams& chainparams = Params(); - AssertLockHeld(cs_main); - - // Check for duplicate - uint256 hash = block.GetHash(); - BlockMap::iterator miSelf = mapBlockIndex.find(hash); - if(fDebug) { - std::cerr << __func__ << ": blockhash=" << hash.ToString() << endl; - } - CBlockIndex *pindex = NULL; - if (miSelf != mapBlockIndex.end()) { - // Block header is already known. 
- if ( (pindex = miSelf->second) == 0 ) - miSelf->second = pindex = AddToBlockIndex(block); - if (ppindex) - *ppindex = pindex; - if ( pindex != 0 && (pindex->nStatus & BLOCK_FAILED_MASK) != 0 ) { - if ( ASSETCHAINS_CC == 0 ) { - std::cerr << __func__ << ": block " << hash.ToString() << " marked invalid"; - return state.Invalid(error("%s: block is marked invalid", __func__), 0, "duplicate"); - } else { - fprintf(stderr,"reconsider block %s\n",hash.GetHex().c_str()); - pindex->nStatus &= ~BLOCK_FAILED_MASK; - } - } - return true; - } - if (!CheckBlockHeader(futureblockp,*ppindex!=0?(*ppindex)->GetHeight():0,*ppindex, block, state,0)) { - if ( *futureblockp == 0 ) { - LogPrintf("%s: CheckBlockHeader futureblock=0\n", __func__); - return false; - } - } - if(fDebug) { - fprintf(stderr,"%s: CheckBlockHeader passed\n",__func__); - } - // Get prev block index - CBlockIndex* pindexPrev = NULL; - if (hash != chainparams.GetConsensus().hashGenesisBlock) - { - BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock); - if (mi == mapBlockIndex.end()) - { - LogPrintf("%s: hashPrevBlock %s not found\n",__func__, block.hashPrevBlock.ToString().c_str()); - //*futureblockp = 1; - return(false); - //return state.DoS(10, error("%s: prev block not found", __func__), 0, "bad-prevblk"); - } - pindexPrev = (*mi).second; - if (pindexPrev == 0 ) - { - LogPrintf("%s: hashPrevBlock %s no pindexPrev\n",__func__,block.hashPrevBlock.ToString().c_str()); - return(false); - } - if ( (pindexPrev->nStatus & BLOCK_FAILED_MASK) ) - return state.DoS(100, error("%s: prev block invalid", __func__), REJECT_INVALID, "bad-prevblk"); - } - if (!ContextualCheckBlockHeader(block, state, pindexPrev)) { - //fprintf(stderr,"AcceptBlockHeader ContextualCheckBlockHeader failed\n"); - LogPrintf("%s: ContextualCheckBlockHeader failed\n",__func__); - return false; - } - if(fDebug) { - fprintf(stderr,"%s: ContextualCheckBlockHeader passed: %s\n", __func__, hash.ToString().c_str()); - } - if (pindex == NULL) - { 
- if ( (pindex= AddToBlockIndex(block)) != 0 ) - { - miSelf = mapBlockIndex.find(hash); - if (miSelf != mapBlockIndex.end()) - miSelf->second = pindex; - //fprintf(stderr,"AcceptBlockHeader couldnt add to block index\n"); - } - } - if (ppindex) - *ppindex = pindex; - return true; -} - -uint256 Queued_reconsiderblock; - -bool AcceptBlock(int32_t *futureblockp,CBlock& block, CValidationState& state, CBlockIndex** ppindex, bool fRequested, CDiskBlockPos* dbp) -{ - const CChainParams& chainparams = Params(); - AssertLockHeld(cs_main); - - CBlockIndex *&pindex = *ppindex; - if (!AcceptBlockHeader(futureblockp, block, state, &pindex)) - { - if ( *futureblockp == 0 ) - { - LogPrintf("%s: AcceptBlockHeader error\n",__func__); - return false; - } - } - if ( pindex == 0 ) - { - LogPrintf("%s: null pindex\n", __func__); - *futureblockp = true; - return false; - } - //fprintf(stderr,"acceptblockheader passed\n"); - // Try to process all requested blocks that we don't have, but only - // process an unrequested block if it's new and has enough work to - // advance our tip, and isn't too many blocks ahead. - bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA; - bool fHasMoreWork = (chainActive.Tip() ? pindex->chainPower > chainActive.Tip()->chainPower : true); - // Blocks that are too out-of-order needlessly limit the effectiveness of - // pruning, because pruning will not delete block files that contain any - // blocks which are too close in height to the tip. Apply this test - // regardless of whether pruning is enabled; it should generally be safe to - // not process unrequested blocks. - bool fTooFarAhead = (pindex->GetHeight() > int(chainActive.Height() + BLOCK_DOWNLOAD_WINDOW)); //MIN_BLOCKS_TO_KEEP)); - - // TODO: deal better with return value and error conditions for duplicate - // and unrequested blocks. 
- //fprintf(stderr,"Accept %s flags already.%d requested.%d morework.%d farahead.%d\n",pindex->GetBlockHash().ToString().c_str(),fAlreadyHave,fRequested,fHasMoreWork,fTooFarAhead); - if (fAlreadyHave) return true; - if (!fRequested) { // If we didn't ask for it: - if (pindex->nTx != 0) return true; // This is a previously-processed block that was pruned - if (!fHasMoreWork) return true; // Don't process less-work chains - if (fTooFarAhead) return true; // Block height is too high - } - - // See method docstring for why this is always disabled - auto verifier = libzcash::ProofVerifier::Disabled(); - bool fContextualCheckBlock = ContextualCheckBlock(0,block, state, pindex->pprev); - if ( (!CheckBlock(futureblockp,pindex->GetHeight(),pindex,block, state, verifier,0)) || !fContextualCheckBlock ) - { - static int32_t saplinght = -1; - CBlockIndex *tmpptr; - if ( saplinght == -1 ) - saplinght = Params().GetConsensus().vUpgrades[Consensus::UPGRADE_SAPLING].nActivationHeight; - if ( saplinght < 0 ) - *futureblockp = 1; - // the problem is when a future sapling block comes in before we detected saplinght - if ( saplinght > 0 && (tmpptr= chainActive.LastTip()) != 0 ) - { - fprintf(stderr,"saplinght.%d tipht.%d blockht.%d cmp.%d\n",saplinght,(int32_t)tmpptr->GetHeight(),pindex->GetHeight(),pindex->GetHeight() < 0 || (pindex->GetHeight() >= saplinght && pindex->GetHeight() < saplinght+50000) || (tmpptr->GetHeight() > saplinght-720 && tmpptr->GetHeight() < saplinght+720)); - if ( pindex->GetHeight() < 0 || (pindex->GetHeight() >= saplinght && pindex->GetHeight() < saplinght+50000) || (tmpptr->GetHeight() > saplinght-720 && tmpptr->GetHeight() < saplinght+720) ) - *futureblockp = 1; - if ( ASSETCHAINS_CBOPRET != 0 ) - { - CValidationState tmpstate; CBlockIndex *tmpindex; int32_t ht,longest; - ht = (int32_t)pindex->GetHeight(); - longest = hush_longestchain(); - if ( (longest == 0 || ht < longest-6) && (tmpindex=hush_chainactive(ht)) != 0 ) - { - fprintf(stderr,"reconsider 
height.%d, longest.%d\n",(int32_t)ht,longest); - if ( Queued_reconsiderblock == zeroid ) - Queued_reconsiderblock = pindex->GetBlockHash(); - } - } - } - if ( *futureblockp == 0 ) - { - if (state.IsInvalid() && !state.CorruptionPossible()) { - pindex->nStatus |= BLOCK_FAILED_VALID; - setDirtyBlockIndex.insert(pindex); - } - LogPrintf("AcceptBlock CheckBlock or ContextualCheckBlock error\n"); - return false; - } - } - if ( fContextualCheckBlock ) - pindex->nStatus |= BLOCK_VALID_CONTEXT; - - int nHeight = pindex->GetHeight(); - // Temp File fix. LABS has been using this for ages with no bad effects. - // Disabled here. Set use tmp to whatever you need to use this for. - int32_t usetmp = 0; - if ( IsInitialBlockDownload() ) - usetmp = 0; - - // Write block to history file - try { - unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); - CDiskBlockPos blockPos; - if (dbp != NULL) - blockPos = *dbp; - if (!FindBlockPos(usetmp,state, blockPos, nBlockSize+8, nHeight, block.GetBlockTime(), dbp != NULL)) - return error("AcceptBlock(): FindBlockPos failed"); - if (dbp == NULL) - if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) - AbortNode(state, "Failed to write block"); - if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) - return error("AcceptBlock(): ReceivedBlockTransactions failed"); - if ( usetmp != 0 ) // not during initialdownload or if futureflag==0 and contextchecks ok - pindex->nStatus |= BLOCK_IN_TMPFILE; - } catch (const std::runtime_error& e) { - return AbortNode(state, std::string("System error: ") + e.what()); - } - - if (fCheckForPruning) - FlushStateToDisk(state, FLUSH_STATE_NONE); // we just allocated more disk space for block files - if ( *futureblockp == 0 ) - return true; - LogPrintf("AcceptBlock block from future error\n"); - return false; -} - -static bool IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned nRequired, const Consensus::Params& consensusParams) -{ - unsigned int 
nFound = 0; - for (int i = 0; i < consensusParams.nMajorityWindow && nFound < nRequired && pstart != NULL; i++) - { - if (pstart->nVersion >= minVersion) - ++nFound; - pstart = pstart->pprev; - } - return (nFound >= nRequired); -} - void hush_currentheight_set(int32_t height); bool ProcessNewBlock(bool from_miner,int32_t height,CValidationState &state, CNode* pfrom, CBlock* pblock, bool fForceProcessing, CDiskBlockPos *dbp) @@ -5521,1129 +1248,8 @@ bool ProcessNewBlock(bool from_miner,int32_t height,CValidationState &state, CNo return true; } -bool TestBlockValidity(CValidationState &state, const CBlock& block, CBlockIndex * const pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot) -{ - AssertLockHeld(cs_main); - assert(pindexPrev == chainActive.Tip()); +// [block_processing.cpp] Lines 4109-5231 extracted - CCoinsViewCache viewNew(pcoinsTip); - CBlockIndex indexDummy(block); - indexDummy.pprev = pindexPrev; - indexDummy.SetHeight(pindexPrev->GetHeight() + 1); - // zk proofs are verified in ConnectBlock - auto verifier = libzcash::ProofVerifier::Disabled(); - // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, pindexPrev)) - { - fprintf(stderr,"%s: failure A checkPOW=%d\n",__func__,fCheckPOW); - return false; - } - int32_t futureblock; - if (!CheckBlock(&futureblock,indexDummy.GetHeight(),0,block, state, verifier, fCheckPOW, fCheckMerkleRoot)) - { - fprintf(stderr,"%s: failure B checkPOW=%d\n",__func__, fCheckPOW); - return false; - } - if (!ContextualCheckBlock(0,block, state, pindexPrev)) - { - fprintf(stderr,"%s: failure C checkPOW=%d\n",__func__, fCheckPOW); - return false; - } - if (!ConnectBlock(block, state, &indexDummy, viewNew, true,fCheckPOW)) - { - fprintf(stderr,"%s: failure D checkPOW=%d\n",__func__,fCheckPOW); - return false; - } - assert(state.IsValid()); - if ( futureblock != 0 ) - return(false); - return true; -} - -// BLOCK PRUNING CODE -/* Calculate the amount of disk space the block & undo files 
currently use */ -uint64_t CalculateCurrentUsage() -{ - uint64_t retval = 0; - BOOST_FOREACH(const CBlockFileInfo &file, vinfoBlockFile) { - retval += file.nSize + file.nUndoSize; - } - return retval; -} - -/* Prune a block file (modify associated database entries)*/ -bool PruneOneBlockFile(bool tempfile, const int fileNumber) -{ - uint256 notarized_hash,notarized_desttxid; int32_t prevMoMheight,notarized_height; - notarized_height = hush_notarized_height(&prevMoMheight,¬arized_hash,¬arized_desttxid); - //fprintf(stderr, "pruneblockfile.%i\n",fileNumber); sleep(15); - for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) - { - CBlockIndex* pindex = it->second; - if (pindex && pindex->nFile == fileNumber) - { - if ( tempfile && (pindex->nStatus & BLOCK_IN_TMPFILE != 0) ) - { - if ( chainActive.Contains(pindex) ) - { - // Block is in main chain so we cant clear this file! - return(false); - } - fprintf(stderr, "pindex height.%i notarized height.%i \n", pindex->GetHeight(), notarized_height); - if ( pindex->GetHeight() > notarized_height ) // Need to check this, does an invalid block have a height? - { - // This blocks height is not older than last notarization so it can be reorged into the main chain. - // We cant clear this file! - return(false); - } - else - { - // Block is not in main chain and is older than last notarized block so its safe for removal. - fprintf(stderr, "Block [%i] in tempfile.%i We can clear this block!\n",pindex->GetHeight(),fileNumber); - // Add index to list and remove after loop? - } - } - pindex->nStatus &= ~BLOCK_HAVE_DATA; - pindex->nStatus &= ~BLOCK_HAVE_UNDO; - pindex->nFile = 0; - pindex->nDataPos = 0; - pindex->nUndoPos = 0; - setDirtyBlockIndex.insert(pindex); - // Prune from mapBlocksUnlinked -- any block we prune would have - // to be downloaded again in order to consider its chain, at which - // point it would be considered as a candidate for - // mapBlocksUnlinked or setBlockIndexCandidates. 
- std::pair::iterator, std::multimap::iterator> range = mapBlocksUnlinked.equal_range(pindex->pprev); - while (range.first != range.second) - { - std::multimap::iterator it = range.first; - range.first++; - if (it->second == pindex) - { - mapBlocksUnlinked.erase(it); - } - } - } - } - if (!tempfile) - vinfoBlockFile[fileNumber].SetNull(); - setDirtyFileInfo.insert(fileNumber); - return(true); -} - - -void UnlinkPrunedFiles(std::set& setFilesToPrune) -{ - for (set::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) { - CDiskBlockPos pos(*it, 0); - boost::filesystem::remove(GetBlockPosFilename(pos, "blk")); - boost::filesystem::remove(GetBlockPosFilename(pos, "rev")); - LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it); - } -} - -/* Calculate the block/rev files that should be deleted to remain under target*/ -void FindFilesToPrune(std::set& setFilesToPrune) -{ - LOCK2(cs_main, cs_LastBlockFile); - if (chainActive.Tip() == NULL || nPruneTarget == 0) { - return; - } - if (chainActive.Tip()->GetHeight() <= Params().PruneAfterHeight()) { - return; - } - unsigned int nLastBlockWeCanPrune = chainActive.Tip()->GetHeight() - MIN_BLOCKS_TO_KEEP; - uint64_t nCurrentUsage = CalculateCurrentUsage(); - // We don't check to prune until after we've allocated new space for files - // So we should leave a buffer under our target to account for another allocation - // before the next pruning. - uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE; - uint64_t nBytesToPrune; - int count=0; - - if (nCurrentUsage + nBuffer >= nPruneTarget) { - for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) { - nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize; - - if (vinfoBlockFile[fileNumber].nSize == 0) - continue; - - if (nCurrentUsage + nBuffer < nPruneTarget) // are we below our target? 
- break; - - // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning - if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) - continue; - - PruneOneBlockFile(false, fileNumber); - // Queue up the files for removal - setFilesToPrune.insert(fileNumber); - nCurrentUsage -= nBytesToPrune; - count++; - } - } - - LogPrint("prune", "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n", - nPruneTarget/1024/1024, nCurrentUsage/1024/1024, - ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024, - nLastBlockWeCanPrune, count); -} - -bool CheckDiskSpace(uint64_t nAdditionalBytes) -{ - uint64_t nFreeBytesAvailable = boost::filesystem::space(GetDataDir()).available; - if(fDebug) { - fprintf(stderr,"Free bytes on disk: %lu\n", nFreeBytesAvailable); - } - // Check for nMinDiskSpace bytes (defined in main.h) - if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes) - return AbortNode("Disk space is low!!!", _("Error: Disk space is low!!!")); - - return true; -} - -FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly) -{ - static int32_t didinit[256]; - if (pos.IsNull()) - return NULL; - boost::filesystem::path path = GetBlockPosFilename(pos, prefix); - boost::filesystem::create_directories(path.parent_path()); - FILE* file = fopen(path.string().c_str(), "rb+"); - if (!file && !fReadOnly) - file = fopen(path.string().c_str(), "wb+"); - if (!file) { - LogPrintf("Unable to open file %s\n", path.string()); - return NULL; - } - if ( pos.nFile < sizeof(didinit)/sizeof(*didinit) && didinit[pos.nFile] == 0 && strcmp(prefix,(char *)"blk") == 0 ) - { - hush_prefetch(file); - didinit[pos.nFile] = 1; - } - if (pos.nPos) { - if (fseek(file, pos.nPos, SEEK_SET)) { - LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string()); - fclose(file); - return NULL; - } - } - return file; -} - -FILE* OpenBlockFile(const CDiskBlockPos &pos, bool 
fReadOnly) { - return OpenDiskFile(pos, "blk", fReadOnly); -} - -FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) { - return OpenDiskFile(pos, "rev", fReadOnly); -} - -boost::filesystem::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) -{ - return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile); -} - -CBlockIndex * InsertBlockIndex(uint256 hash) -{ - if (hash.IsNull()) - return NULL; - - // Return existing - BlockMap::iterator mi = mapBlockIndex.find(hash); - if (mi != mapBlockIndex.end() && mi->second != NULL) - return (*mi).second; - - // Create new - CBlockIndex* pindexNew = new CBlockIndex(); - if (!pindexNew) - throw runtime_error("InsertBlockIndex(): new CBlockIndex failed"); - mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first; - pindexNew->phashBlock = &((*mi).first); - //fprintf(stderr,"inserted to block index %s\n",hash.ToString().c_str()); - - return pindexNew; -} - -bool static LoadBlockIndexDB() -{ - const CChainParams& chainparams = Params(); - //LogPrintf("%s: start loading guts\n", __func__); - if (!pblocktree->LoadBlockIndexGuts()) - return false; - LogPrintf("%s: loaded guts\n", __func__); - boost::this_thread::interruption_point(); - - // Calculate chainPower - vector > vSortedByHeight; - vSortedByHeight.reserve(mapBlockIndex.size()); - BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex) - { - CBlockIndex* pindex = item.second; - vSortedByHeight.push_back(make_pair(pindex->GetHeight(), pindex)); - } - if(fDebug) - fprintf(stderr,"load blockindexDB paired %u\n",(uint32_t)time(NULL)); - sort(vSortedByHeight.begin(), vSortedByHeight.end()); - if(fDebug) - fprintf(stderr,"load blockindexDB sorted %u\n",(uint32_t)time(NULL)); - - BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight) - { - CBlockIndex* pindex = item.second; - pindex->chainPower = (pindex->pprev ? 
CChainPower(pindex) + pindex->pprev->chainPower : CChainPower(pindex)) + GetBlockProof(*pindex); - // We can link the chain of blocks for which we've received transactions at some point. - // Pruned nodes may have deleted the block. - if (pindex->nTx > 0) { - if (pindex->pprev) { - if (pindex->pprev->nChainTx) { - pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx; - if (fZindex) { - pindex->nChainNotarizations = pindex->pprev->nChainNotarizations + pindex->nNotarizations; - pindex->nChainShieldedTx = pindex->pprev->nChainShieldedTx + pindex->nShieldedTx; - pindex->nChainShieldedOutputs = pindex->pprev->nChainShieldedOutputs + pindex->nShieldedOutputs; - pindex->nChainShieldedPayments = pindex->pprev->nChainShieldedPayments + pindex->nShieldedPayments; - pindex->nChainShieldingTx = pindex->pprev->nChainShieldingTx + pindex->nShieldingTx; - - pindex->nChainPayments = pindex->pprev->nChainPayments + pindex->nPayments; - pindex->nChainShieldingPayments = pindex->pprev->nChainShieldingPayments + pindex->nShieldingPayments; - pindex->nChainDeshieldingTx = pindex->pprev->nChainShieldedTx + pindex->nShieldedTx; - pindex->nChainDeshieldingPayments = pindex->pprev->nChainShieldedPayments + pindex->nShieldedPayments; - pindex->nChainFullyShieldedTx = pindex->pprev->nChainFullyShieldedTx + pindex->nFullyShieldedTx; - pindex->nChainFullyShieldedPayments = pindex->pprev->nChainFullyShieldedPayments + pindex->nFullyShieldedPayments; - } - - if (pindex->pprev->nChainSproutValue && pindex->nSproutValue) { - pindex->nChainSproutValue = *pindex->pprev->nChainSproutValue + *pindex->nSproutValue; - } else { - pindex->nChainSproutValue = boost::none; - } - if (pindex->pprev->nChainSaplingValue) { - pindex->nChainSaplingValue = *pindex->pprev->nChainSaplingValue + pindex->nSaplingValue; - } else { - pindex->nChainSaplingValue = boost::none; - } - } else { - pindex->nChainTx = 0; - if (fZindex) { - pindex->nChainPayments = 0; - pindex->nChainNotarizations = 0; - 
pindex->nChainShieldedTx = 0; - pindex->nChainShieldedOutputs = 0; - pindex->nChainFullyShieldedTx = 0; - pindex->nChainShieldedPayments = 0; - pindex->nChainShieldingPayments = 0; - pindex->nChainDeshieldingTx = 0; - pindex->nChainDeshieldingPayments = 0; - pindex->nChainFullyShieldedTx = 0; - pindex->nChainFullyShieldedPayments = 0; - } - pindex->nChainSproutValue = boost::none; - pindex->nChainSaplingValue = boost::none; - mapBlocksUnlinked.insert(std::make_pair(pindex->pprev, pindex)); - } - } else { - pindex->nChainTx = pindex->nTx; - pindex->nChainSproutValue = pindex->nSproutValue; - pindex->nChainSaplingValue = pindex->nSaplingValue; - if (fZindex) { - pindex->nChainPayments = pindex->nPayments; - pindex->nChainNotarizations = pindex->nNotarizations; - pindex->nChainShieldedTx = pindex->nShieldedTx; - pindex->nChainShieldedOutputs = pindex->nShieldedOutputs; - pindex->nChainShieldedPayments = pindex->nShieldedPayments; - pindex->nChainShieldingTx = pindex->nShieldingTx; - pindex->nChainShieldingPayments = pindex->nShieldingPayments; - pindex->nChainDeshieldingTx = pindex->nDeshieldingTx; - pindex->nChainDeshieldingPayments = pindex->nDeshieldingPayments; - pindex->nChainFullyShieldedPayments = pindex->nFullyShieldedPayments; - } - } - } - // Construct in-memory chain of branch IDs. - // Relies on invariant: a block that does not activate a network upgrade - // will always be valid under the same consensus rules as its parent. - // Genesis block has a branch ID of zero by definition, but has no - // validity status because it is side-loaded into a fresh chain. - // Activation blocks will have branch IDs set (read from disk). 
- if (pindex->pprev) { - if (pindex->IsValid(BLOCK_VALID_CONSENSUS) && !pindex->nCachedBranchId) { - pindex->nCachedBranchId = pindex->pprev->nCachedBranchId; - } - } else { - pindex->nCachedBranchId = SPROUT_BRANCH_ID; - } - if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && (pindex->nChainTx || pindex->pprev == NULL)) - setBlockIndexCandidates.insert(pindex); - if (pindex->nStatus & BLOCK_FAILED_MASK && (!pindexBestInvalid || pindex->chainPower > pindexBestInvalid->chainPower)) - pindexBestInvalid = pindex; - if (pindex->pprev) - pindex->BuildSkip(); - if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == NULL || CBlockIndexWorkComparator()(pindexBestHeader, pindex))) - pindexBestHeader = pindex; - } - fprintf(stderr,"load blockindexDB chained %u\n",(uint32_t)time(NULL)); - - // Load block file info - pblocktree->ReadLastBlockFile(nLastBlockFile); - vinfoBlockFile.resize(nLastBlockFile + 1); - tmpBlockFiles.resize(nLastTmpFile + 1); - LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile); - for (int nFile = 0; nFile <= nLastBlockFile; nFile++) { - pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]); - } - LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString()); - for (int nFile = nLastBlockFile + 1; true; nFile++) { - CBlockFileInfo info; - if (pblocktree->ReadBlockFileInfo(nFile, info)) { - vinfoBlockFile.push_back(info); - } else { - break; - } - } - - // Check presence of blk files - LogPrintf("Checking all blk files are present...\n"); - set setBlkDataFiles; - BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex) - { - CBlockIndex* pindex = item.second; - if (pindex->nStatus & BLOCK_HAVE_DATA) { - setBlkDataFiles.insert(pindex->nFile); - } - } - fprintf(stderr,"load blockindexDB %u\n",(uint32_t)time(NULL)); - for (std::set::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) - { - CDiskBlockPos pos(*it, 0); - if (CAutoFile(OpenBlockFile(pos, true), 
SER_DISK, CLIENT_VERSION).IsNull()) { - return false; - } - } - - // Check whether we have ever pruned block & undo files - pblocktree->ReadFlag("prunedblockfiles", fHavePruned); - if (fHavePruned) - LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n"); - - // Check whether we need to continue reindexing - bool fReindexing = false; - pblocktree->ReadReindexing(fReindexing); - fReindex |= fReindexing; - - // Check whether we have a transaction index - pblocktree->ReadFlag("txindex", fTxIndex); - LogPrintf("%s: transaction index %s\n", __func__, fTxIndex ? "enabled" : "disabled"); - - // Check whether we have an address index - pblocktree->ReadFlag("addressindex", fAddressIndex); - LogPrintf("%s: address index %s\n", __func__, fAddressIndex ? "enabled" : "disabled"); - - // Check whether we have a shielded index - pblocktree->ReadFlag("zindex", fZindex); - LogPrintf("%s: shielded index %s\n", __func__, fZindex ? "enabled" : "disabled"); - - // Check whether we have a timestamp index - pblocktree->ReadFlag("timestampindex", fTimestampIndex); - LogPrintf("%s: timestamp index %s\n", __func__, fTimestampIndex ? "enabled" : "disabled"); - - // Check whether we have a spent index - pblocktree->ReadFlag("spentindex", fSpentIndex); - LogPrintf("%s: spent index %s\n", __func__, fSpentIndex ? 
"enabled" : "disabled"); - - // Load pointer to end of best chain - BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); - if (it == mapBlockIndex.end()) - return true; - - chainActive.SetTip(it->second); - - // Try to detect if we are z2z based on height of blocks on disk - // This helps to set it correctly on startup before a new block is connected - if(ishush3 && chainActive.Height() >= 340000) { - LogPrintf("%s: enabled ac_private=1 at height=%d\n", __func__, chainActive.Height()); - ASSETCHAINS_PRIVATE = 1; - } - - // Set hashFinalSproutRoot for the end of best chain - it->second->hashFinalSproutRoot = pcoinsTip->GetBestAnchor(SPROUT); - - fprintf(stderr,"about to prune block index\n"); - - PruneBlockIndexCandidates(); - - double progress; - if ( ishush3 ) { - progress = Checkpoints::GuessVerificationProgress(chainparams.Checkpoints(), chainActive.LastTip()); - } else { - int32_t longestchain = hush_longestchain(); - // TODO: hush_longestchain does not have the data it needs at the time LoadBlockIndexDB - // runs, which makes it return 0, so we guess 50% for now - progress = (longestchain > 0 ) ? 
(double) chainActive.Height() / longestchain : 0.5; - } - LogPrintf("%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__, - chainActive.LastTip()->GetBlockHash().ToString(), chainActive.Height(), - DateTimeStrFormat("%Y-%m-%d %H:%M:%S", chainActive.LastTip()->GetBlockTime()), - progress); - - CBlockIndex *pindex; - if ( (pindex= chainActive.LastTip()) != 0 ) - { - if ( ASSETCHAINS_SAPLING <= 0 ) - { - fprintf(stderr,"set sapling height, if possible from ht.%d %u\n",(int32_t)pindex->GetHeight(),(uint32_t)pindex->nTime); - hush_activate_sapling(pindex); - } - } - return true; -} - -CVerifyDB::CVerifyDB() -{ - uiInterface.ShowProgress(_("Verifying blocks..."), 0); -} - -CVerifyDB::~CVerifyDB() -{ - uiInterface.ShowProgress("", 100); -} - -bool CVerifyDB::VerifyDB(CCoinsView *coinsview, int nCheckLevel, int nCheckDepth) -{ - LOCK(cs_main); - if (chainActive.Tip() == NULL || chainActive.Tip()->pprev == NULL) - return true; - - // Verify blocks in the best chain - if (nCheckDepth <= 0) - nCheckDepth = 1000000000; // suffices until the year 19000 - if (nCheckDepth > chainActive.Height()) - nCheckDepth = chainActive.Height(); - nCheckLevel = std::max(0, std::min(4, nCheckLevel)); - LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel); - CCoinsViewCache coins(coinsview); - CBlockIndex* pindexState = chainActive.Tip(); - CBlockIndex* pindexFailure = NULL; - int nGoodTransactions = 0; - CValidationState state; - // No need to verify shielded req's twice - auto verifier = libzcash::ProofVerifier::Disabled(); - //fprintf(stderr,"start VerifyDB %u\n",(uint32_t)time(NULL)); - for (CBlockIndex* pindex = chainActive.Tip(); pindex && pindex->pprev; pindex = pindex->pprev) - { - boost::this_thread::interruption_point(); - uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * (nCheckLevel >= 4 ? 
50 : 100))))); - if (pindex->GetHeight() < chainActive.Height()-nCheckDepth) - break; - CBlock block; - // check level 0: read from disk - if (!ReadBlockFromDisk(block, pindex,0)) - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - // check level 1: verify block validity - int32_t futureblock; - if (nCheckLevel >= 1 && !CheckBlock(&futureblock,pindex->GetHeight(),pindex,block, state, verifier,0) ) - return error("VerifyDB(): *** found bad block at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - // check level 2: verify undo validity - if (nCheckLevel >= 2 && pindex) { - CBlockUndo undo; - CDiskBlockPos pos = pindex->GetUndoPos(); - if (!pos.IsNull()) { - if (!UndoReadFromDisk(undo, pos, pindex->pprev->GetBlockHash())) - return error("VerifyDB(): *** found bad undo data at %d, hash=%s\n", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - } - } - // check level 3: check for inconsistencies during memory-only disconnect of tip blocks - if (nCheckLevel >= 3 && pindex == pindexState && (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <= nCoinCacheUsage) { - bool fClean = true; - if (!DisconnectBlock(block, state, pindex, coins, &fClean)) - return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - pindexState = pindex->pprev; - if (!fClean) { - nGoodTransactions = 0; - pindexFailure = pindex; - } else - nGoodTransactions += block.vtx.size(); - } - if (ShutdownRequested()) - return true; - } - //fprintf(stderr,"end VerifyDB %u\n",(uint32_t)time(NULL)); - if (pindexFailure) - return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainActive.Height() - pindexFailure->GetHeight() + 1, nGoodTransactions); - - // check level 4: try reconnecting blocks - if (nCheckLevel >= 4) { - CBlockIndex 
*pindex = pindexState; - while (pindex != chainActive.Tip()) { - boost::this_thread::interruption_point(); - uiInterface.ShowProgress(_("Verifying blocks..."), std::max(1, std::min(99, 100 - (int)(((double)(chainActive.Height() - pindex->GetHeight())) / (double)nCheckDepth * 50)))); - pindex = chainActive.Next(pindex); - CBlock block; - if (!ReadBlockFromDisk(block, pindex,0)) - return error("VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - if (!ConnectBlock(block, state, pindex, coins,false, true)) - return error("VerifyDB(): *** found unconnectable block at %d, hash=%s", pindex->GetHeight(), pindex->GetBlockHash().ToString()); - } - } - - LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", chainActive.Height() - pindexState->GetHeight(), nGoodTransactions); - - return true; -} - -bool RewindBlockIndex(const CChainParams& params, bool& clearWitnessCaches) -{ - LOCK(cs_main); - - // RewindBlockIndex is called after LoadBlockIndex, so at this point every block - // index will have nCachedBranchId set based on the values previously persisted - // to disk. By definition, a set nCachedBranchId means that the block was - // fully-validated under the corresponding consensus rules. Thus we can quickly - // identify whether the current active chain matches our expected sequence of - // consensus rule changes, with two checks: - // - // - BLOCK_ACTIVATES_UPGRADE is set only on blocks that activate upgrades. - // - nCachedBranchId for each block matches what we expect. 
- auto sufficientlyValidated = [&params](const CBlockIndex* pindex) { - auto consensus = params.GetConsensus(); - bool fFlagSet = pindex->nStatus & BLOCK_ACTIVATES_UPGRADE; - bool fFlagExpected = IsActivationHeightForAnyUpgrade(pindex->GetHeight(), consensus); - return fFlagSet == fFlagExpected && - pindex->nCachedBranchId && - *pindex->nCachedBranchId == CurrentEpochBranchId(pindex->GetHeight(), consensus); - }; - - int nHeight = 1; - while (nHeight <= chainActive.Height()) { - if (!sufficientlyValidated(chainActive[nHeight])) { - break; - } - nHeight++; - } - - // nHeight is now the height of the first insufficiently-validated block, or tipheight + 1 - auto rewindLength = chainActive.Height() - nHeight; - if (rewindLength > 0 && rewindLength > MAX_REORG_LENGTH) - { - auto pindexOldTip = chainActive.Tip(); - auto pindexRewind = chainActive[nHeight - 1]; - auto msg = strprintf(_( - "A block chain rewind has been detected that would roll back %d blocks! " - "This is larger than the maximum of %d blocks, and so the node is shutting down for your safety." 
- ), rewindLength, MAX_REORG_LENGTH) + "\n\n" + - _("Rewind details") + ":\n" + - "- " + strprintf(_("Current tip: %s, height %d"), - pindexOldTip->phashBlock->GetHex(), pindexOldTip->GetHeight()) + "\n" + - "- " + strprintf(_("Rewinding to: %s, height %d"), - pindexRewind->phashBlock->GetHex(), pindexRewind->GetHeight()) + "\n\n" + - _("Please help, human!"); - LogPrintf("*** %s\n", msg); - uiInterface.ThreadSafeMessageBox(msg, "", CClientUIInterface::MSG_ERROR); - StartShutdown(); - return false; - } - - CValidationState state; - CBlockIndex* pindex = chainActive.Tip(); - while (chainActive.Height() >= nHeight) { - if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) { - // If pruning, don't try rewinding past the HAVE_DATA point; - // since older blocks can't be served anyway, there's - // no need to walk further, and trying to DisconnectTip() - // will fail (and require a needless reindex/redownload - // of the blockchain). - break; - } - if (!DisconnectTip(state, true)) { - return error("RewindBlockIndex: unable to disconnect block at height %i", pindex->GetHeight()); - } - // Occasionally flush state to disk. - if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) - return false; - } - - // Reduce validity flag and have-data flags. - - // Collect blocks to be removed (blocks in mapBlockIndex must be at least BLOCK_VALID_TREE). - // We do this after actual disconnecting, otherwise we'll end up writing the lack of data - // to disk before writing the chainstate, resulting in a failure to continue if interrupted. - std::vector vBlocks; - for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { - CBlockIndex* pindexIter = it->second; - - // Note: If we encounter an insufficiently validated block that - // is on chainActive, it must be because we are a pruning node, and - // this block or some successor doesn't HAVE_DATA, so we were unable to - // rewind all the way. 
Blocks remaining on chainActive at this point - // must not have their validity reduced. - if (pindexIter && !sufficientlyValidated(pindexIter) && !chainActive.Contains(pindexIter)) { - // Reduce validity - pindexIter->nStatus = - std::min(pindexIter->nStatus & BLOCK_VALID_MASK, BLOCK_VALID_TREE) | - (pindexIter->nStatus & ~BLOCK_VALID_MASK); - // Remove have-data flags - pindexIter->nStatus &= ~(BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO); - // Remove branch ID - pindexIter->nStatus &= ~BLOCK_ACTIVATES_UPGRADE; - pindexIter->nCachedBranchId = boost::none; - // Remove storage location - pindexIter->nFile = 0; - pindexIter->nDataPos = 0; - pindexIter->nUndoPos = 0; - // Remove various other things - pindexIter->nTx = 0; - pindexIter->nChainTx = 0; - pindexIter->nSproutValue = boost::none; - pindexIter->nChainSproutValue = boost::none; - pindexIter->nSaplingValue = 0; - pindexIter->nChainSaplingValue = boost::none; - pindexIter->nSequenceId = 0; - - // Make sure it gets written - /* corresponds to commented out block below as an alternative to setDirtyBlockIndex - vBlocks.push_back(pindexIter); - */ - setDirtyBlockIndex.insert(pindexIter); - if (pindexIter == pindexBestInvalid) - { - //fprintf(stderr,"Reset invalid block marker if it was pointing to this block\n"); - pindexBestInvalid = NULL; - } - - // Update indices - setBlockIndexCandidates.erase(pindexIter); - auto ret = mapBlocksUnlinked.equal_range(pindexIter->pprev); - while (ret.first != ret.second) { - if (ret.first->second == pindexIter) { - mapBlocksUnlinked.erase(ret.first++); - } else { - ++ret.first; - } - } - } else if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) && pindexIter->nChainTx) { - setBlockIndexCandidates.insert(pindexIter); - } - } - - PruneBlockIndexCandidates(); - - CheckBlockIndex(); - - if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) { - return false; - } - - return true; -} - -void UnloadBlockIndex() -{ - LOCK(cs_main); - setBlockIndexCandidates.clear(); - chainActive.SetTip(NULL); - 
pindexBestInvalid = NULL; - pindexBestHeader = NULL; - mempool.clear(); - mapOrphanTransactions.clear(); - mapOrphanTransactionsByPrev.clear(); - nSyncStarted = 0; - mapBlocksUnlinked.clear(); - vinfoBlockFile.clear(); - tmpBlockFiles.clear(); - nLastBlockFile = 0; - nBlockSequenceId = 1; - mapBlockSource.clear(); - mapBlocksInFlight.clear(); - nQueuedValidatedHeaders = 0; - nPreferredDownload = 0; - setDirtyBlockIndex.clear(); - setDirtyFileInfo.clear(); - mapNodeState.clear(); - recentRejects.reset(NULL); - - BOOST_FOREACH(BlockMap::value_type& entry, mapBlockIndex) { - delete entry.second; - } - mapBlockIndex.clear(); - fHavePruned = false; -} - -bool LoadBlockIndex() -{ - // Load block index from databases - HUSH_LOADINGBLOCKS = 1; - if (!fReindex && !LoadBlockIndexDB()) - { - HUSH_LOADINGBLOCKS = 0; - return false; - } - fprintf(stderr,"finished loading blocks %s\n",SMART_CHAIN_SYMBOL); - return true; -} - - -bool InitBlockIndex() { - const CChainParams& chainparams = Params(); - LOCK(cs_main); - tmpBlockFiles.clear(); - - // Initialize global variables that cannot be constructed at startup. 
- recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); - // Check whether we're already initialized - if (chainActive.Genesis() != NULL) - { - return true; - } - if ( pblocktree != 0 ) - { - // Use the provided setting for -txindex in the new database - fTxIndex = GetBoolArg("-txindex", true); - pblocktree->WriteFlag("txindex", fTxIndex); - - // Use the provided setting for -addressindex in the new database - fAddressIndex = GetBoolArg("-addressindex", DEFAULT_ADDRESSINDEX); - pblocktree->WriteFlag("addressindex", fAddressIndex); - - // Use the provided setting for -zindex in the new database - fZindex = GetBoolArg("-zindex", DEFAULT_SHIELDEDINDEX); - pblocktree->WriteFlag("zindex", fZindex); - - // Use the provided setting for -timestampindex in the new database - fTimestampIndex = GetBoolArg("-timestampindex", DEFAULT_TIMESTAMPINDEX); - pblocktree->WriteFlag("timestampindex", fTimestampIndex); - - fSpentIndex = GetBoolArg("-spentindex", DEFAULT_SPENTINDEX); - pblocktree->WriteFlag("spentindex", fSpentIndex); - fprintf(stderr,"fAddressIndex.%d/%d fSpentIndex.%d/%d fZindex.%d/%d\n",fAddressIndex,DEFAULT_ADDRESSINDEX,fSpentIndex,DEFAULT_SPENTINDEX,fZindex, DEFAULT_SHIELDEDINDEX ); - LogPrintf("Initializing databases...\n"); - } - // Only add the genesis block if not reindexing (in which case we reuse the one already on disk) - if (!fReindex) { - try { - CBlock &block = const_cast<CBlock&>(Params().GenesisBlock()); - // Start new block file - unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION); - CDiskBlockPos blockPos; - CValidationState state; - if (!FindBlockPos(0,state, blockPos, nBlockSize+8, 0, block.GetBlockTime())) - return error("LoadBlockIndex(): FindBlockPos failed"); - if (!WriteBlockToDisk(block, blockPos, chainparams.MessageStart())) - return error("LoadBlockIndex(): writing genesis block to disk failed"); - CBlockIndex *pindex = AddToBlockIndex(block); - if ( pindex == 0 ) - return error("LoadBlockIndex(): couldnt add to 
block index"); - if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) - return error("LoadBlockIndex(): genesis block not accepted"); - if (!ActivateBestChain(true, state, &block)) - return error("LoadBlockIndex(): genesis block cannot be activated"); - // Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data - if ( HUSH_NSPV_FULLNODE ) - return FlushStateToDisk(state, FLUSH_STATE_ALWAYS); - else return(true); - } catch (const std::runtime_error& e) { - return error("LoadBlockIndex(): failed to initialize block database: %s", e.what()); - } - } - - return true; -} - - - -bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp) -{ - const CChainParams& chainparams = Params(); - // Map of disk positions for blocks with unknown parent (only used for reindex) - static std::multimap mapBlocksUnknownParent; - int64_t nStart = GetTimeMillis(); - - int nLoaded = 0; - try { - // This takes over fileIn and calls fclose() on it in the CBufferedFile destructor - //CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION); - CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE(10000000), MAX_BLOCK_SIZE(10000000)+8, SER_DISK, CLIENT_VERSION); - uint64_t nRewind = blkdat.GetPos(); - while (!blkdat.eof()) { - boost::this_thread::interruption_point(); - - blkdat.SetPos(nRewind); - nRewind++; // start one byte further next time, in case of failure - blkdat.SetLimit(); // remove former limit - unsigned int nSize = 0; - try { - // locate a header - unsigned char buf[MESSAGE_START_SIZE]; - blkdat.FindByte(Params().MessageStart()[0]); - nRewind = blkdat.GetPos()+1; - blkdat >> FLATDATA(buf); - if (memcmp(buf, Params().MessageStart(), MESSAGE_START_SIZE)) - continue; - // read size - blkdat >> nSize; - if (nSize < 80 || nSize > MAX_BLOCK_SIZE(10000000)) - continue; - } catch (const std::exception&) { - // no valid block header found; don't complain - break; - } - try { - // read block - CBlock block; - uint64_t 
nBlockPos = blkdat.GetPos(); - if (dbp) - dbp->nPos = nBlockPos; - blkdat.SetLimit(nBlockPos + nSize); - blkdat.SetPos(nBlockPos); - blkdat >> block; - - nRewind = blkdat.GetPos(); - // detect out of order blocks, and store them for later - uint256 hash = block.GetHash(); - if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex.find(block.hashPrevBlock) == mapBlockIndex.end()) { - LogPrint("reindex", "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), - block.hashPrevBlock.ToString()); - if (dbp) - mapBlocksUnknownParent.insert(std::make_pair(block.hashPrevBlock, *dbp)); - continue; - } - - // process in case the block isn't known yet - if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) { - CValidationState state; - if (ProcessNewBlock(0,0,state, NULL, &block, true, dbp)) - nLoaded++; - if (state.IsError()) - break; - } else if (hash != chainparams.GetConsensus().hashGenesisBlock && hush_blockheight(hash) % 1000 == 0) { - LogPrintf("Block Import: already had block %s at height %d\n", hash.ToString(), hush_blockheight(hash)); - } - - // Recursively process earlier encountered successors of this block - deque queue; - queue.push_back(hash); - while (!queue.empty()) { - uint256 head = queue.front(); - queue.pop_front(); - std::pair::iterator, std::multimap::iterator> range = mapBlocksUnknownParent.equal_range(head); - while (range.first != range.second) { - std::multimap::iterator it = range.first; - - if (ReadBlockFromDisk(mapBlockIndex.count(hash)!=0?mapBlockIndex[hash]->GetHeight():0,block, it->second,1)) - { - LogPrintf("%s: Processing out of order child %s of %s\n", __func__, block.GetHash().ToString(), - head.ToString()); - CValidationState dummy; - if (ProcessNewBlock(0,0,dummy, NULL, &block, true, &it->second)) - { - nLoaded++; - queue.push_back(block.GetHash()); - } - } - range.first++; - mapBlocksUnknownParent.erase(it); - } - } - } catch (const std::exception& e) { - 
LogPrintf("%s: Deserialize or I/O error - %s\n", __func__, e.what()); - } - } - } catch (const std::runtime_error& e) { - AbortNode(std::string("System error: ") + e.what()); - } - if (nLoaded > 0) - LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded, GetTimeMillis() - nStart); - return nLoaded > 0; -} - -void static CheckBlockIndex() -{ - const Consensus::Params& consensusParams = Params().GetConsensus(); - if (!fCheckBlockIndex) { - return; - } - - LOCK(cs_main); - - // During a reindex, we read the genesis block and call CheckBlockIndex before ActivateBestChain, - // so we have the genesis block in mapBlockIndex but no active chain. (A few of the tests when - // iterating the block tree require that chainActive has been initialized.) - if (chainActive.Height() < 0) { - assert(mapBlockIndex.size() <= 1); - return; - } - - // Build forward-pointing map of the entire block tree. - std::multimap forward; - for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) { - if ( it->second != 0 ) - forward.insert(std::make_pair(it->second->pprev, it->second)); - } - if ( Params().NetworkIDString() != "regtest" ) - assert(forward.size() == mapBlockIndex.size()); - - std::pair::iterator,std::multimap::iterator> rangeGenesis = forward.equal_range(NULL); - CBlockIndex *pindex = rangeGenesis.first->second; - rangeGenesis.first++; - assert(rangeGenesis.first == rangeGenesis.second); // There is only one index entry with parent NULL. - - // Iterate over the entire block tree, using depth-first search. - // Along the way, remember whether there are blocks on the path from genesis - // block being explored which are the first to have certain properties. - size_t nNodes = 0; - int nHeight = 0; - CBlockIndex* pindexFirstInvalid = NULL; // Oldest ancestor of pindex which is invalid. - CBlockIndex* pindexFirstMissing = NULL; // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA. 
- CBlockIndex* pindexFirstNeverProcessed = NULL; // Oldest ancestor of pindex for which nTx == 0. - CBlockIndex* pindexFirstNotTreeValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE (regardless of being valid or not). - CBlockIndex* pindexFirstNotTransactionsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS (regardless of being valid or not). - CBlockIndex* pindexFirstNotChainValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN (regardless of being valid or not). - CBlockIndex* pindexFirstNotScriptsValid = NULL; // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS (regardless of being valid or not). - while (pindex != NULL) { - nNodes++; - if (pindexFirstInvalid == NULL && pindex->nStatus & BLOCK_FAILED_VALID) pindexFirstInvalid = pindex; - if (pindexFirstMissing == NULL && !(pindex->nStatus & BLOCK_HAVE_DATA)) pindexFirstMissing = pindex; - if (pindexFirstNeverProcessed == NULL && pindex->nTx == 0) pindexFirstNeverProcessed = pindex; - if (pindex->pprev != NULL && pindexFirstNotTreeValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) pindexFirstNotTreeValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotTransactionsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) pindexFirstNotTransactionsValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotChainValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) pindexFirstNotChainValid = pindex; - if (pindex->pprev != NULL && pindexFirstNotScriptsValid == NULL && (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) pindexFirstNotScriptsValid = pindex; - - // Begin: actual consistency checks. - if (pindex->pprev == NULL) { - // Genesis block checks. - assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock); // Genesis block's hash must match. 
- assert(pindex == chainActive.Genesis()); // The current active chain's genesis block must be this block. - } - if (pindex->nChainTx == 0) assert(pindex->nSequenceId == 0); // nSequenceId can't be set for blocks that aren't linked - // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or not pruning has occurred). - // HAVE_DATA is only equivalent to nTx > 0 (or VALID_TRANSACTIONS) if no pruning has occurred. - if (!fHavePruned) { - // If we've never pruned, then HAVE_DATA should be equivalent to nTx > 0 - assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0)); - assert(pindexFirstMissing == pindexFirstNeverProcessed); - } else { - // If we have pruned, then we can only say that HAVE_DATA implies nTx > 0 - if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0); - } - if (pindex->nStatus & BLOCK_HAVE_UNDO) assert(pindex->nStatus & BLOCK_HAVE_DATA); - assert(((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0)); // This is pruning-independent. - // All parents having had data (at some point) is equivalent to all parents being VALID_TRANSACTIONS, which is equivalent to nChainTx being set. - assert((pindexFirstNeverProcessed != NULL) == (pindex->nChainTx == 0)); // nChainTx != 0 is used to signal that all parent blocks have been processed (but may have been pruned). - assert((pindexFirstNotTransactionsValid != NULL) == (pindex->nChainTx == 0)); - assert(pindex->GetHeight() == nHeight); // nHeight must be consistent. - assert(pindex->pprev == NULL || pindex->chainPower >= pindex->pprev->chainPower); // For every block except the genesis block, the chainwork must be larger than the parent's. - assert(nHeight < 2 || (pindex->pskip && (pindex->pskip->GetHeight() < nHeight))); // The pskip pointer must point back for all but the first 2 blocks. 
- assert(pindexFirstNotTreeValid == NULL); // All mapBlockIndex entries must at least be TREE valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) assert(pindexFirstNotTreeValid == NULL); // TREE valid implies all parents are TREE valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) assert(pindexFirstNotChainValid == NULL); // CHAIN valid implies all parents are CHAIN valid - if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) assert(pindexFirstNotScriptsValid == NULL); // SCRIPTS valid implies all parents are SCRIPTS valid - if (pindexFirstInvalid == NULL) { - // Checks for not-invalid blocks. - assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0); // The failed mask cannot be set for blocks without invalid parents. - } - if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && pindexFirstNeverProcessed == NULL) { - if (pindexFirstInvalid == NULL) { - // If this block sorts at least as good as the current tip and - // is valid and we have all data for its parents, it must be in - // setBlockIndexCandidates. chainActive.Tip() must also be there - // even if some data has been pruned. - if (pindexFirstMissing == NULL || pindex == chainActive.Tip()) { - assert(setBlockIndexCandidates.count(pindex)); - } - // If some parent is missing, then it could be that this block was in - // setBlockIndexCandidates but had to be removed because of the missing data. - // In this case it must be in mapBlocksUnlinked -- see test below. - } - } else { // If this block sorts worse than the current tip or some ancestor's block has never been seen, it cannot be in setBlockIndexCandidates. - assert(setBlockIndexCandidates.count(pindex) == 0); - } - // Check whether this block is in mapBlocksUnlinked. 
-        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
-        bool foundInUnlinked = false;
-        while (rangeUnlinked.first != rangeUnlinked.second) {
-            assert(rangeUnlinked.first->first == pindex->pprev);
-            if (rangeUnlinked.first->second == pindex) {
-                foundInUnlinked = true;
-                break;
-            }
-            rangeUnlinked.first++;
-        }
-        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed != NULL && pindexFirstInvalid == NULL) {
-            // If this block has block data available, some parent was never received, and has no invalid parents, it must be in mapBlocksUnlinked.
-            assert(foundInUnlinked);
-        }
-        if (!(pindex->nStatus & BLOCK_HAVE_DATA)) assert(!foundInUnlinked); // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
-        if (pindexFirstMissing == NULL) assert(!foundInUnlinked); // We aren't missing data for any parent -- cannot be in mapBlocksUnlinked.
-        if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) && pindexFirstNeverProcessed == NULL && pindexFirstMissing != NULL) {
-            // We HAVE_DATA for this block, have received data for all parents at some point, but we're currently missing data for some parent.
-            assert(fHavePruned); // We must have pruned.
-            // This block may have entered mapBlocksUnlinked if:
-            //  - it has a descendant that at some point had more work than the
-            //    tip, and
-            //  - we tried switching to that descendant but were missing
-            //    data for some intermediate block between chainActive and the
-            //    tip.
-            // So if this block is itself better than chainActive.Tip() and it wasn't in
-            // setBlockIndexCandidates, then it must be in mapBlocksUnlinked.
-            if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) && setBlockIndexCandidates.count(pindex) == 0) {
-                if (pindexFirstInvalid == NULL) {
-                    assert(foundInUnlinked);
-                }
-            }
-        }
-        // try {
-        //     assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
-        // } catch (const runtime_error&) {
-        //     assert(!"Failed to read index entry");
-        // }
-        // End: actual consistency checks.
-
-        // Try descending into the first subnode.
-        std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> range = forward.equal_range(pindex);
-        if (range.first != range.second) {
-            // A subnode was found.
-            pindex = range.first->second;
-            nHeight++;
-            continue;
-        }
-        // This is a leaf node.
-        // Move upwards until we reach a node of which we have not yet visited the last child.
-        while (pindex) {
-            // We are going to either move to a parent or a sibling of pindex.
-            // If pindex was the first with a certain property, unset the corresponding variable.
-            if (pindex == pindexFirstInvalid) pindexFirstInvalid = NULL;
-            if (pindex == pindexFirstMissing) pindexFirstMissing = NULL;
-            if (pindex == pindexFirstNeverProcessed) pindexFirstNeverProcessed = NULL;
-            if (pindex == pindexFirstNotTreeValid) pindexFirstNotTreeValid = NULL;
-            if (pindex == pindexFirstNotTransactionsValid) pindexFirstNotTransactionsValid = NULL;
-            if (pindex == pindexFirstNotChainValid) pindexFirstNotChainValid = NULL;
-            if (pindex == pindexFirstNotScriptsValid) pindexFirstNotScriptsValid = NULL;
-            // Find our parent.
-            CBlockIndex* pindexPar = pindex->pprev;
-            // Find which child we just visited.
-            std::pair<std::multimap<CBlockIndex*,CBlockIndex*>::iterator,std::multimap<CBlockIndex*,CBlockIndex*>::iterator> rangePar = forward.equal_range(pindexPar);
-            while (rangePar.first->second != pindex) {
-                assert(rangePar.first != rangePar.second); // Our parent must have at least the node we're coming from as child.
-                rangePar.first++;
-            }
-            // Proceed to the next one.
-            rangePar.first++;
-            if (rangePar.first != rangePar.second) {
-                // Move to the sibling.
- pindex = rangePar.first->second; - break; - } else { - // Move up further. - pindex = pindexPar; - nHeight--; - continue; - } - } - } - - // Check that we actually traversed the entire map. - assert(nNodes == forward.size()); -} // CAlert std::string GetWarnings(const std::string& strFor) diff --git a/src/main.h b/src/main.h index 721d80b9b..6de169e4c 100644 --- a/src/main.h +++ b/src/main.h @@ -289,6 +289,14 @@ void PruneAndFlush(); /** (try to) add transaction to memory pool **/ bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree, bool* pfMissingInputs, bool fRejectAbsurdFee=false, int dosLevel=-1); +bool CCTxFixAcceptToMemPoolUnchecked(CTxMemPool& pool, const CTransaction &tx); +bool myAddtomempool(CTransaction &tx, CValidationState *pstate, bool fSkipExpiry); + +/** Orphan transaction management */ +bool AddOrphanTx(const CTransaction& tx, NodeId peer); +void EraseOrphanTx(uint256 hash); +void EraseOrphansFor(NodeId peer); +unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans); struct CNodeStateStats { @@ -708,6 +716,8 @@ bool ContextualCheckTransaction(int32_t slowflag,const CBlock *block, CBlockInde bool (*isInitBlockDownload)() = IsInitialBlockDownload,int32_t validateprices=1); /** Apply the effects of this transaction on the UTXO set represented by view */ +class CTxUndo; +void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight); void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight); /** Transaction validation functions */ diff --git a/src/main_internal.h b/src/main_internal.h new file mode 100644 index 000000000..331eefffc --- /dev/null +++ b/src/main_internal.h @@ -0,0 +1,83 @@ +// Copyright (c) 2016-2024 The Hush developers +// Distributed under the GPLv3 software license, see the accompanying +// file COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html +// +// Internal shared state for main.cpp and block_processing.cpp +// 
These variables were previously in an anonymous namespace in main.cpp
+// but need to be shared across translation units after splitting.
+
+#ifndef HUSH_MAIN_INTERNAL_H
+#define HUSH_MAIN_INTERNAL_H
+
+#include "chain.h"
+#include "sync.h"
+#include "bloom.h"
+#include "primitives/block.h"
+#include "chainparams.h"
+#include <set>
+#include <map>
+#include <vector>
+#include <list>
+#include <boost/scoped_ptr.hpp>
+
+struct CBlockIndexWorkComparator
+{
+    bool operator()(CBlockIndex *pa, const CBlockIndex *pb) const {
+        // First sort by most total work, ...
+
+        if (pa->chainPower.chainWork > pb->chainPower.chainWork) return false;
+        if (pa->chainPower.chainWork < pb->chainPower.chainWork) return true;
+
+        // ... then by earliest time received, ...
+        if (pa->nSequenceId < pb->nSequenceId) return false;
+        if (pa->nSequenceId > pb->nSequenceId) return true;
+
+        // Use pointer address as tie breaker (should only happen with blocks
+        // loaded from disk, as those all have id 0).
+        if (pa < pb) return false;
+        if (pa > pb) return true;
+
+        // Identical blocks.
+        return false;
+    }
+};
+
+/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
+struct QueuedBlock {
+    uint256 hash;
+    CBlockIndex *pindex;     //! Optional.
+    int64_t nTime;           //! Time of "getdata" request in microseconds.
+    bool fValidatedHeaders;  //! Whether this block has validated headers at the time of request.
+    int64_t nTimeDisconnect; //! The timeout for this block request (for disconnecting a slow peer)
+};
+
+// Shared block-processing state — defined in main.cpp
+extern CBlockIndex *pindexBestInvalid;
+extern std::set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexCandidates;
+extern int nSyncStarted;
+extern std::multimap<CBlockIndex*, CBlockIndex*> mapBlocksUnlinked;
+
+extern CCriticalSection cs_LastBlockFile;
+extern std::vector<CBlockFileInfo> vinfoBlockFile, tmpBlockFiles;
+extern int nLastBlockFile;
+extern int nLastTmpFile;
+extern unsigned int maxTempFileSize0;
+extern unsigned int maxTempFileSize1;
+extern bool fCheckForPruning;
+
+extern CCriticalSection cs_nBlockSequenceId;
+extern uint32_t nBlockSequenceId;
+
+extern std::map<uint256, NodeId> mapBlockSource;
+
+extern boost::scoped_ptr<CRollingBloomFilter> recentRejects;
+extern uint256 hashRecentRejectsChainTip;
+
+extern std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight;
+extern int nQueuedValidatedHeaders;
+extern int nPreferredDownload;
+
+extern std::set<CBlockIndex*> setDirtyBlockIndex;
+extern std::set<int> setDirtyFileInfo;
+
+#endif // HUSH_MAIN_INTERNAL_H
diff --git a/src/mempool_accept.cpp b/src/mempool_accept.cpp
new file mode 100644
index 000000000..ab43c74d4
--- /dev/null
+++ b/src/mempool_accept.cpp
@@ -0,0 +1,524 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2014 The Bitcoin Core developers
+// Copyright (c) 2016-2024 The Hush developers
+// Distributed under the GPLv3 software license, see the accompanying
+// file COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html
+//
+// Mempool acceptance and orphan transaction management — extracted from main.cpp
+// Functions: AddOrphanTx, EraseOrphanTx, EraseOrphansFor, LimitOrphanTxSize,
+// GetMinRelayFee, AcceptToMemoryPool, CCTxFixAcceptToMemPoolUnchecked, myAddtomempool
+
+#include "main.h"
+#include "sodium.h"
+#include "arith_uint256.h"
+#include "chainparams.h"
+#include "consensus/upgrades.h"
+#include "consensus/validation.h"
+#include "core_io.h"
+#include "init.h"
+#include "key_io.h"
+#include "metrics.h"
+#include "net.h"
+#include "script/interpreter.h"
+#include "timedata.h"
+#include "txdb.h"
+#include "txmempool.h"
+#include "undo.h"
+#include "util.h"
+#include "utilmoneystr.h"
+#include "validationinterface.h"
+#include "hush_defs.h"
+#include "hush.h"
+
+#include "librustzcash.h"
+
+#include <sstream>
+#include <boost/algorithm/string/replace.hpp>
+#include <boost/filesystem.hpp>
+#include <boost/filesystem/fstream.hpp>
+#include <boost/math/distributions/poisson.hpp>
+#include <boost/static_assert.hpp>
+#include <boost/thread.hpp>
+
+using namespace std;
+
+extern int32_t HUSH_LOADINGBLOCKS,HUSH_LONGESTCHAIN,HUSH_INSYNC,HUSH_CONNECTING,HUSH_EXTRASATOSHI;
+extern unsigned int expiryDelta;
+extern CFeeRate minRelayTxFee;
+extern bool fAddressIndex;
+extern bool fSpentIndex;
+extern const bool ishush3;
+
+#define ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE 10000
+
+// Orphan transaction data — defined in main.cpp
+struct COrphanTx {
+    CTransaction tx;
+    NodeId fromPeer;
+};
+extern map<uint256, COrphanTx> mapOrphanTransactions;
+extern map<uint256, set<uint256> > mapOrphanTransactionsByPrev;
+
+bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+    uint256 hash = tx.GetHash();
+    if (mapOrphanTransactions.count(hash))
+        return false;
+
+    // Ignore big transactions, to avoid a
+    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
+    // large transaction with a missing parent then we assume
+    // it will rebroadcast it later, after the parent transaction(s)
+    // have been mined or received.
+    // 10,000 orphans, each of which is at most 5,000 bytes big is
+    // at most 500 megabytes of orphans:
+    unsigned int sz = GetSerializeSize(tx, SER_NETWORK, tx.nVersion);
+    if (sz > 5000)
+    {
+        LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
+        return false;
+    }
+
+    mapOrphanTransactions[hash].tx = tx;
+    mapOrphanTransactions[hash].fromPeer = peer;
+    BOOST_FOREACH(const CTxIn& txin, tx.vin)
+        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
+
+    LogPrint("mempool", "stored orphan tx %s (mapsz %u prevsz %u)\n", hash.ToString(),
+             mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
+    return true;
+}
+
+void EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+    map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
+    if (it == mapOrphanTransactions.end())
+        return;
+    BOOST_FOREACH(const CTxIn& txin, it->second.tx.vin)
+    {
+        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
+        if (itPrev == mapOrphanTransactionsByPrev.end())
+            continue;
+        itPrev->second.erase(hash);
+        if (itPrev->second.empty())
+            mapOrphanTransactionsByPrev.erase(itPrev);
+    }
+    mapOrphanTransactions.erase(it);
+}
+
+void EraseOrphansFor(NodeId peer)
+{
+    int nErased = 0;
+    map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
+    while (iter != mapOrphanTransactions.end())
+    {
+        map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
+        if (maybeErase->second.fromPeer == peer)
+        {
+            EraseOrphanTx(maybeErase->second.tx.GetHash());
+            ++nErased;
+        }
+    }
+    if (nErased > 0) LogPrint("mempool", "Erased %d orphan tx from peer %d\n", nErased, peer);
+}
+
+
+unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+{
+    unsigned int nEvicted = 0;
+    while (mapOrphanTransactions.size() > nMaxOrphans)
+    {
+        // Evict a random orphan:
+        uint256 randomhash = GetRandHash();
+        map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
+        if (it == mapOrphanTransactions.end())
+            it = mapOrphanTransactions.begin();
+        EraseOrphanTx(it->first);
+        ++nEvicted;
+    }
+    return nEvicted;
+}
+
+CAmount GetMinRelayFee(const CTransaction& tx, unsigned int nBytes, bool fAllowFree)
+{
+    {
+        LOCK(mempool.cs);
+        uint256 hash = tx.GetHash();
+        double dPriorityDelta = 0;
+        CAmount nFeeDelta = 0;
+        mempool.ApplyDeltas(hash, dPriorityDelta, nFeeDelta);
+        if (dPriorityDelta > 0 || nFeeDelta > 0)
+            return 0;
+    }
+
+    CAmount nMinFee = ::minRelayTxFee.GetFee(nBytes);
+
+    if (fAllowFree)
+    {
+        // There is a free transaction area in blocks created by most miners,
+        // * If we are relaying we allow transactions up to DEFAULT_BLOCK_PRIORITY_SIZE - 1000
+        //   to be considered to fall into this category. We don't want to encourage sending
+        //   multiple transactions instead of one big transaction to avoid fees.
+        if (nBytes < (DEFAULT_BLOCK_PRIORITY_SIZE - 1000))
+            nMinFee = 0;
+    }
+
+    if (!MoneyRange(nMinFee))
+        nMinFee = MAX_MONEY;
+    return nMinFee;
+}
+
+
+bool AcceptToMemoryPool(CTxMemPool& pool, CValidationState &state, const CTransaction &tx, bool fLimitFree,bool* pfMissingInputs, bool fRejectAbsurdFee, int dosLevel)
+{
+    AssertLockHeld(cs_main);
+    const uint32_t z2zTransitionWindow = 10;
+    const uint32_t z2zTransitionStart = 340000 - z2zTransitionWindow;
+    const uint32_t nHeight = chainActive.Height();
+
+    // This only applies to HUSH3, other chains can start off z2z via ac_private=1
+    if(ishush3) {
+        if((nHeight >= z2zTransitionStart) || (nHeight <= 340000)) {
+            // During the z2z transition window, only coinbase tx's as part of blocks are allowed
+            // Theory: We want an empty mempool at our fork block height, and the only way to assure that
+            // is to have an empty mempool for a few previous blocks, to take care of potential re-orgs
+            // and edge cases. This empty mempool assures there will be no transactions involving taddrs
+            // stuck in the mempool, when the z2z rule takes effect.
+ // Thanks to jl777 for helping design this + fprintf(stderr,"%s: rejecting all tx's during z2z transition window. Please retry after Block %d !!!\n", __func__,nHeight); + return false; + } + } + if (pfMissingInputs) + *pfMissingInputs = false; + uint32_t tiptime; + int flag=0,nextBlockHeight = chainActive.Height() + 1; + auto consensusBranchId = CurrentEpochBranchId(nextBlockHeight, Params().GetConsensus()); + if ( nextBlockHeight <= 1 || chainActive.LastTip() == 0 ) + tiptime = (uint32_t)time(NULL); + else tiptime = (uint32_t)chainActive.LastTip()->nTime; + + auto verifier = libzcash::ProofVerifier::Strict(); + + if (!CheckTransaction(tiptime,tx, state, verifier, 0, 0)) + { + return error("AcceptToMemoryPool: CheckTransaction failed"); + } + + // Reject duplicate output proofs in a single ztx in mempool + // Migrate this to CheckTransaction() to make it a consensus requirement + { + set vSaplingOutputProof; + BOOST_FOREACH(const OutputDescription& output, tx.vShieldedOutput) + { + if (vSaplingOutputProof.count(output.zkproof)) + return state.Invalid(error("AcceptToMemoryPool: duplicate output proof"),REJECT_DUPLICATE_OUTPUT_PROOF, "bad-txns-duplicate-output-proof"); + vSaplingOutputProof.insert(output.zkproof); + } + } + + // Reject duplicate spend proofs in a single ztx in mempool + // Migrate this to CheckTransaction() to make it a consensus requirement + { + set vSaplingSpendProof; + BOOST_FOREACH(const SpendDescription& spend, tx.vShieldedSpend) + { + if (vSaplingSpendProof.count(spend.zkproof)) + return state.Invalid(error("AcceptToMemoryPool: duplicate spend proof"),REJECT_DUPLICATE_SPEND_PROOF, "bad-txns-duplicate-spend-proof"); + vSaplingSpendProof.insert(spend.zkproof); + } + } + + // DoS level set to 10 to be more forgiving. + // Check transaction contextually against the set of consensus rules which apply in the next block to be mined. + if (!ContextualCheckTransaction(0,0,0,tx, state, nextBlockHeight, (dosLevel == -1) ? 
10 : dosLevel)) + { + return error("AcceptToMemoryPool: ContextualCheckTransaction failed"); + } +//fprintf(stderr,"addmempool 2\n"); + // Coinbase is only valid in a block, not as a loose transaction + if (tx.IsCoinBase()) + { + fprintf(stderr,"AcceptToMemoryPool coinbase as individual tx\n"); + return state.DoS(100, error("AcceptToMemoryPool: coinbase as individual tx"),REJECT_INVALID, "coinbase"); + } + + // Rather not work on nonstandard transactions (unless -testnet/-regtest) + string reason; + if (Params().RequireStandard() && !IsStandardTx(tx, reason, nextBlockHeight)) + { + // + //fprintf(stderr,"AcceptToMemoryPool reject nonstandard transaction: %s\nscriptPubKey: %s\n",reason.c_str(),tx.vout[0].scriptPubKey.ToString().c_str()); + return state.DoS(0,error("AcceptToMemoryPool: nonstandard transaction: %s", reason),REJECT_NONSTANDARD, reason); + } + + // Only accept nLockTime-using transactions that can be mined in the next + // block; we don't want our mempool filled up with transactions that can't + // be mined yet. + if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) + { + //fprintf(stderr,"AcceptToMemoryPool reject non-final\n"); + return state.DoS(0, false, REJECT_NONSTANDARD, "non-final"); + } + // is it already in the memory pool? 
+ uint256 hash = tx.GetHash(); + if (pool.exists(hash)) + { + //fprintf(stderr,"already in mempool\n"); + return state.Invalid(false, REJECT_DUPLICATE, "already in mempool"); + } + + // Check for conflicts with in-memory transactions + { + LOCK(pool.cs); // protect pool.mapNextTx + for (unsigned int i = 0; i < tx.vin.size(); i++) + { + COutPoint outpoint = tx.vin[i].prevout; + if (pool.mapNextTx.count(outpoint)) + { + // Disable replacement feature for now + return false; + } + } + + for (const SpendDescription &spendDescription : tx.vShieldedSpend) { + if (pool.nullifierExists(spendDescription.nullifier, SAPLING)) { + return false; + } + } + } + + { + CCoinsView dummy; + CCoinsViewCache view(&dummy); + int64_t interest; + CAmount nValueIn = 0; + { + LOCK(pool.cs); + CCoinsViewMemPool viewMemPool(pcoinsTip, pool); + view.SetBackend(viewMemPool); + + // do we already have it? + if (view.HaveCoins(hash)) { + //fprintf(stderr,"view.HaveCoins(hash) error\n"); + return state.Invalid(false, REJECT_DUPLICATE, "already have coins"); + } + + { + // do all inputs exist? + // Note that this does not check for the presence of actual outputs (see the next check for that), + // and only helps with filling in pfMissingInputs (to determine missing vs spent). + BOOST_FOREACH(const CTxIn txin, tx.vin) + { + if (!view.HaveCoins(txin.prevout.hash)) { + if (pfMissingInputs) + *pfMissingInputs = true; + //fprintf(stderr,"missing inputs\n"); + return false; + // https://github.com/zcash/zcash/blob/master/src/main.cpp#L1490 + // state.DoS(0, error("AcceptToMemoryPool: tx inputs not found"),REJECT_INVALID, "bad-txns-inputs-missing"); + } + } + // are the actual inputs available? + if (!view.HaveInputs(tx)) { + //fprintf(stderr,"accept failure. inputs-spent\n"); + return state.Invalid(error("AcceptToMemoryPool: inputs already spent"),REJECT_DUPLICATE, "bad-txns-inputs-spent"); + } + } + + // are the zaddr requirements met? 
+ if (!view.HaveShieldedRequirements(tx)) { + //fprintf(stderr,"accept failure. ztx reqs not met\n"); + return state.Invalid(error("AcceptToMemoryPool: shielded requirements not met"),REJECT_DUPLICATE, "bad-txns-shielded-requirements-not-met"); + } + + // Bring the best block into scope + view.GetBestBlock(); + + nValueIn = view.GetValueIn(chainActive.LastTip()->GetHeight(),&interest,tx,chainActive.LastTip()->nTime); + // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool + view.SetBackend(dummy); + } + // Check for non-standard pay-to-script-hash in inputs + if (Params().RequireStandard() && !AreInputsStandard(tx, view, consensusBranchId)) + return error("AcceptToMemoryPool: reject nonstandard transaction input"); + + // Check that the transaction doesn't have an excessive number of + // sigops, making it impossible to mine. Since the coinbase transaction + // itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than + // MAX_BLOCK_SIGOPS; we still consider this an invalid rather than + // merely non-standard transaction. + unsigned int nSigOps = GetLegacySigOpCount(tx); + nSigOps += GetP2SHSigOpCount(tx, view); + if (nSigOps > MAX_STANDARD_TX_SIGOPS) + { + fprintf(stderr,"accept failure.4\n"); + return state.DoS(1, error("AcceptToMemoryPool: too many sigops %s, %d > %d", hash.ToString(), nSigOps, MAX_STANDARD_TX_SIGOPS),REJECT_NONSTANDARD, "bad-txns-too-many-sigops"); + } + + CAmount nValueOut = tx.GetValueOut(); + CAmount nFees = nValueIn-nValueOut; + double dPriority = view.GetPriority(tx, chainActive.Height()); + if ( nValueOut > 777777*COIN && HUSH_VALUETOOBIG(nValueOut - 777777*COIN) != 0 ) // some room for blockreward and txfees + return state.DoS(100, error("AcceptToMemoryPool: GetValueOut too big"),REJECT_INVALID,"tx valueout is too big"); + + // Keep track of transactions that spend a coinbase, which we re-scan + // during reorgs to ensure COINBASE_MATURITY is still met. 
+ bool fSpendsCoinbase = false; + BOOST_FOREACH(const CTxIn &txin, tx.vin) { + const CCoins *coins = view.AccessCoins(txin.prevout.hash); + if (coins->IsCoinBase()) { + fSpendsCoinbase = true; + break; + } + } + // Grab the branch ID we expect this transaction to commit to. We don't + // yet know if it does, but if the entry gets added to the mempool, then + // it has passed ContextualCheckInputs and therefore this is correct. + auto consensusBranchId = CurrentEpochBranchId(chainActive.Height() + 1, Params().GetConsensus()); + + CTxMemPoolEntry entry(tx, nFees, GetTime(), dPriority, chainActive.Height(), mempool.HasNoInputsOf(tx), fSpendsCoinbase, consensusBranchId); + unsigned int nSize = entry.GetTxSize(); + + // Accept a tx if it contains zspends and has at least the default fee specified by z_sendmany. + if (tx.vShieldedSpend.size() > 0 && nFees >= ASYNC_RPC_OPERATION_DEFAULT_MINERS_FEE) { + // In future we will we have more accurate and dynamic computation of fees, derpz + } else { + // Don't accept it if it can't get into a block, yallz + CAmount txMinFee = GetMinRelayFee(tx, nSize, true); + if (fLimitFree && nFees < txMinFee) { + //fprintf(stderr,"accept failure.5\n"); + return state.DoS(0, error("AcceptToMemoryPool: not enough fees %s, %d < %d",hash.ToString(), nFees, txMinFee),REJECT_INSUFFICIENTFEE, "insufficient fee"); + } + } + + // Require that free transactions have sufficient priority to be mined in the next block. + if (GetBoolArg("-relaypriority", false) && nFees < ::minRelayTxFee.GetFee(nSize) && !AllowFree(view.GetPriority(tx, chainActive.Height() + 1))) { + fprintf(stderr,"accept failure.6\n"); + return state.DoS(0, false, REJECT_INSUFFICIENTFEE, "insufficient priority"); + } + + // Continuously rate-limit free (really, very-low-fee) transactions + // This mitigates 'penny-flooding' -- sending thousands of free transactions just to + // be annoying or make others' transactions take longer to confirm. 
+ if (fLimitFree && nFees < ::minRelayTxFee.GetFee(nSize) ) + { + static CCriticalSection csFreeLimiter; + static double dFreeCount; + static int64_t nLastTime; + int64_t nNow = GetTime(); + + LOCK(csFreeLimiter); + + // Use an exponentially decaying ~10-minute window: + dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime)); + nLastTime = nNow; + // -limitfreerelay unit is thousand-bytes-per-minute + // At default rate it would take over a month to fill 1GB + if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000) + { + fprintf(stderr,"accept failure.7\n"); + return state.DoS(0, error("AcceptToMemoryPool: free transaction rejected by rate limiter"), REJECT_INSUFFICIENTFEE, "rate limited free transaction"); + } + LogPrint("mempool", "Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize); + dFreeCount += nSize; + } + + fRejectAbsurdFee = false; + + if ( fRejectAbsurdFee && nFees > ::minRelayTxFee.GetFee(nSize) * 10000 && nFees > nValueOut/19) + // Disable checks for absurd fees when adding to the mempool. Instead, this check is done + // when a user attempts to make a transaction with an absurd fee and only rejects absurd + // fees when OP_RETURN data is NOT being used. This means users making normal financial + // transactions (z2z) are protected from absurd fees, it is only users who are storing + // arbitrary data via a z2t transaction are allowed to (or potentially required) to pay high fees + // It would be nice to detect the use of OP_RETURN right here but it seems to only be known + // inside of IsStandard() inside of IsStandardTx() and we want to avoid doing expensive checks + // multiple times. 
+ { + string errmsg = strprintf("absurdly high fees %s, %d > %d", + hash.ToString(), + nFees, ::minRelayTxFee.GetFee(nSize) * 10000); + LogPrint("mempool", errmsg.c_str()); + return state.Error("AcceptToMemoryPool: " + errmsg); + } + //fprintf(stderr,"addmempool 6\n"); + + // Check against previous transactions + // This is done last to help prevent CPU exhaustion denial-of-service attacks. + PrecomputedTransactionData txdata(tx); + if (!ContextualCheckInputs(tx, state, view, true, STANDARD_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId)) + { + //fprintf(stderr,"accept failure.9\n"); + return error("AcceptToMemoryPool: ConnectInputs failed %s", hash.ToString()); + } + + // Check again against just the consensus-critical mandatory script + // verification flags, in case of bugs in the standard flags that cause + // transactions to pass as valid when they're actually invalid. For + // instance the STRICTENC flag was incorrectly allowing certain + // CHECKSIG NOT scripts to pass, even though they were invalid. + // + // There is a similar check in CreateNewBlock() to prevent creating + // invalid blocks, however allowing such transactions into the mempool + // can be exploited as a DoS attack. + // XXX: is this neccesary for CryptoConditions? + if ( HUSH_CONNECTING <= 0 && chainActive.LastTip() != 0 ) + { + flag = 1; + HUSH_CONNECTING = (1<<30) + (int32_t)chainActive.LastTip()->GetHeight() + 1; + } + + if (!ContextualCheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata, Params().GetConsensus(), consensusBranchId)) + { + if ( flag != 0 ) + HUSH_CONNECTING = -1; + return error("AcceptToMemoryPool: BUG! PLEASE REPORT THIS! 
ConnectInputs failed against MANDATORY but not STANDARD flags %s", hash.ToString()); + } + if ( flag != 0 ) + HUSH_CONNECTING = -1; + + { + LOCK(pool.cs); + // Store transaction in memory + pool.addUnchecked(hash, entry, !IsInitialBlockDownload()); + + // Add memory address index + if (fAddressIndex) { + pool.addAddressIndex(entry, view); + } + + // Add memory spent index + if (fSpentIndex) { + pool.addSpentIndex(entry, view); + } + } + } + return true; +} + +bool CCTxFixAcceptToMemPoolUnchecked(CTxMemPool& pool, const CTransaction &tx) +{ + // called from CheckBlock which is in cs_main and mempool.cs locks already. + auto consensusBranchId = CurrentEpochBranchId(chainActive.Height() + 1, Params().GetConsensus()); + CTxMemPoolEntry entry(tx, 0, GetTime(), 0, chainActive.Height(), mempool.HasNoInputsOf(tx), false, consensusBranchId); + //fprintf(stderr, "adding %s to mempool from block %d\n",tx.GetHash().ToString().c_str(),chainActive.GetHeight()); + pool.addUnchecked(tx.GetHash(), entry, false); + return true; +} + +bool myAddtomempool(CTransaction &tx, CValidationState *pstate, bool fSkipExpiry) +{ + CValidationState state; + if (!pstate) + pstate = &state; + CTransaction Ltx; bool fMissingInputs,fOverrideFees = false; + if ( mempool.lookup(tx.GetHash(),Ltx) == 0 ) + { + if ( !fSkipExpiry ) + return(AcceptToMemoryPool(mempool, *pstate, tx, false, &fMissingInputs, !fOverrideFees, -1)); + else + return(CCTxFixAcceptToMemPoolUnchecked(mempool,tx)); + } + else return(true); +} diff --git a/src/tx_validation.cpp b/src/tx_validation.cpp new file mode 100644 index 000000000..1011ec58d --- /dev/null +++ b/src/tx_validation.cpp @@ -0,0 +1,1012 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2014 The Bitcoin Core developers +// Copyright (c) 2016-2024 The Hush developers +// Distributed under the GPLv3 software license, see the accompanying +// file COPYING or https://www.gnu.org/licenses/gpl-3.0.en.html +// +// Transaction validation functions — 
extracted from main.cpp +// Functions: IsStandardTx, IsFinalTx, IsExpiredTx, CheckFinalTx, +// AreInputsStandard, GetLegacySigOpCount, GetP2SHSigOpCount, +// ContextualCheckCoinbaseTransaction, ContextualCheckTransaction, +// CheckTransaction, hush_isnotaryvout, CheckTransactionWithoutProofVerification, +// UpdateCoins, CScriptCheck::operator(), GetSpendHeight, +// Consensus::CheckTxInputs, ContextualCheckInputs + +#include "main.h" +#include "sodium.h" +#include "arith_uint256.h" +#include "chainparams.h" +#include "checkpoints.h" +#include "consensus/upgrades.h" +#include "consensus/validation.h" +#include "core_io.h" +#include "init.h" +#include "metrics.h" +#include "script/interpreter.h" +#include "txmempool.h" +#include "undo.h" +#include "util.h" +#include "utilmoneystr.h" +#include "hush_defs.h" +#include "key_io.h" +#include "hush.h" + +#include "librustzcash.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +extern int32_t HUSH_LOADINGBLOCKS,HUSH_LONGESTCHAIN,HUSH_INSYNC,HUSH_CONNECTING,HUSH_EXTRASATOSHI; +extern int32_t nFirstHalvingHeight; +extern unsigned int expiryDelta; +extern CFeeRate minRelayTxFee; + +bool Getscriptaddress(char *destaddr,const CScript &scriptPubKey); + +bool IsStandardTx(const CTransaction& tx, string& reason, const int nHeight) +{ + const bool overwinterActive = nHeight>=1 ? true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER); + const bool saplingActive = nHeight>=1 ? 
true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING); + + if (saplingActive) { + // Sapling standard rules apply + if (tx.nVersion > CTransaction::SAPLING_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SAPLING_MIN_CURRENT_VERSION) { + reason = "sapling-version"; + return false; + } + } else if (overwinterActive) { + // Overwinter standard rules apply + if (tx.nVersion > CTransaction::OVERWINTER_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::OVERWINTER_MIN_CURRENT_VERSION) { + reason = "overwinter-version"; + return false; + } + } else { + // Sprout standard rules apply + if (tx.nVersion > CTransaction::SPROUT_MAX_CURRENT_VERSION || tx.nVersion < CTransaction::SPROUT_MIN_CURRENT_VERSION) { + reason = "version"; + return false; + } + } + + BOOST_FOREACH(const CTxIn& txin, tx.vin) + { + // Biggest 'standard' txin is a 15-of-15 P2SH multisig with compressed + // keys. (remember the 520 byte limit on redeemScript size) That works + // out to a (15*(33+1))+3=513 byte redeemScript, 513+1+15*(73+1)+3=1627 + // bytes of scriptSig, which we round off to 1650 bytes for some minor + // future-proofing. 
That's also enough to spend a 20-of-20 + // CHECKMULTISIG scriptPubKey, though such a scriptPubKey is not + // considered standard) + if (txin.scriptSig.size() > 1650) { + reason = "scriptsig-size"; + return false; + } + if (!txin.scriptSig.IsPushOnly()) { + reason = "scriptsig-not-pushonly"; + return false; + } + } + + unsigned int v=0,nDataOut = 0; + txnouttype whichType; + BOOST_FOREACH(const CTxOut& txout, tx.vout) + { + if (!::IsStandard(txout.scriptPubKey, whichType)) + { + reason = "scriptpubkey"; + //fprintf(stderr," vout.%d nDataout.%d\n",v,nDataOut); + return false; + } + + if (whichType == TX_NULL_DATA) + { + if ( txout.scriptPubKey.size() > DRAGON_MAXSCRIPTSIZE ) + { + reason = "opreturn too big"; + return(false); + } + nDataOut++; + //fprintf(stderr,"is OP_RETURN\n"); + } else if ((whichType == TX_MULTISIG) && (!fIsBareMultisigStd)) { + reason = "bare-multisig"; + return false; + } else if (txout.IsDust(::minRelayTxFee)) { + reason = "dust"; + return false; + } + v++; + } + + // only one OP_RETURN txout is permitted + if (nDataOut > 1) { + reason = "multi-op-return"; + return false; + } + + return true; +} + +bool IsFinalTx(const CTransaction &tx, int nBlockHeight, int64_t nBlockTime) +{ + if (tx.nLockTime == 0) + return true; + if ((int64_t)tx.nLockTime < ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD ? 
(int64_t)nBlockHeight : nBlockTime)) + return true; + BOOST_FOREACH(const CTxIn& txin, tx.vin) + { + if ( !hush_hardfork_active(nBlockTime) && txin.nSequence == 0xfffffffe && + //if ( (nBlockTime <= ASSETCHAINS_STAKED_HF_TIMESTAMP ) && txin.nSequence == 0xfffffffe && + ( + ((int64_t)tx.nLockTime >= LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockTime) || + ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime > nBlockHeight) + ) + ) + { + + } + //else if ( nBlockTime > ASSETCHAINS_STAKED_HF_TIMESTAMP && txin.nSequence == 0xfffffffe && + else if ( hush_hardfork_active(nBlockTime) && txin.nSequence == 0xfffffffe && + ( + ((int64_t)tx.nLockTime >= LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime <= nBlockTime) || + ((int64_t)tx.nLockTime < LOCKTIME_THRESHOLD && (int64_t)tx.nLockTime <= nBlockHeight)) + ) + { + + } + else if (!txin.IsFinal()) + { + LogPrintf("non-final txin txid.%s seq.%x locktime.%u vs nTime.%u\n",tx.GetHash().ToString().c_str(),txin.nSequence,(uint32_t)tx.nLockTime,(uint32_t)nBlockTime); + return false; + } + } + return true; +} + +bool IsExpiredTx(const CTransaction &tx, int nBlockHeight) +{ + if (tx.nExpiryHeight == 0 || tx.IsCoinBase()) { + return false; + } + return static_cast(nBlockHeight) > tx.nExpiryHeight; +} + +bool CheckFinalTx(const CTransaction &tx, int flags) +{ + AssertLockHeld(cs_main); + + // By convention a negative value for flags indicates that the + // current network-enforced consensus rules should be used. In + // a future soft-fork scenario that would mean checking which + // rules would be enforced for the next block and setting the + // appropriate flags. At the present time no soft-forks are + // scheduled, so no flags are set. + flags = std::max(flags, 0); + + // CheckFinalTx() uses chainActive.Height()+1 to evaluate + // nLockTime because when IsFinalTx() is called within + // CBlock::AcceptBlock(), the height of the block *being* + // evaluated is what is used. 
Thus if we want to know if a + // transaction can be part of the *next* block, we need to call + // IsFinalTx() with one more than chainActive.Height(). + const int nBlockHeight = chainActive.Height() + 1; + + // Timestamps on the other hand don't get any special treatment, + // because we can't know what timestamp the next block will have, + // and there aren't timestamp applications where it matters. + // However this changes once median past time-locks are enforced: + const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) + ? chainActive.Tip()->GetMedianTimePast() + : GetTime(); + + return IsFinalTx(tx, nBlockHeight, nBlockTime); +} + +/** + * Check transaction inputs to mitigate two + * potential denial-of-service attacks: + * + * 1. scriptSigs with extra data stuffed into them, + * not consumed by scriptPubKey (or P2SH script) + * 2. P2SH scripts with a crazy number of expensive + * CHECKSIG/CHECKMULTISIG operations + */ +bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, uint32_t consensusBranchId) +{ + if (tx.IsCoinBase()) + return true; // Coinbases don't use vin normally + + //if (tx.IsCoinImport()) + // return tx.vin[0].scriptSig.IsCoinImport(); + + for (unsigned int i = 0; i < tx.vin.size(); i++) + { + //if (tx.IsPegsImport() && i==0) continue; + const CTxOut& prev = mapInputs.GetOutputFor(tx.vin[i]); + + vector > vSolutions; + txnouttype whichType; + // get the scriptPubKey corresponding to this input: + const CScript& prevScript = prev.scriptPubKey; + //printf("Previous script: %s\n", prevScript.ToString().c_str()); + + if (!Solver(prevScript, whichType, vSolutions)) + return false; + int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions); + if (nArgsExpected < 0) + return false; + + // Transactions with extra stuff in their scriptSigs are + // non-standard. 
Note that this EvalScript() call will + // be quick, because if there are any operations + // beside "push data" in the scriptSig + // IsStandardTx() will have already returned false + // and this method isn't called. + vector > stack; + //printf("Checking script: %s\n", tx.vin[i].scriptSig.ToString().c_str()); + if (!EvalScript(stack, tx.vin[i].scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker(), consensusBranchId)) + return false; + + if (whichType == TX_SCRIPTHASH) + { + if (stack.empty()) + return false; + CScript subscript(stack.back().begin(), stack.back().end()); + vector > vSolutions2; + txnouttype whichType2; + if (Solver(subscript, whichType2, vSolutions2)) + { + int tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2); + if (tmpExpected < 0) + return false; + nArgsExpected += tmpExpected; + } + else + { + // Any other Script with less than 15 sigops OK: + unsigned int sigops = subscript.GetSigOpCount(true); + // ... extra data left on the stack after execution is OK, too: + return (sigops <= MAX_P2SH_SIGOPS); + } + } + + if (stack.size() != (unsigned int)nArgsExpected) + return false; + } + + return true; +} + +unsigned int GetLegacySigOpCount(const CTransaction& tx) +{ + unsigned int nSigOps = 0; + BOOST_FOREACH(const CTxIn& txin, tx.vin) + { + nSigOps += txin.scriptSig.GetSigOpCount(false); + } + BOOST_FOREACH(const CTxOut& txout, tx.vout) + { + nSigOps += txout.scriptPubKey.GetSigOpCount(false); + } + return nSigOps; +} + +unsigned int GetP2SHSigOpCount(const CTransaction& tx, const CCoinsViewCache& inputs) +{ + if (tx.IsCoinBase()) + return 0; + + unsigned int nSigOps = 0; + for (unsigned int i = 0; i < tx.vin.size(); i++) + { + //if (tx.IsPegsImport() && i==0) continue; + const CTxOut &prevout = inputs.GetOutputFor(tx.vin[i]); + if (prevout.scriptPubKey.IsPayToScriptHash()) + nSigOps += prevout.scriptPubKey.GetSigOpCount(tx.vin[i].scriptSig); + } + return nSigOps; +} + +// Ensure that a coinbase transaction is structured according to the 
consensus rules of the chain +bool ContextualCheckCoinbaseTransaction(int32_t slowflag,const CBlock *block,CBlockIndex * const previndex,const CTransaction& tx, const int nHeight,int32_t validateprices) +{ + if ( slowflag != 0 && ASSETCHAINS_CBOPRET != 0 && validateprices != 0 && nHeight > 0 && tx.vout.size() > 0 ) + { + if ( hush_opretvalidate(block,previndex,nHeight,tx.vout[tx.vout.size()-1].scriptPubKey) < 0 ) + return(false); + } + return(true); +} + +/** + * Check a transaction contextually against a set of consensus rules valid at a given block height. + * + * Notes: + * 1. AcceptToMemoryPool calls CheckTransaction and this function. + * 2. ProcessNewBlock calls AcceptBlock, which calls CheckBlock (which calls CheckTransaction) + * and ContextualCheckBlock (which calls this function). + * 3. The isInitBlockDownload argument is only to assist with testing. + */ +bool ContextualCheckTransaction(int32_t slowflag,const CBlock *block, CBlockIndex * const previndex, + const CTransaction& tx, + CValidationState &state, + const int nHeight, + const int dosLevel, + bool (*isInitBlockDownload)(),int32_t validateprices) +{ + const bool overwinterActive = nHeight >=1 ? true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_OVERWINTER); + const bool saplingActive = nHeight >=1 ? 
true : false; //NetworkUpgradeActive(nHeight, Params().GetConsensus(), Consensus::UPGRADE_SAPLING); + + if (saplingActive) { + // Reject transactions with valid version but missing overwintered flag + if (tx.nVersion >= SAPLING_MIN_TX_VERSION && !tx.fOverwintered) { + return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwintered flag must be set"), + REJECT_INVALID, "tx-overwintered-flag-not-set"); + } + + // Reject transactions with non-Sapling version group ID + if (tx.fOverwintered && tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) + { + //return state.DoS(dosLevel, error("CheckTransaction(): invalid Sapling tx version"),REJECT_INVALID, "bad-sapling-tx-version-group-id"); + if ( 0 ) + { + string strHex = EncodeHexTx(tx); + fprintf(stderr,"invalid Sapling rawtx.%s\n",strHex.c_str()); + } + return state.DoS(isInitBlockDownload() ? 0 : dosLevel, + error("CheckTransaction(): invalid Sapling tx version"), + REJECT_INVALID, "bad-sapling-tx-version-group-id"); + } + + // Reject transactions with invalid version + if (tx.fOverwintered && tx.nVersion < SAPLING_MIN_TX_VERSION ) { + return state.DoS(100, error("CheckTransaction(): Sapling version too low"), + REJECT_INVALID, "bad-tx-sapling-version-too-low"); + } + + // Reject transactions with invalid version + if (tx.fOverwintered && tx.nVersion > SAPLING_MAX_TX_VERSION ) { + return state.DoS(100, error("CheckTransaction(): Sapling version too high"), + REJECT_INVALID, "bad-tx-sapling-version-too-high"); + } + } else if (overwinterActive) { + // Reject transactions with valid version but missing overwinter flag + if (tx.nVersion >= OVERWINTER_MIN_TX_VERSION && !tx.fOverwintered) { + return state.DoS(dosLevel, error("ContextualCheckTransaction(): overwinter flag must be set"), + REJECT_INVALID, "tx-overwinter-flag-not-set"); + } + + // Reject transactions with non-Overwinter version group ID + if (tx.fOverwintered && tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID) + { + //return state.DoS(dosLevel, 
error("CheckTransaction(): invalid Overwinter tx version"),REJECT_INVALID, "bad-overwinter-tx-version-group-id"); + return state.DoS(isInitBlockDownload() ? 0 : dosLevel, + error("CheckTransaction(): invalid Overwinter tx version"), + REJECT_INVALID, "bad-overwinter-tx-version-group-id"); + } + + // Reject transactions with invalid version + if (tx.fOverwintered && tx.nVersion > OVERWINTER_MAX_TX_VERSION ) { + return state.DoS(100, error("CheckTransaction(): overwinter version too high"), + REJECT_INVALID, "bad-tx-overwinter-version-too-high"); + } + } + + // Rules that apply to Overwinter or later: + //fprintf(stderr,"ht.%d overwinterActive.%d tx.overwintered.%d\n",nHeight,overwinterActive,overwinterActive); + if (overwinterActive) + { + // Reject transactions intended for Sprout + if (!tx.fOverwintered) + { + int32_t ht = Params().GetConsensus().vUpgrades[Consensus::UPGRADE_OVERWINTER].nActivationHeight; + fprintf(stderr,"overwinter is active tx.%s not, ht.%d vs %d\n",tx.GetHash().ToString().c_str(),nHeight,ht); + return state.DoS((ASSETCHAINS_PRIVATE != 0 || ht < 0 || nHeight < ht) ? 0 : dosLevel, error("ContextualCheckTransaction: overwinter is active"),REJECT_INVALID, "tx-overwinter-active"); + } + + // Check that all transactions are unexpired + if (IsExpiredTx(tx, nHeight)) { + // Don't increase banscore if the transaction only just expired + //int expiredDosLevel = IsExpiredTx(tx, nHeight - 1) ? (dosLevel > 10 ? 
dosLevel : 10) : 0; + //string strHex = EncodeHexTx(tx); + //fprintf(stderr, "transaction expired.%s\n",strHex.c_str()); + + // Do not ban nodes which relay expired tx's, it's a bug not an attack + return state.DoS(0, error("ContextualCheckTransaction(): transaction %s is expired, expiry block %i vs current block %i\n",tx.GetHash().ToString(),tx.nExpiryHeight,nHeight), REJECT_INVALID, "tx-overwinter-expired"); + } + } + + // Rules that apply before Sapling: + if (!saplingActive) { + // Size limits + //BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE(chainActive.LastTip()->GetHeight()+1) > MAX_TX_SIZE_BEFORE_SAPLING); // sanity + if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_BEFORE_SAPLING) + return state.DoS(100, error("ContextualCheckTransaction(): size limits failed"), + REJECT_INVALID, "bad-txns-oversize"); + } + + uint256 dataToBeSigned; + + if (!tx.IsMint() && + (!tx.vjoinsplit.empty() || + !tx.vShieldedSpend.empty() || + !tx.vShieldedOutput.empty())) + { + auto consensusBranchId = CurrentEpochBranchId(nHeight, Params().GetConsensus()); + // Empty output script. 
+ CScript scriptCode; + try { + dataToBeSigned = SignatureHash(scriptCode, tx, NOT_AN_INPUT, SIGHASH_ALL, 0, consensusBranchId); + } catch (std::logic_error ex) { + return state.DoS(100, error("CheckTransaction(): error computing signature hash"), + REJECT_INVALID, "error-computing-signature-hash"); + } + + } + + if (tx.IsCoinBase()) + { + if (!ContextualCheckCoinbaseTransaction(slowflag,block,previndex,tx, nHeight,validateprices)) + return state.DoS(100, error("CheckTransaction(): invalid script data for coinbase time lock"), + REJECT_INVALID, "bad-txns-invalid-script-data-for-coinbase-time-lock"); + } + + // Avoid ztx validation during IBD if height is less than latest checkpoint + if (fCheckpointsEnabled && (nHeight < Checkpoints::GetTotalBlocksEstimate(Params().Checkpoints())) ) { + return true; + } + + if (!tx.vShieldedSpend.empty() || + !tx.vShieldedOutput.empty()) + { + auto ctx = librustzcash_sapling_verification_ctx_init(); + + for (const SpendDescription &spend : tx.vShieldedSpend) { + if (!librustzcash_sapling_check_spend( + ctx, + spend.cv.begin(), + spend.anchor.begin(), + spend.nullifier.begin(), + spend.rk.begin(), + spend.zkproof.begin(), + spend.spendAuthSig.begin(), + dataToBeSigned.begin() + )) + { + librustzcash_sapling_verification_ctx_free(ctx); + return state.DoS(100, error("ContextualCheckTransaction(): Sapling spend description invalid"), + REJECT_INVALID, "bad-txns-sapling-spend-description-invalid"); + } + } + + for (const OutputDescription &output : tx.vShieldedOutput) { + if (!librustzcash_sapling_check_output( + ctx, + output.cv.begin(), + output.cm.begin(), + output.ephemeralKey.begin(), + output.zkproof.begin() + )) + { + librustzcash_sapling_verification_ctx_free(ctx); + return state.DoS(100, error("ContextualCheckTransaction(): Sapling output description invalid"), + REJECT_INVALID, "bad-txns-sapling-output-description-invalid"); + } + } + + if (!librustzcash_sapling_final_check( + ctx, + tx.valueBalance, + tx.bindingSig.begin(), + 
dataToBeSigned.begin() + )) + { + librustzcash_sapling_verification_ctx_free(ctx); + fprintf(stderr,"%s: Invalid sapling binding sig! tx=%s valueBalance=%li, bindingSig.size=%li\n", __func__, tx.GetHash().ToString().c_str(), tx.valueBalance, tx.bindingSig.size() ); + return state.DoS(100, error("ContextualCheckTransaction(): Sapling binding signature invalid"), + REJECT_INVALID, "bad-txns-sapling-binding-signature-invalid"); + } + + librustzcash_sapling_verification_ctx_free(ctx); + } + return true; +} + +bool CheckTransaction(uint32_t tiptime,const CTransaction& tx, CValidationState &state, + libzcash::ProofVerifier& verifier,int32_t txIndex, int32_t numTxs) +{ + // Don't count coinbase transactions because mining skews the count + if (!tx.IsCoinBase()) { + transactionsValidated.increment(); + } + + if (!CheckTransactionWithoutProofVerification(tiptime,tx, state)) { + return false; + } + return true; +} + +// This is and hush_notaries()/gethushseason/getacseason are all consensus code +int32_t hush_isnotaryvout(char *coinaddr,uint32_t tiptime) { + bool ishush3 = strncmp(SMART_CHAIN_SYMBOL, "HUSH3",5) == 0 ? true : false; + bool istush = strncmp(SMART_CHAIN_SYMBOL, "TUSH",4) == 0 ? true : false; + int32_t height = chainActive.LastTip()->GetHeight(); + int32_t season = (ishush3 || istush) ? 
gethushseason(height) : getacseason(tiptime); + fprintf(stderr,"%s: coinaddr=%s season=%d, tiptime=%d\n", __func__, coinaddr, season,tiptime); + if ( NOTARY_ADDRESSES[season-1][0][0] == 0 ) { + uint8_t pubkeys[64][33]; + hush_notaries(pubkeys,0,tiptime); + } + if ( strcmp(coinaddr,CRYPTO555_HUSHADDR) == 0 ) + return(1); + for (int32_t i = 0; i < NUM_HUSH_NOTARIES; i++) { + if ( strcmp(coinaddr,NOTARY_ADDRESSES[season-1][i]) == 0 ) { + if(fDebug) { + fprintf(stderr, "%s: coinaddr.%s notaryaddress[%i].%s\n",__func__, coinaddr,i,NOTARY_ADDRESSES[season-1][i]); + } + return(1); + } + } + return(0); +} + +int32_t hush_scpublic(uint32_t tiptime); + +bool CheckTransactionWithoutProofVerification(uint32_t tiptime,const CTransaction& tx, CValidationState &state) +{ + // Basic checks that don't depend on any context + int32_t invalid_private_taddr=0,z_z=0,z_t=0,t_z=0,acpublic = hush_scpublic(tiptime); + /** + * Previously: + * 1. The consensus rule below was: + * if (tx.nVersion < SPROUT_MIN_TX_VERSION) { ... } + * which checked if tx.nVersion fell within the range: + * INT32_MIN <= tx.nVersion < SPROUT_MIN_TX_VERSION + * 2. The parser allowed tx.nVersion to be negative + * + * Now: + * 1. The consensus rule checks to see if tx.Version falls within the range: + * 0 <= tx.nVersion < SPROUT_MIN_TX_VERSION + * 2. The previous consensus rule checked for negative values within the range: + * INT32_MIN <= tx.nVersion < 0 + * This is unnecessary for Overwinter transactions since the parser now + * interprets the sign bit as fOverwintered, so tx.nVersion is always >=0, + * and when Overwinter is not active ContextualCheckTransaction rejects + * transactions with fOverwintered set. 
When fOverwintered is set, + * this function and ContextualCheckTransaction will together check to + * ensure tx.nVersion avoids the following ranges: + * 0 <= tx.nVersion < OVERWINTER_MIN_TX_VERSION + * OVERWINTER_MAX_TX_VERSION < tx.nVersion <= INT32_MAX + */ + if (!tx.fOverwintered && tx.nVersion < SPROUT_MIN_TX_VERSION) { + return state.DoS(100, error("CheckTransaction(): version too low"), + REJECT_INVALID, "bad-txns-version-too-low"); + } else if (tx.fOverwintered) { + if (tx.nVersion < OVERWINTER_MIN_TX_VERSION) { + return state.DoS(100, error("CheckTransaction(): overwinter version too low"), + REJECT_INVALID, "bad-tx-overwinter-version-too-low"); + } + if (tx.nVersionGroupId != OVERWINTER_VERSION_GROUP_ID && + tx.nVersionGroupId != SAPLING_VERSION_GROUP_ID) { + return state.DoS(100, error("CheckTransaction(): unknown tx version group id"), + REJECT_INVALID, "bad-tx-version-group-id"); + } + if (tx.nExpiryHeight >= TX_EXPIRY_HEIGHT_THRESHOLD) { + return state.DoS(100, error("CheckTransaction(): expiry height is too high"), + REJECT_INVALID, "bad-tx-expiry-height-too-high"); + } + } + + // Transactions containing empty `vin` must have non-empty `vShieldedSpend`. + if (tx.vin.empty() && tx.vShieldedSpend.empty()) + return state.DoS(10, error("CheckTransaction(): vin empty"), + REJECT_INVALID, "bad-txns-vin-empty"); + + // Transactions containing empty `vout` must have non-empty `vShieldedOutput`. 
+ if (tx.vout.empty() && tx.vShieldedOutput.empty()) + return state.DoS(10, error("CheckTransaction(): vout empty"), + REJECT_INVALID, "bad-txns-vout-empty"); + + // Size limits + //BOOST_STATIC_ASSERT(MAX_BLOCK_SIZE(chainActive.LastTip()->GetHeight()+1) >= MAX_TX_SIZE_AFTER_SAPLING); // sanity + BOOST_STATIC_ASSERT(MAX_TX_SIZE_AFTER_SAPLING > MAX_TX_SIZE_BEFORE_SAPLING); // sanity + if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE_AFTER_SAPLING) + return state.DoS(100, error("CheckTransaction(): size limits failed"), + REJECT_INVALID, "bad-txns-oversize"); + + // Check for negative or overflow output values + CAmount nValueOut = 0; + int32_t iscoinbase = tx.IsCoinBase(); + BOOST_FOREACH(const CTxOut& txout, tx.vout) + { + if (txout.nValue < 0) + return state.DoS(100, error("CheckTransaction(): txout.nValue negative"), + REJECT_INVALID, "bad-txns-vout-negative"); + if (txout.nValue > MAX_MONEY) + { + fprintf(stderr,"%.8f > max %.8f\n",(double)txout.nValue/COIN,(double)MAX_MONEY/COIN); + return state.DoS(100, error("CheckTransaction(): txout.nValue too high"),REJECT_INVALID, "bad-txns-vout-toolarge"); + } + if ( ASSETCHAINS_PRIVATE != 0 ) + { + //fprintf(stderr,"private chain nValue %.8f iscoinbase.%d\n",(double)txout.nValue/COIN,iscoinbase); + if (iscoinbase == 0 && txout.nValue > 0) + { + char destaddr[65]; + Getscriptaddress(destaddr,txout.scriptPubKey); + + if ( hush_isnotaryvout(destaddr,tiptime) == 0 ) + { + const bool isburn = (strcmp(destaddr,BURN_ADDRESS) == 0); + if ((ASSETCHAINS_BURN == 1) && isburn && tx.vin.empty()) { + // -ac_burn=1 means only zaddrs can send to the burn address + fprintf(stderr,"%s: allowing zaddr to send to burn address %s on private chain because ac_burn=1\n", __func__, destaddr); + } else if ((ASSETCHAINS_BURN == 2) && isburn) { + // -ac_burn=2 allows notary taddrs to send directly to the burn address + fprintf(stderr,"%s: allowing burn address %s on private chain because ac_burn=2\n", __func__, destaddr); + 
} else { + invalid_private_taddr = 1; + fprintf(stderr,"%s: invalid taddr %s on private chain!\n", __func__, destaddr); + } + } + } + } + if ( txout.scriptPubKey.size() > DRAGON_MAXSCRIPTSIZE ) + return state.DoS(100, error("CheckTransaction(): txout.scriptPubKey.size() too big"),REJECT_INVALID, "bad-txns-opret-too-big"); + nValueOut += txout.nValue; + if (!MoneyRange(nValueOut)) + return state.DoS(100, error("CheckTransaction(): txout total out of range"), + REJECT_INVALID, "bad-txns-txouttotal-toolarge"); + } + + // Check for non-zero valueBalance when there are no Sapling inputs or outputs + if (tx.vShieldedSpend.empty() && tx.vShieldedOutput.empty() && tx.valueBalance != 0) { + return state.DoS(100, error("CheckTransaction(): tx.valueBalance has no sources or sinks"), + REJECT_INVALID, "bad-txns-valuebalance-nonzero"); + } + if ( acpublic != 0 && (tx.vShieldedSpend.empty() == 0 || tx.vShieldedOutput.empty() == 0) ) + { + return state.DoS(100, error("CheckTransaction(): this is a public chain, no sapling allowed"), + REJECT_INVALID, "bad-txns-acpublic-chain"); + } + if ( ASSETCHAINS_PRIVATE != 0 && invalid_private_taddr != 0 && tx.vShieldedSpend.empty() == 0 ) + { + return state.DoS(100, error("CheckTransaction(): this is a private chain, no sapling -> taddr"), + REJECT_INVALID, "bad-txns-acprivate-chain"); + } + // Check for overflow valueBalance + if (tx.valueBalance > MAX_MONEY || tx.valueBalance < -MAX_MONEY) { + return state.DoS(100, error("CheckTransaction(): abs(tx.valueBalance) too large"), + REJECT_INVALID, "bad-txns-valuebalance-toolarge"); + } + + if (tx.valueBalance <= 0) { + // NB: negative valueBalance "takes" money from the transparent value pool just as outputs do + nValueOut += -tx.valueBalance; + + if (!MoneyRange(nValueOut)) { + return state.DoS(100, error("CheckTransaction(): txout total out of range"), + REJECT_INVALID, "bad-txns-txouttotal-toolarge"); + } + } + + if ( ASSETCHAINS_PRIVATE != 0 && invalid_private_taddr != 0 ) + { + static 
uint32_t counter; + if ( counter++ < 10 ) + fprintf(stderr,"found taddr in private chain: z_z.%d z_t.%d t_z.%d vinsize.%d\n",z_z,z_t,t_z,(int32_t)tx.vin.size()); + if ( z_t == 0 || z_z != 0 || t_z != 0 || tx.vin.size() != 0 ) + return state.DoS(100, error("CheckTransaction(): this is a private chain, sending to taddrs not allowed"),REJECT_INVALID, "bad-txns-acprivacy-chain"); + } + if ( ASSETCHAINS_TXPOW != 0 ) + { + // BTC genesis coinbase 4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b + uint256 txid = tx.GetHash(); + if ( ((ASSETCHAINS_TXPOW & 2) != 0 && iscoinbase != 0) || ((ASSETCHAINS_TXPOW & 1) != 0 && iscoinbase == 0) ) + { + if ( ((uint8_t *)&txid)[0] != 0 || ((uint8_t *)&txid)[31] != 0 ) + { + uint256 genesistxid = uint256S("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"); + if ( txid != genesistxid ) + { + fprintf(stderr,"private chain iscoinbase.%d invalid txpow.%d txid.%s\n",iscoinbase,ASSETCHAINS_TXPOW,txid.GetHex().c_str()); + return state.DoS(100, error("CheckTransaction(): this is a txpow chain, must have 0x00 ends"),REJECT_INVALID, "bad-txns-actxpow-chain"); + } + } + } + } + + // Ensure input values do not exceed MAX_MONEY + // We have not resolved the txin values at this stage, + // but we do know what the joinsplits claim to add + // to the value pool. 
+ { + CAmount nValueIn = 0; + + // Also check for Sapling + if (tx.valueBalance >= 0) { + // NB: positive valueBalance "adds" money to the transparent value pool, just as inputs do + nValueIn += tx.valueBalance; + + if (!MoneyRange(nValueIn)) { + return state.DoS(100, error("CheckTransaction(): txin total out of range"), + REJECT_INVALID, "bad-txns-txintotal-toolarge"); + } + } + } + + // Check for duplicate inputs + set vInOutPoints; + BOOST_FOREACH(const CTxIn& txin, tx.vin) + { + if (vInOutPoints.count(txin.prevout)) + return state.DoS(100, error("CheckTransaction(): duplicate inputs"), + REJECT_INVALID, "bad-txns-inputs-duplicate"); + vInOutPoints.insert(txin.prevout); + } + + // Check for duplicate sapling nullifiers in this transaction + { + set vSaplingNullifiers; + BOOST_FOREACH(const SpendDescription& spend_desc, tx.vShieldedSpend) + { + if (vSaplingNullifiers.count(spend_desc.nullifier)) + return state.DoS(100, error("CheckTransaction(): duplicate nullifiers"), + REJECT_INVALID, "bad-spend-description-nullifiers-duplicate"); + + vSaplingNullifiers.insert(spend_desc.nullifier); + } + } + + if (tx.IsMint()) { + // There should be no joinsplits in a coinbase transaction + if (tx.vjoinsplit.size() > 0) + return state.DoS(100, error("CheckTransaction(): coinbase has joinsplits"), + REJECT_INVALID, "bad-cb-has-joinsplits"); + + // A coinbase transaction cannot have spend descriptions or output descriptions + if (tx.vShieldedSpend.size() > 0) + return state.DoS(100, error("CheckTransaction(): coinbase has spend descriptions"), + REJECT_INVALID, "bad-cb-has-spend-description"); + if (tx.vShieldedOutput.size() > 0) + return state.DoS(100, error("CheckTransaction(): coinbase has output descriptions"), + REJECT_INVALID, "bad-cb-has-output-description"); + + if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) + return state.DoS(100, error("CheckTransaction(): coinbase script size"), + REJECT_INVALID, "bad-cb-length"); + } else { + 
BOOST_FOREACH(const CTxIn& txin, tx.vin) + if (txin.prevout.IsNull()) + return state.DoS(10, error("CheckTransaction(): prevout is null"), + REJECT_INVALID, "bad-txns-prevout-null"); + } + + return true; +} + +void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, CTxUndo &txundo, int nHeight) +{ + if (!tx.IsMint()) // mark inputs spent + { + txundo.vprevout.reserve(tx.vin.size()); + BOOST_FOREACH(const CTxIn &txin, tx.vin) { + //if (tx.IsPegsImport() && txin.prevout.n==10e8) continue; + CCoinsModifier coins = inputs.ModifyCoins(txin.prevout.hash); + unsigned nPos = txin.prevout.n; + + if (nPos >= coins->vout.size() || coins->vout[nPos].IsNull()) + assert(false); + // mark an outpoint spent, and construct undo information + txundo.vprevout.push_back(CTxInUndo(coins->vout[nPos])); + coins->Spend(nPos); + if (coins->vout.size() == 0) { + CTxInUndo& undo = txundo.vprevout.back(); + undo.nHeight = coins->nHeight; + undo.fCoinBase = coins->fCoinBase; + undo.nVersion = coins->nVersion; + } + } + } + + // spend nullifiers + inputs.SetNullifiers(tx, true); + + inputs.ModifyCoins(tx.GetHash())->FromTx(tx, nHeight); // add outputs +} + +void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight) +{ + CTxUndo txundo; + UpdateCoins(tx, inputs, txundo, nHeight); +} + +bool CScriptCheck::operator()() { + const CScript &scriptSig = ptxTo->vin[nIn].scriptSig; + ServerTransactionSignatureChecker checker(ptxTo, nIn, amount, cacheStore, *txdata); + if (!VerifyScript(scriptSig, scriptPubKey, nFlags, checker, consensusBranchId, &error)) { + return ::error("CScriptCheck(): %s:%d VerifySignature failed: %s", ptxTo->GetHash().ToString(), nIn, ScriptErrorString(error)); + } + return true; +} + +int GetSpendHeight(const CCoinsViewCache& inputs) +{ + LOCK(cs_main); + CBlockIndex* pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second; + return pindexPrev->GetHeight() + 1; +} + +namespace Consensus { + bool CheckTxInputs(const CTransaction& tx, 
CValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, const Consensus::Params& consensusParams)
    {
        // Value-level input checks: inputs exist, coinbase maturity/timelock,
        // value ranges, fee non-negativity, and (optionally) the minimum
        // OP_RETURN fee.  Script/signature validity is NOT checked here.

        // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
        // for an attacker to attempt to split the network.
        if (!inputs.HaveInputs(tx))
            return state.Invalid(error("CheckInputs(): %s inputs unavailable", tx.GetHash().ToString()));

        // are the shielded requirements met?
        if (!inputs.HaveShieldedRequirements(tx))
            return state.Invalid(error("CheckInputs(): %s shielded requirements not met", tx.GetHash().ToString()));

        CAmount nValueIn = 0;
        CAmount nFees = 0;
        for (unsigned int i = 0; i < tx.vin.size(); i++)
        {
            const COutPoint &prevout = tx.vin[i].prevout;
            const CCoins *coins = inputs.AccessCoins(prevout.hash);
            assert(coins);

            if (coins->IsCoinBase()) {
                // ensure that output of coinbases are not still time locked
                if (coins->TotalTxValue() >= ASSETCHAINS_TIMELOCKGTE)
                {
                    uint64_t unlockTime = hush_block_unlocktime(coins->nHeight);
                    if (nSpendHeight < unlockTime) {
                        return state.DoS(10,
                                         error("CheckInputs(): tried to spend coinbase that is timelocked until block %d", unlockTime),
                                         REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
                    }
                }

                // Ensure that coinbases are matured, no DoS as retry may work later
                if (nSpendHeight - coins->nHeight < COINBASE_MATURITY) {
                    return state.Invalid(
                        error("CheckInputs(): tried to spend coinbase at depth %d/%d", nSpendHeight - coins->nHeight, (int32_t)COINBASE_MATURITY),
                        REJECT_INVALID, "bad-txns-premature-spend-of-coinbase");
                }

            }

            // Check for negative or overflow input values
            nValueIn += coins->vout[prevout.n].nValue;
            if (!MoneyRange(coins->vout[prevout.n].nValue) || !MoneyRange(nValueIn))
                return state.DoS(100, error("CheckInputs(): txin values out of range"),
                                 REJECT_INVALID, "bad-txns-inputvalues-outofrange");

        }

        // Shielded value flowing into the transparent pool counts as input.
        nValueIn += tx.GetShieldedValueIn();
        if (!MoneyRange(nValueIn))
            return state.DoS(100, error("CheckInputs(): shielded input to transparent value pool out of range"),
                             REJECT_INVALID, "bad-txns-inputvalues-outofrange");

        if (nValueIn < tx.GetValueOut())
        {
            fprintf(stderr,"spentheight.%d valuein %s vs %s error\n",nSpendHeight,FormatMoney(nValueIn).c_str(), FormatMoney(tx.GetValueOut()).c_str());
            return state.DoS(100, error("CheckInputs(): %s value in (%s) < value out (%s) diff %.8f",
                                        tx.GetHash().ToString(), FormatMoney(nValueIn), FormatMoney(tx.GetValueOut()),((double)nValueIn - tx.GetValueOut())/COIN),REJECT_INVALID, "bad-txns-in-belowout");
        }
        // Tally transaction fees
        CAmount nTxFee = nValueIn - tx.GetValueOut();
        if (nTxFee < 0)
            return state.DoS(100, error("CheckInputs(): %s nTxFee < 0", tx.GetHash().ToString()),
                             REJECT_INVALID, "bad-txns-fee-negative");
        // nFees accumulates only this transaction's fee here; kept as a
        // separate variable to mirror the upstream per-block tallying code.
        nFees += nTxFee;
        if (!MoneyRange(nFees))
            return state.DoS(100, error("CheckInputs(): nFees out of range"),
                             REJECT_INVALID, "bad-txns-fee-outofrange");

        //NOTE: Since we have access to fee here, verify that opreturn pays
        //required minimum fee, even though this is a check on outputs not
        //inputs. If we don't do it here we would need to duplicate already
        //done work somewhere else

        if ( ASSETCHAINS_MINOPRETURNFEE > 0 ) {
            BOOST_FOREACH(const CTxOut& txout, tx.vout) {
                const bool isopret = txout.scriptPubKey.IsOpReturn();

                // HUSH+DRGX do not use -ac_minopreturnfee so this does not (yet)
                // affect those chains, they will need a height activated consensus
                // change

                if ( isopret ) {
                    // Is there any difference between nTxFee and nFees ?
                    // They seem to be 2 vars with the same value
                    fprintf(stderr,"%s: opreturn=1 nFees=%ld nTxFee=%ld\n", __func__, nFees, nTxFee);
                    if (nTxFee < ASSETCHAINS_MINOPRETURNFEE) {
                        return state.DoS(100,error("CheckInputs(): tx does not have required mininum fee for OP_RETURN"), REJECT_INVALID, "bad-txns-minopreturnfee");
                    }
                }
            }
        }

        return true;
    }
}// namespace Consensus

// Full contextual input validation: value checks via Consensus::CheckTxInputs,
// then (if fScriptChecks) per-input script/signature verification.  When
// pvChecks is non-null, script checks are queued there for parallel execution
// instead of being run inline.
bool ContextualCheckInputs(
    const CTransaction& tx,
    CValidationState &state,
    const CCoinsViewCache &inputs,
    bool fScriptChecks,
    unsigned int flags,
    bool cacheStore,
    PrecomputedTransactionData& txdata,
    const Consensus::Params& consensusParams,
    uint32_t consensusBranchId,
    std::vector *pvChecks)
{
    if (!tx.IsMint())
    {
        if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs), consensusParams)) {
            return false;
        }

        if (pvChecks)
            pvChecks->reserve(tx.vin.size());

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks
        // before the last block chain checkpoint. This is safe because block merkle hashes are
        // still computed and checked, and any change will be caught at the next checkpoint.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < tx.vin.size(); i++) {
                //if (tx.IsPegsImport() && i==0) continue;
                const COutPoint &prevout = tx.vin[i].prevout;
                const CCoins* coins = inputs.AccessCoins(prevout.hash);
                assert(coins);

                // Verify signature
                CScriptCheck check(*coins, tx, i, flags, cacheStore, consensusBranchId, &txdata);
                if (pvChecks) {
                    // Queue for the parallel script-check threads instead of
                    // running inline; swap keeps the constructed check.
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                        // Check whether the failure was caused by a
                        // non-mandatory script verification check, such as
                        // non-standard DER encodings or non-null dummy
                        // arguments; if so, don't trigger DoS protection to
                        // avoid splitting the network between upgraded and
                        // non-upgraded nodes.
                        CScriptCheck check2(*coins, tx, i,
                                            flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, consensusBranchId, &txdata);
                        if (check2())
                            return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
                    }
                    // Failures of other flags indicate a transaction that is
                    // invalid in new blocks, e.g. a invalid P2SH. We DoS ban
                    // such nodes as they are not following the protocol. That
                    // said during an upgrade careful thought should be taken
                    // as to the correct behavior - we may want to continue
                    // peering with non-upgraded nodes even after a soft-fork
                    // super-majority vote has passed.
                    return state.DoS(100,false, REJECT_INVALID, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(check.GetScriptError())));
                }
            }
        }
    }

    return true;
}