Merge pull request 'Reduce memory usage' (#332) from reduce_memory into dev

Reviewed-on: https://git.hush.is/hush/hush3/pulls/332
duke
2023-10-25 15:11:51 +00:00
13 changed files with 192 additions and 60 deletions


@@ -3254,7 +3254,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
uint256 hashPrevBlock = pindex->pprev == NULL ? uint256() : pindex->pprev->GetBlockHash();
if ( hashPrevBlock != view.GetBestBlock() )
{
fprintf(stderr,"ConnectBlock(): hashPrevBlock != view.GetBestBlock()\n");
fprintf(stderr,"ConnectBlock(): hashPrevBlock != view.GetBestBlock() %s != %s\n", hashPrevBlock.ToString().c_str(), view.GetBestBlock().ToString().c_str() );
return state.DoS(1, error("ConnectBlock(): hashPrevBlock != view.GetBestBlock()"),
REJECT_INVALID, "hashPrevBlock-not-bestblock");
}
@@ -3700,7 +3701,7 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
vFiles.push_back(make_pair(*it, &vinfoBlockFile[*it]));
setDirtyFileInfo.erase(it++);
}
-std::vector<const CBlockIndex*> vBlocks;
+std::vector<CBlockIndex*> vBlocks;
vBlocks.reserve(setDirtyBlockIndex.size());
for (set<CBlockIndex*>::iterator it = setDirtyBlockIndex.begin(); it != setDirtyBlockIndex.end(); ) {
vBlocks.push_back(*it);
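
Aside on the hunk above: the const qualifier is dropped from the element type so that the loop added in the next hunk can call a mutating method on each collected index entry. A minimal illustration of why a pointer-to-const would not allow that call, using a stand-in type rather than the real CBlockIndex:

#include <vector>

// Stand-in type, only to show the constness issue; the real TrimSolution()
// body is sketched after the next hunk.
struct IndexEntry {
    void TrimSolution() {}   // non-const: mutates the entry
};

int main() {
    std::vector<IndexEntry*> vBlocks;                  // mutable pointers
    for (IndexEntry* p : vBlocks) p->TrimSolution();   // OK

    // std::vector<const IndexEntry*> vConstBlocks;
    // for (const IndexEntry* p : vConstBlocks)
    //     p->TrimSolution();            // error: calling non-const method
    return 0;
}
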
@@ -3709,6 +3710,12 @@ bool static FlushStateToDisk(CValidationState &state, FlushStateMode mode) {
if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile, vBlocks)) {
return AbortNode(state, "Files to write to block index database");
}
+// Now that we have written the block indices to the database, we do not
+// need to store solutions for these CBlockIndex objects in memory.
+// cs_main must be held here.
+for (CBlockIndex *pblockindex : vBlocks) {
+pblockindex->TrimSolution();
+}
}
// Finally remove any pruned files
if (fFlushForPrune)
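
For context on what the new loop saves: once an index entry has been written to the block index database, the Equihash solution it carries (1344 bytes per block with the widely used (200,9) parameters) no longer has to stay resident in memory, and can be re-read from disk if a full header is ever needed again. A rough sketch of what such a TrimSolution() method can look like, assuming the solution lives in a std::vector<unsigned char> member; the member name nSolution is an assumption, not taken from this diff:

#include <vector>

// Sketch only: an isolated stand-in, not the real CBlockIndex.
class BlockIndexSketch {
public:
    // Equihash solution copied from the header; on a long chain these
    // per-entry buffers dominate block index memory.
    std::vector<unsigned char> nSolution;

    // Drop the in-memory copy once the entry is safely on disk.
    // The caller is expected to hold cs_main, per the comment in the diff.
    void TrimSolution() {
        nSolution.clear();
        nSolution.shrink_to_fit();   // clear() alone keeps the capacity allocated
    }
};
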
@@ -6591,7 +6598,11 @@ void static CheckBlockIndex()
}
}
}
-// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
+// try {
+// assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash()); // Perhaps too slow
+// } catch (const runtime_error&) {
+// assert(!"Failed to read index entry");
+// }
// End: actual consistency checks.
// Try descending into the first subnode.
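
The consistency check above stays commented out, but the new try/catch around it reflects a real consequence of trimming: rebuilding a full header for an entry whose solution has been dropped can require a disk read, and that read can fail at runtime. A hedged sketch of that control flow; the disk-read helper is hypothetical and only stands in for however the block index database would be consulted:

#include <stdexcept>
#include <vector>

// Hypothetical helper: simulates recovering a trimmed solution from disk.
static std::vector<unsigned char> LoadSolutionFromIndexDb(bool readable) {
    return readable ? std::vector<unsigned char>(1344, 0)
                    : std::vector<unsigned char>{};
}

// Return the solution bytes for a header, falling back to disk when the
// in-memory copy was trimmed. Throws on a failed read, which is what the
// commented-out try/catch in CheckBlockIndex() would guard against.
std::vector<unsigned char> SolutionForHeader(const std::vector<unsigned char>& inMemory,
                                             bool diskReadable) {
    if (!inMemory.empty())
        return inMemory;                      // still cached in memory
    std::vector<unsigned char> fromDisk = LoadSolutionFromIndexDb(diskReadable);
    if (fromDisk.empty())
        throw std::runtime_error("Failed to read index entry");
    return fromDisk;
}
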