Coverage Report

Created: 2024-10-21 15:10

/root/bitcoin/src/node/blockstorage.cpp
Line | Count | Source
1
// Copyright (c) 2011-2022 The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <node/blockstorage.h>
6
7
#include <arith_uint256.h>
8
#include <chain.h>
9
#include <consensus/params.h>
10
#include <consensus/validation.h>
11
#include <dbwrapper.h>
12
#include <flatfile.h>
13
#include <hash.h>
14
#include <kernel/blockmanager_opts.h>
15
#include <kernel/chainparams.h>
16
#include <kernel/messagestartchars.h>
17
#include <kernel/notifications_interface.h>
18
#include <logging.h>
19
#include <pow.h>
20
#include <primitives/block.h>
21
#include <primitives/transaction.h>
22
#include <random.h>
23
#include <serialize.h>
24
#include <signet.h>
25
#include <span.h>
26
#include <streams.h>
27
#include <sync.h>
28
#include <tinyformat.h>
29
#include <uint256.h>
30
#include <undo.h>
31
#include <util/batchpriority.h>
32
#include <util/check.h>
33
#include <util/fs.h>
34
#include <util/signalinterrupt.h>
35
#include <util/strencodings.h>
36
#include <util/translation.h>
37
#include <validation.h>
38
39
#include <map>
40
#include <ranges>
41
#include <unordered_map>
42
43
namespace kernel {
44
static constexpr uint8_t DB_BLOCK_FILES{'f'};
45
static constexpr uint8_t DB_BLOCK_INDEX{'b'};
46
static constexpr uint8_t DB_FLAG{'F'};
47
static constexpr uint8_t DB_REINDEX_FLAG{'R'};
48
static constexpr uint8_t DB_LAST_BLOCK{'l'};
49
// Keys used in previous version that might still be found in the DB:
50
// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
51
// BlockTreeDB::DB_TXINDEX{'t'}
52
// BlockTreeDB::ReadFlag("txindex")
53
54
bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
55
0
{
56
0
    return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
57
0
}
58
59
bool BlockTreeDB::WriteReindexing(bool fReindexing)
60
0
{
61
0
    if (fReindexing) {
62
0
        return Write(DB_REINDEX_FLAG, uint8_t{'1'});
63
0
    } else {
64
0
        return Erase(DB_REINDEX_FLAG);
65
0
    }
66
0
}
67
68
void BlockTreeDB::ReadReindexing(bool& fReindexing)
69
0
{
70
0
    fReindexing = Exists(DB_REINDEX_FLAG);
71
0
}
72
73
bool BlockTreeDB::ReadLastBlockFile(int& nFile)
74
0
{
75
0
    return Read(DB_LAST_BLOCK, nFile);
76
0
}
77
78
bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
79
0
{
80
0
    CDBBatch batch(*this);
81
0
    for (const auto& [file, info] : fileInfo) {
82
0
        batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
83
0
    }
84
0
    batch.Write(DB_LAST_BLOCK, nLastFile);
85
0
    for (const CBlockIndex* bi : blockinfo) {
86
0
        batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
87
0
    }
88
0
    return WriteBatch(batch, true);
89
0
}
90
91
bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
92
0
{
93
0
    return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
94
0
}
95
96
bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
97
0
{
98
0
    uint8_t ch;
99
0
    if (!Read(std::make_pair(DB_FLAG, name), ch)) {
100
0
        return false;
101
0
    }
102
0
    fValue = ch == uint8_t{'1'};
103
0
    return true;
104
0
}
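
WriteFlag/ReadFlag above store each named flag under a composite key (the one-byte DB_FLAG prefix plus the flag name) whose value is a single byte, '1' or '0'. A minimal sketch of that round trip, with a std::map standing in for the LevelDB-backed CDBWrapper store (the map and the write_flag/read_flag helpers are illustrative, not Bitcoin Core APIs):

// Illustrative sketch only: models the WriteFlag/ReadFlag key layout with a
// std::map instead of the database used by BlockTreeDB.
#include <cassert>
#include <cstdint>
#include <map>
#include <string>

int main()
{
    std::map<std::pair<uint8_t, std::string>, uint8_t> db; // stand-in store

    const uint8_t DB_FLAG{'F'};
    auto write_flag = [&](const std::string& name, bool value) {
        db[{DB_FLAG, name}] = value ? uint8_t{'1'} : uint8_t{'0'};
    };
    auto read_flag = [&](const std::string& name, bool& value) {
        auto it = db.find({DB_FLAG, name});
        if (it == db.end()) return false;
        value = (it->second == uint8_t{'1'});
        return true;
    };

    write_flag("prunedblockfiles", true);
    bool pruned{false};
    assert(read_flag("prunedblockfiles", pruned) && pruned);
}
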
105
106
bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
107
0
{
108
0
    AssertLockHeld(::cs_main);
109
0
    std::unique_ptr<CDBIterator> pcursor(NewIterator());
110
0
    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
111
112
    // Load m_block_index
113
0
    while (pcursor->Valid()) {
114
0
        if (interrupt) return false;
115
0
        std::pair<uint8_t, uint256> key;
116
0
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
117
0
            CDiskBlockIndex diskindex;
118
0
            if (pcursor->GetValue(diskindex)) {
119
                // Construct block index object
120
0
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
121
0
                pindexNew->pprev          = insertBlockIndex(diskindex.hashPrev);
122
0
                pindexNew->nHeight        = diskindex.nHeight;
123
0
                pindexNew->nFile          = diskindex.nFile;
124
0
                pindexNew->nDataPos       = diskindex.nDataPos;
125
0
                pindexNew->nUndoPos       = diskindex.nUndoPos;
126
0
                pindexNew->nVersion       = diskindex.nVersion;
127
0
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
128
0
                pindexNew->nTime          = diskindex.nTime;
129
0
                pindexNew->nBits          = diskindex.nBits;
130
0
                pindexNew->nNonce         = diskindex.nNonce;
131
0
                pindexNew->nStatus        = diskindex.nStatus;
132
0
                pindexNew->nTx            = diskindex.nTx;
133
134
0
                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
135
0
                    LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
136
0
                    return false;
137
0
                }
138
139
0
                pcursor->Next();
140
0
            } else {
141
0
                LogError("%s: failed to read value\n", __func__);
142
0
                return false;
143
0
            }
144
0
        } else {
145
0
            break;
146
0
        }
147
0
    }
148
149
0
    return true;
150
0
}
151
} // namespace kernel
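
LoadBlockIndexGuts above seeks the database cursor to the first key carrying the DB_BLOCK_INDEX prefix and walks forward until a key with a different prefix shows up, the usual way to scan one key "namespace" of a sorted key-value store. The same prefix-scan pattern over an ordered std::map, standing in for the CDBIterator (types and data here are illustrative):

// Illustrative prefix scan over an ordered map, mirroring the
// Seek(DB_BLOCK_INDEX, ...) / GetKey / Next loop above.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

int main()
{
    // Keys sort first by the one-byte prefix, then by the payload.
    std::map<std::pair<uint8_t, std::string>, int> db{
        {{'b', "block-hash-1"}, 1},
        {{'b', "block-hash-2"}, 2},
        {{'f', "file-0"}, 0}, // different prefix: the scan must stop before this
    };

    const uint8_t DB_BLOCK_INDEX{'b'};
    // Seek to the smallest key carrying the prefix, then walk forward while it matches.
    for (auto it = db.lower_bound({DB_BLOCK_INDEX, std::string{}});
         it != db.end() && it->first.first == DB_BLOCK_INDEX; ++it) {
        std::cout << it->first.second << " -> " << it->second << '\n';
    }
}
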
152
153
namespace node {
154
155
bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
156
0
{
157
    // First sort by most total work, ...
158
0
    if (pa->nChainWork > pb->nChainWork) return false;
159
0
    if (pa->nChainWork < pb->nChainWork) return true;
160
161
    // ... then by earliest time received, ...
162
0
    if (pa->nSequenceId < pb->nSequenceId) return false;
163
0
    if (pa->nSequenceId > pb->nSequenceId) return true;
164
165
    // Use pointer address as tie breaker (should only happen with blocks
166
    // loaded from disk, as those all have id 0).
167
0
    if (pa < pb) return false;
168
0
    if (pa > pb) return true;
169
170
    // Identical blocks.
171
0
    return false;
172
0
}
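
CBlockIndexWorkComparator is a strict weak ordering intended for std::set: an entry that compares "less" is less preferred, so the best candidate (most work, then earliest sequence id, then lowest address as a final tie-breaker) sorts last. A reduced sketch of the same comparator shape driving a std::set, with a simplified Candidate struct standing in for CBlockIndex:

// Illustrative: the highest-work, earliest-sequence candidate ends up at the
// back of the set, so *set.rbegin() is the preferred entry.
#include <cassert>
#include <set>

struct Candidate {
    unsigned long long chain_work;
    int sequence_id;
};

struct WorkComparator {
    bool operator()(const Candidate* a, const Candidate* b) const
    {
        if (a->chain_work != b->chain_work) return a->chain_work < b->chain_work; // less work = less preferred
        if (a->sequence_id != b->sequence_id) return a->sequence_id > b->sequence_id; // earlier id preferred
        return a > b; // pointer address as the final tie-breaker
    }
};

int main()
{
    Candidate a{100, 1}, b{200, 2}, c{200, 1};
    std::set<Candidate*, WorkComparator> candidates{&a, &b, &c};
    // c has the same work as b but an earlier sequence id, so it sorts last.
    assert(*candidates.rbegin() == &c);
}
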
173
174
bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
175
0
{
176
0
    return pa->nHeight < pb->nHeight;
177
0
}
178
179
std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
180
0
{
181
0
    AssertLockHeld(cs_main);
182
0
    std::vector<CBlockIndex*> rv;
183
0
    rv.reserve(m_block_index.size());
184
0
    for (auto& [_, block_index] : m_block_index) {
185
0
        rv.push_back(&block_index);
186
0
    }
187
0
    return rv;
188
0
}
189
190
CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
191
0
{
192
0
    AssertLockHeld(cs_main);
193
0
    BlockMap::iterator it = m_block_index.find(hash);
194
0
    return it == m_block_index.end() ? nullptr : &it->second;
195
0
}
196
197
const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
198
0
{
199
0
    AssertLockHeld(cs_main);
200
0
    BlockMap::const_iterator it = m_block_index.find(hash);
201
0
    return it == m_block_index.end() ? nullptr : &it->second;
202
0
}
203
204
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
205
0
{
206
0
    AssertLockHeld(cs_main);
207
208
0
    auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
209
0
    if (!inserted) {
210
0
        return &mi->second;
211
0
    }
212
0
    CBlockIndex* pindexNew = &(*mi).second;
213
214
    // We assign the sequence id to blocks only when the full data is available,
215
    // to avoid miners withholding blocks but broadcasting headers, to get a
216
    // competitive advantage.
217
0
    pindexNew->nSequenceId = 0;
218
219
0
    pindexNew->phashBlock = &((*mi).first);
220
0
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
221
0
    if (miPrev != m_block_index.end()) {
222
0
        pindexNew->pprev = &(*miPrev).second;
223
0
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
224
0
        pindexNew->BuildSkip();
225
0
    }
226
0
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
227
0
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
228
0
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
229
0
    if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
230
0
        best_header = pindexNew;
231
0
    }
232
233
0
    m_dirty_blockindex.insert(pindexNew);
234
235
0
    return pindexNew;
236
0
}
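
AddToBlockIndex derives the new header's cumulative values from its parent: nChainWork is the parent's work plus GetBlockProof of the header, and nTimeMax is the running maximum of timestamps along the chain. A toy version of that accumulation, with plain integers standing in for arith_uint256:

// Illustrative accumulation of cumulative work and running max timestamp along a chain.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Header {
    uint64_t proof; // stand-in for GetBlockProof(*pindex)
    uint32_t time;  // stand-in for nTime
};

int main()
{
    std::vector<Header> chain{{10, 1000}, {12, 990}, {11, 1020}};
    uint64_t chain_work = 0; // cumulative work of the previous entry
    uint32_t time_max = 0;   // max timestamp seen so far
    for (const Header& h : chain) {
        chain_work += h.proof;                 // pprev->nChainWork + GetBlockProof(*pindexNew)
        time_max = std::max(time_max, h.time); // max(pprev->nTimeMax, nTime)
        std::cout << chain_work << ' ' << time_max << '\n';
    }
}
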
237
238
void BlockManager::PruneOneBlockFile(const int fileNumber)
239
0
{
240
0
    AssertLockHeld(cs_main);
241
0
    LOCK(cs_LastBlockFile);
242
243
0
    for (auto& entry : m_block_index) {
244
0
        CBlockIndex* pindex = &entry.second;
245
0
        if (pindex->nFile == fileNumber) {
246
0
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
247
0
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
248
0
            pindex->nFile = 0;
249
0
            pindex->nDataPos = 0;
250
0
            pindex->nUndoPos = 0;
251
0
            m_dirty_blockindex.insert(pindex);
252
253
            // Prune from m_blocks_unlinked -- any block we prune would have
254
            // to be downloaded again in order to consider its chain, at which
255
            // point it would be considered as a candidate for
256
            // m_blocks_unlinked or setBlockIndexCandidates.
257
0
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
258
0
            while (range.first != range.second) {
259
0
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
260
0
                range.first++;
261
0
                if (_it->second == pindex) {
262
0
                    m_blocks_unlinked.erase(_it);
263
0
                }
264
0
            }
265
0
        }
266
0
    }
267
268
0
    m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
269
0
    m_dirty_fileinfo.insert(fileNumber);
270
0
}
271
272
void BlockManager::FindFilesToPruneManual(
273
    std::set<int>& setFilesToPrune,
274
    int nManualPruneHeight,
275
    const Chainstate& chain,
276
    ChainstateManager& chainman)
277
0
{
278
0
    assert(IsPruneMode() && nManualPruneHeight > 0);
279
280
0
    LOCK2(cs_main, cs_LastBlockFile);
281
0
    if (chain.m_chain.Height() < 0) {
282
0
        return;
283
0
    }
284
285
0
    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
286
287
0
    int count = 0;
288
0
    for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
289
0
        const auto& fileinfo = m_blockfile_info[fileNumber];
290
0
        if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
291
0
            continue;
292
0
        }
293
294
0
        PruneOneBlockFile(fileNumber);
295
0
        setFilesToPrune.insert(fileNumber);
296
0
        count++;
297
0
    }
298
0
    LogPrintf("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
299
0
        chain.GetRole(), last_block_can_prune, count);
300
0
}
301
302
void BlockManager::FindFilesToPrune(
303
    std::set<int>& setFilesToPrune,
304
    int last_prune,
305
    const Chainstate& chain,
306
    ChainstateManager& chainman)
307
0
{
308
0
    LOCK2(cs_main, cs_LastBlockFile);
309
    // Distribute our -prune budget over all chainstates.
310
0
    const auto target = std::max(
311
0
        MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
312
0
    const uint64_t target_sync_height = chainman.m_best_header->nHeight;
313
314
0
    if (chain.m_chain.Height() < 0 || target == 0) {
315
0
        return;
316
0
    }
317
0
    if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
318
0
        return;
319
0
    }
320
321
0
    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
322
323
0
    uint64_t nCurrentUsage = CalculateCurrentUsage();
324
    // We don't check to prune until after we've allocated new space for files
325
    // So we should leave a buffer under our target to account for another allocation
326
    // before the next pruning.
327
0
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
328
0
    uint64_t nBytesToPrune;
329
0
    int count = 0;
330
331
0
    if (nCurrentUsage + nBuffer >= target) {
332
        // On a prune event, the chainstate DB is flushed.
333
        // To avoid excessive prune events negating the benefit of high dbcache
334
        // values, we should not prune too rapidly.
335
        // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
336
0
        const auto chain_tip_height = chain.m_chain.Height();
337
0
        if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
338
            // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
339
0
            static constexpr uint64_t average_block_size = 1000000;  /* 1 MB */
340
0
            const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
341
0
            nBuffer += average_block_size * remaining_blocks;
342
0
        }
343
344
0
        for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
345
0
            const auto& fileinfo = m_blockfile_info[fileNumber];
346
0
            nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
347
348
0
            if (fileinfo.nSize == 0) {
349
0
                continue;
350
0
            }
351
352
0
            if (nCurrentUsage + nBuffer < target) { // are we below our target?
353
0
                break;
354
0
            }
355
356
            // don't prune files that could have a block that's not within the allowable
357
            // prune range for the chain being pruned.
358
0
            if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
359
0
                continue;
360
0
            }
361
362
0
            PruneOneBlockFile(fileNumber);
363
            // Queue up the files for removal
364
0
            setFilesToPrune.insert(fileNumber);
365
0
            nCurrentUsage -= nBytesToPrune;
366
0
            count++;
367
0
        }
368
0
    }
369
370
0
    LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
371
0
             chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
372
0
             (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
373
0
             min_block_to_prune, last_block_can_prune, count);
374
0
}
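
While the node is still in initial block download, FindFilesToPrune pads the prune buffer by an assumed 1 MB for every block left to sync, so a prune run is not immediately followed by another one. A worked version of that arithmetic; the chunk sizes and heights below are example values, not constants taken from this file:

// Illustrative prune-buffer arithmetic while the chain tip is behind the best header.
#include <cstdint>
#include <iostream>

int main()
{
    // Example chunk sizes (the real code uses BLOCKFILE_CHUNK_SIZE and UNDOFILE_CHUNK_SIZE).
    const uint64_t blockfile_chunk = 16ull * 1024 * 1024;
    const uint64_t undofile_chunk = 1ull * 1024 * 1024;
    const uint64_t average_block_size = 1'000'000; // the 1 MB per-block assumption from the code above

    uint64_t buffer = blockfile_chunk + undofile_chunk;

    const uint64_t tip_height = 800'000;         // example chain tip
    const uint64_t target_sync_height = 800'500; // example best known header
    buffer += average_block_size * (target_sync_height - tip_height); // pad for ~500 unsynced blocks

    std::cout << "prune buffer: " << buffer << " bytes\n"; // about 518 MB in this example
}
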
375
376
0
void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
377
0
    AssertLockHeld(::cs_main);
378
0
    m_prune_locks[name] = lock_info;
379
0
}
380
381
CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
382
0
{
383
0
    AssertLockHeld(cs_main);
384
385
0
    if (hash.IsNull()) {
386
0
        return nullptr;
387
0
    }
388
389
0
    const auto [mi, inserted]{m_block_index.try_emplace(hash)};
390
0
    CBlockIndex* pindex = &(*mi).second;
391
0
    if (inserted) {
392
0
        pindex->phashBlock = &((*mi).first);
393
0
    }
394
0
    return pindex;
395
0
}
396
397
bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
398
0
{
399
0
    if (!m_block_tree_db->LoadBlockIndexGuts(
400
0
            GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
401
0
        return false;
402
0
    }
403
404
0
    if (snapshot_blockhash) {
405
0
        const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
406
0
        if (!maybe_au_data) {
407
0
            m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
408
0
            return false;
409
0
        }
410
0
        const AssumeutxoData& au_data = *Assert(maybe_au_data);
411
0
        m_snapshot_height = au_data.height;
412
0
        CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
413
414
        // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
415
        // to disk, we must bootstrap the value for assumedvalid chainstates
416
        // from the hardcoded assumeutxo chainparams.
417
0
        base->m_chain_tx_count = au_data.m_chain_tx_count;
418
0
        LogPrintf("[snapshot] set m_chain_tx_count=%d for %s\n", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
419
0
    } else {
420
        // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
421
        // is null. This is relevant during snapshot completion, when the blockman may be loaded
422
        // with a height that then needs to be cleared after the snapshot is fully validated.
423
0
        m_snapshot_height.reset();
424
0
    }
425
426
0
    Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
427
428
    // Calculate nChainWork
429
0
    std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
430
0
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
431
0
              CBlockIndexHeightOnlyComparator());
432
433
0
    CBlockIndex* previous_index{nullptr};
434
0
    for (CBlockIndex* pindex : vSortedByHeight) {
435
0
        if (m_interrupt) return false;
436
0
        if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
437
0
            LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
438
0
            return false;
439
0
        }
440
0
        previous_index = pindex;
441
0
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
442
0
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
443
444
        // We can link the chain of blocks for which we've received transactions at some point, or
445
        // blocks that are assumed-valid on the basis of snapshot load (see
446
        // PopulateAndValidateSnapshot()).
447
        // Pruned nodes may have deleted the block.
448
0
        if (pindex->nTx > 0) {
449
0
            if (pindex->pprev) {
450
0
                if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
451
0
                        pindex->GetBlockHash() == *snapshot_blockhash) {
452
                    // Should have been set above; don't disturb it with code below.
453
0
                    Assert(pindex->m_chain_tx_count > 0);
454
0
                } else if (pindex->pprev->m_chain_tx_count > 0) {
455
0
                    pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
456
0
                } else {
457
0
                    pindex->m_chain_tx_count = 0;
458
0
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
459
0
                }
460
0
            } else {
461
0
                pindex->m_chain_tx_count = pindex->nTx;
462
0
            }
463
0
        }
464
0
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
465
0
            pindex->nStatus |= BLOCK_FAILED_CHILD;
466
0
            m_dirty_blockindex.insert(pindex);
467
0
        }
468
0
        if (pindex->pprev) {
469
0
            pindex->BuildSkip();
470
0
        }
471
0
    }
472
473
0
    return true;
474
0
}
475
476
bool BlockManager::WriteBlockIndexDB()
477
0
{
478
0
    AssertLockHeld(::cs_main);
479
0
    std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
480
0
    vFiles.reserve(m_dirty_fileinfo.size());
481
0
    for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
482
0
        vFiles.emplace_back(*it, &m_blockfile_info[*it]);
483
0
        m_dirty_fileinfo.erase(it++);
484
0
    }
485
0
    std::vector<const CBlockIndex*> vBlocks;
486
0
    vBlocks.reserve(m_dirty_blockindex.size());
487
0
    for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
488
0
        vBlocks.push_back(*it);
489
0
        m_dirty_blockindex.erase(it++);
490
0
    }
491
0
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
492
0
    if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
493
0
        return false;
494
0
    }
495
0
    return true;
496
0
}
497
498
bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
499
0
{
500
0
    if (!LoadBlockIndex(snapshot_blockhash)) {
501
0
        return false;
502
0
    }
503
0
    int max_blockfile_num{0};
504
505
    // Load block file info
506
0
    m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
507
0
    m_blockfile_info.resize(max_blockfile_num + 1);
508
0
    LogPrintf("%s: last block file = %i\n", __func__, max_blockfile_num);
509
0
    for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
510
0
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
511
0
    }
512
0
    LogPrintf("%s: last block file info: %s\n", __func__, m_blockfile_info[max_blockfile_num].ToString());
513
0
    for (int nFile = max_blockfile_num + 1; true; nFile++) {
514
0
        CBlockFileInfo info;
515
0
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
516
0
            m_blockfile_info.push_back(info);
517
0
        } else {
518
0
            break;
519
0
        }
520
0
    }
521
522
    // Check presence of blk files
523
0
    LogPrintf("Checking all blk files are present...\n");
524
0
    std::set<int> setBlkDataFiles;
525
0
    for (const auto& [_, block_index] : m_block_index) {
526
0
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
527
0
            setBlkDataFiles.insert(block_index.nFile);
528
0
        }
529
0
    }
530
0
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
531
0
        FlatFilePos pos(*it, 0);
532
0
        if (OpenBlockFile(pos, true).IsNull()) {
533
0
            return false;
534
0
        }
535
0
    }
536
537
0
    {
538
        // Initialize the blockfile cursors.
539
0
        LOCK(cs_LastBlockFile);
540
0
        for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
541
0
            const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
542
0
            m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
543
0
        }
544
0
    }
545
546
    // Check whether we have ever pruned block & undo files
547
0
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
548
0
    if (m_have_pruned) {
549
0
        LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
550
0
    }
551
552
    // Check whether we need to continue reindexing
553
0
    bool fReindexing = false;
554
0
    m_block_tree_db->ReadReindexing(fReindexing);
555
0
    if (fReindexing) m_blockfiles_indexed = false;
556
557
0
    return true;
558
0
}
559
560
void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
561
0
{
562
0
    AssertLockHeld(::cs_main);
563
0
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
564
0
    if (!m_have_pruned) {
565
0
        return;
566
0
    }
567
568
0
    std::set<int> block_files_to_prune;
569
0
    for (int file_number = 0; file_number < max_blockfile; file_number++) {
570
0
        if (m_blockfile_info[file_number].nSize == 0) {
571
0
            block_files_to_prune.insert(file_number);
572
0
        }
573
0
    }
574
575
0
    UnlinkPrunedFiles(block_files_to_prune);
576
0
}
577
578
const CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
579
0
{
580
0
    const MapCheckpoints& checkpoints = data.mapCheckpoints;
581
582
0
    for (const MapCheckpoints::value_type& i : checkpoints | std::views::reverse) {
583
0
        const uint256& hash = i.second;
584
0
        const CBlockIndex* pindex = LookupBlockIndex(hash);
585
0
        if (pindex) {
586
0
            return pindex;
587
0
        }
588
0
    }
589
0
    return nullptr;
590
0
}
591
592
bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
593
0
{
594
0
    AssertLockHeld(::cs_main);
595
0
    return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
596
0
}
597
598
const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
599
0
{
600
0
    AssertLockHeld(::cs_main);
601
0
    const CBlockIndex* last_block = &upper_block;
602
0
    assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
603
0
    while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
604
0
        if (lower_block) {
605
            // Return if we reached the lower_block
606
0
            if (last_block == lower_block) return lower_block;
607
            // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
608
            // and so far this is not allowed.
609
0
            assert(last_block->nHeight >= lower_block->nHeight);
610
0
        }
611
0
        last_block = last_block->pprev;
612
0
    }
613
0
    assert(last_block != nullptr);
614
0
    return last_block;
615
0
}
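
GetFirstBlock walks pprev pointers backwards for as long as every ancestor still satisfies the requested status mask and returns the earliest block of that contiguous run. The same walk on a minimal linked structure (the Node struct below is an illustrative stand-in, not CBlockIndex):

// Illustrative backwards walk over a chain of index nodes while a status mask
// keeps matching, mirroring GetFirstBlock above.
#include <cassert>
#include <cstdint>

struct Node {
    Node* prev{nullptr};
    uint32_t status{0};
    int height{0};
};

const Node* FirstWithStatus(const Node& upper, uint32_t mask)
{
    const Node* last = &upper;
    assert((last->status & mask) == mask); // the starting block must satisfy the mask
    while (last->prev && (last->prev->status & mask) == mask) {
        last = last->prev;
    }
    return last;
}

int main()
{
    constexpr uint32_t HAVE_DATA = 1u << 0;
    Node a{nullptr, 0, 0};      // pruned: no data
    Node b{&a, HAVE_DATA, 1};
    Node c{&b, HAVE_DATA, 2};
    assert(FirstWithStatus(c, HAVE_DATA) == &b); // the run of blocks with data starts at b
}
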
616
617
bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
618
0
{
619
0
    if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
620
0
    return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
621
0
}
622
623
// If we're using -prune with -reindex, then delete block files that will be ignored by the
624
// reindex.  Since reindexing works by starting at block file 0 and looping until a blockfile
625
// is missing, do the same here to delete any later block files after a gap.  Also delete all
626
// rev files since they'll be rewritten by the reindex anyway.  This ensures that m_blockfile_info
627
// is in sync with what's actually on disk by the time we start downloading, so that pruning
628
// works correctly.
629
void BlockManager::CleanupBlockRevFiles() const
630
0
{
631
0
    std::map<std::string, fs::path> mapBlockFiles;
632
633
    // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
634
    // Remove the rev files immediately and insert the blk file paths into an
635
    // ordered map keyed by block file index.
636
0
    LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
637
0
    for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
638
0
        const std::string path = fs::PathToString(it->path().filename());
639
0
        if (fs::is_regular_file(*it) &&
640
0
            path.length() == 12 &&
641
0
            path.substr(8,4) == ".dat")
642
0
        {
643
0
            if (path.substr(0, 3) == "blk") {
644
0
                mapBlockFiles[path.substr(3, 5)] = it->path();
645
0
            } else if (path.substr(0, 3) == "rev") {
646
0
                remove(it->path());
647
0
            }
648
0
        }
649
0
    }
650
651
    // Remove all block files that aren't part of a contiguous set starting at
652
    // zero by walking the ordered map (keys are block file indices) by
653
    // keeping a separate counter.  Once we hit a gap (or if 0 doesn't exist)
654
    // start removing block files.
655
0
    int nContigCounter = 0;
656
0
    for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
657
0
        if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
658
0
            nContigCounter++;
659
0
            continue;
660
0
        }
661
0
        remove(item.second);
662
0
    }
663
0
}
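
CleanupBlockRevFiles keeps only the block files whose numeric suffixes form a contiguous run starting at zero; after the first gap everything is removed, because a reindex stops at the first missing file anyway. A small sketch of that contiguity scan, with file names as plain strings instead of fs::path:

// Illustrative contiguity check over blkNNNNN.dat-style names: keep the run
// starting at zero, remove everything after the first gap.
#include <iostream>
#include <map>
#include <string>

int main()
{
    // Keyed by the 5-digit index substring, as in CleanupBlockRevFiles above.
    std::map<std::string, std::string> block_files{
        {"00000", "blk00000.dat"},
        {"00001", "blk00001.dat"},
        {"00003", "blk00003.dat"}, // gap: 00002 is missing
    };

    int contiguous = 0;
    for (const auto& [index, name] : block_files) {
        if (std::stoi(index) == contiguous) {
            ++contiguous; // still part of the contiguous run starting at zero
            continue;
        }
        std::cout << "would remove " << name << '\n'; // blk00003.dat
    }
}
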
664
665
CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
666
0
{
667
0
    LOCK(cs_LastBlockFile);
668
669
0
    return &m_blockfile_info.at(n);
670
0
}
671
672
bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const
673
0
{
674
    // Open history file to append
675
0
    AutoFile fileout{OpenUndoFile(pos)};
676
0
    if (fileout.IsNull()) {
677
0
        LogError("%s: OpenUndoFile failed\n", __func__);
678
0
        return false;
679
0
    }
680
681
    // Write index header
682
0
    unsigned int nSize = GetSerializeSize(blockundo);
683
0
    fileout << GetParams().MessageStart() << nSize;
684
685
    // Write undo data
686
0
    long fileOutPos = fileout.tell();
687
0
    pos.nPos = (unsigned int)fileOutPos;
688
0
    fileout << blockundo;
689
690
    // calculate & write checksum
691
0
    HashWriter hasher{};
692
0
    hasher << hashBlock;
693
0
    hasher << blockundo;
694
0
    fileout << hasher.GetHash();
695
696
0
    return true;
697
0
}
698
699
bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const
700
0
{
701
0
    const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
702
703
    // Open history file to read
704
0
    AutoFile filein{OpenUndoFile(pos, true)};
705
0
    if (filein.IsNull()) {
706
0
        LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString());
707
0
        return false;
708
0
    }
709
710
    // Read block
711
0
    uint256 hashChecksum;
712
0
    HashVerifier verifier{filein}; // Use HashVerifier as reserializing may lose data, c.f. commit d342424301013ec47dc146a4beb49d5c9319d80a
713
0
    try {
714
0
        verifier << index.pprev->GetBlockHash();
715
0
        verifier >> blockundo;
716
0
        filein >> hashChecksum;
717
0
    } catch (const std::exception& e) {
718
0
        LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
719
0
        return false;
720
0
    }
721
722
    // Verify checksum
723
0
    if (hashChecksum != verifier.GetHash()) {
724
0
        LogError("%s: Checksum mismatch at %s\n", __func__, pos.ToString());
725
0
        return false;
726
0
    }
727
728
0
    return true;
729
0
}
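
The undo checksum ties the undo payload to its block: the stored hash covers the parent block's hash followed by the serialized undo data, and UndoReadFromDisk recomputes it with a HashVerifier while deserializing. A reduced sketch of the same write-then-verify pattern, with std::hash as a toy stand-in for the double-SHA256 HashWriter:

// Illustrative round trip: the checksum covers a context value (the parent
// block hash in the real code) plus the payload that was actually read.
#include <cassert>
#include <functional>
#include <string>

std::size_t Checksum(const std::string& context, const std::string& payload)
{
    return std::hash<std::string>{}(context + '\0' + payload); // toy stand-in for SHA256D
}

int main()
{
    const std::string prev_block_hash = "example-parent-hash";
    const std::string undo_payload = "serialized-undo-bytes";

    // Writer side: store the payload together with its checksum.
    const std::size_t stored_checksum = Checksum(prev_block_hash, undo_payload);

    // Reader side: recompute over the same context and the bytes read back.
    assert(Checksum(prev_block_hash, undo_payload) == stored_checksum);
}
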
730
731
bool BlockManager::FlushUndoFile(int block_file, bool finalize)
732
0
{
733
0
    FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
734
0
    if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
735
0
        m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
736
0
        return false;
737
0
    }
738
0
    return true;
739
0
}
740
741
bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
742
0
{
743
0
    bool success = true;
744
0
    LOCK(cs_LastBlockFile);
745
746
0
    if (m_blockfile_info.size() < 1) {
747
        // Return if we haven't loaded any blockfiles yet. This happens during
748
        // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
749
        // then calls FlushStateToDisk()), resulting in a call to this function before we
750
        // have populated `m_blockfile_info` via LoadBlockIndexDB().
751
0
        return true;
752
0
    }
753
0
    assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
754
755
0
    FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
756
0
    if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
757
0
        m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
758
0
        success = false;
759
0
    }
760
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
761
    // e.g. during IBD or a sync after a node going offline
762
0
    if (!fFinalize || finalize_undo) {
763
0
        if (!FlushUndoFile(blockfile_num, finalize_undo)) {
764
0
            success = false;
765
0
        }
766
0
    }
767
0
    return success;
768
0
}
769
770
BlockfileType BlockManager::BlockfileTypeForHeight(int height)
771
0
{
772
0
    if (!m_snapshot_height) {
773
0
        return BlockfileType::NORMAL;
774
0
    }
775
0
    return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
776
0
}
777
778
bool BlockManager::FlushChainstateBlockFile(int tip_height)
779
0
{
780
0
    LOCK(cs_LastBlockFile);
781
0
    auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
782
    // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
783
    // but no blocks past the snapshot height have been written yet, so there
784
    // is no data associated with the chainstate, and it is safe not to flush.
785
0
    if (cursor) {
786
0
        return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
787
0
    }
788
    // No need to log warnings in this case.
789
0
    return true;
790
0
}
791
792
uint64_t BlockManager::CalculateCurrentUsage()
793
0
{
794
0
    LOCK(cs_LastBlockFile);
795
796
0
    uint64_t retval = 0;
797
0
    for (const CBlockFileInfo& file : m_blockfile_info) {
798
0
        retval += file.nSize + file.nUndoSize;
799
0
    }
800
0
    return retval;
801
0
}
802
803
void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
804
0
{
805
0
    std::error_code ec;
806
0
    for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
807
0
        FlatFilePos pos(*it, 0);
808
0
        const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
809
0
        const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
810
0
        if (removed_blockfile || removed_undofile) {
811
0
            LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
812
0
        }
813
0
    }
814
0
}
815
816
AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
817
0
{
818
0
    return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key};
819
0
}
820
821
/** Open an undo file (rev?????.dat) */
822
AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
823
0
{
824
0
    return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key};
825
0
}
826
827
fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
828
0
{
829
0
    return m_block_file_seq.FileName(pos);
830
0
}
831
832
FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
833
0
{
834
0
    LOCK(cs_LastBlockFile);
835
836
0
    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
837
838
0
    if (!m_blockfile_cursors[chain_type]) {
839
        // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
840
0
        assert(chain_type == BlockfileType::ASSUMED);
841
0
        const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
842
0
        m_blockfile_cursors[chain_type] = new_cursor;
843
0
        LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
844
0
    }
845
0
    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
846
847
0
    int nFile = last_blockfile;
848
0
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
849
0
        m_blockfile_info.resize(nFile + 1);
850
0
    }
851
852
0
    bool finalize_undo = false;
853
0
    unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
854
    // Use smaller blockfiles in test-only -fastprune mode - but avoid
855
    // the possibility of having a block not fit into the block file.
856
0
    if (m_opts.fast_prune) {
857
0
        max_blockfile_size = 0x10000; // 64kiB
858
0
        if (nAddSize >= max_blockfile_size) {
859
            // dynamically adjust the blockfile size to be larger than the added size
860
0
            max_blockfile_size = nAddSize + 1;
861
0
        }
862
0
    }
863
0
    assert(nAddSize < max_blockfile_size);
864
865
0
    while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
866
        // when the undo file is keeping up with the block file, we want to flush it explicitly
867
        // when it is lagging behind (more blocks arrive than are being connected), we let the
868
        // undo block write case handle it
869
0
        finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
870
0
                         Assert(m_blockfile_cursors[chain_type])->undo_height);
871
872
        // Try the next unclaimed blockfile number
873
0
        nFile = this->MaxBlockfileNum() + 1;
874
        // Set to increment MaxBlockfileNum() for next iteration
875
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
876
877
0
        if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
878
0
            m_blockfile_info.resize(nFile + 1);
879
0
        }
880
0
    }
881
0
    FlatFilePos pos;
882
0
    pos.nFile = nFile;
883
0
    pos.nPos = m_blockfile_info[nFile].nSize;
884
885
0
    if (nFile != last_blockfile) {
886
0
        LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
887
0
                 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
888
889
        // Do not propagate the return code. The flush concerns a previous block
890
        // and undo file that has already been written to. If a flush fails
891
        // here, and we crash, there is no expected additional block data
892
        // inconsistency arising from the flush failure here. However, the undo
893
        // data may be inconsistent after a crash if the flush is called during
894
        // a reindex. A flush error might also leave some of the data files
895
        // untrimmed.
896
0
        if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
897
0
            LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
898
0
                          "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
899
0
                          last_blockfile, finalize_undo, nFile);
900
0
        }
901
        // No undo data yet in the new file, so reset our undo-height tracking.
902
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
903
0
    }
904
905
0
    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
906
0
    m_blockfile_info[nFile].nSize += nAddSize;
907
908
0
    bool out_of_space;
909
0
    size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
910
0
    if (out_of_space) {
911
0
        m_opts.notifications.fatalError(_("Disk space is too low!"));
912
0
        return {};
913
0
    }
914
0
    if (bytes_allocated != 0 && IsPruneMode()) {
915
0
        m_check_for_pruning = true;
916
0
    }
917
918
0
    m_dirty_fileinfo.insert(nFile);
919
0
    return pos;
920
0
}
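
FindNextBlockPos keeps appending to the current block file until the next block would push it past the file-size limit, then rolls over to the next unclaimed file number. The roll-over decision in isolation; the sizes below are example values rather than MAX_BLOCKFILE_SIZE or the -fastprune cap:

// Illustrative roll-over decision: move to a new block file once the current
// one cannot fit the next block.
#include <cstdint>
#include <iostream>

int main()
{
    const uint32_t max_blockfile_size = 128u * 1024 * 1024; // example cap
    uint32_t current_file_size = 127u * 1024 * 1024;        // bytes already in the current blk file
    const uint32_t add_size = 2u * 1024 * 1024;             // serialized block plus its 8-byte header

    int file_num = 17; // example current cursor
    while (current_file_size + add_size >= max_blockfile_size) {
        ++file_num;             // claim the next file number
        current_file_size = 0;  // a fresh file starts empty
    }
    std::cout << "write at file " << file_num << ", offset " << current_file_size << '\n';
}
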
921
922
void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
923
0
{
924
0
    LOCK(cs_LastBlockFile);
925
926
    // Update the cursor so it points to the last file.
927
0
    const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
928
0
    auto& cursor{m_blockfile_cursors[chain_type]};
929
0
    if (!cursor || cursor->file_num < pos.nFile) {
930
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
931
0
    }
932
933
    // Update the file information with the current block.
934
0
    const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
935
0
    const int nFile = pos.nFile;
936
0
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
937
0
        m_blockfile_info.resize(nFile + 1);
938
0
    }
939
0
    m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
940
0
    m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
941
0
    m_dirty_fileinfo.insert(nFile);
942
0
}
943
944
bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
945
0
{
946
0
    pos.nFile = nFile;
947
948
0
    LOCK(cs_LastBlockFile);
949
950
0
    pos.nPos = m_blockfile_info[nFile].nUndoSize;
951
0
    m_blockfile_info[nFile].nUndoSize += nAddSize;
952
0
    m_dirty_fileinfo.insert(nFile);
953
954
0
    bool out_of_space;
955
0
    size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
956
0
    if (out_of_space) {
957
0
        return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
958
0
    }
959
0
    if (bytes_allocated != 0 && IsPruneMode()) {
960
0
        m_check_for_pruning = true;
961
0
    }
962
963
0
    return true;
964
0
}
965
966
bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const
967
0
{
968
    // Open history file to append
969
0
    AutoFile fileout{OpenBlockFile(pos)};
970
0
    if (fileout.IsNull()) {
971
0
        LogError("%s: OpenBlockFile failed\n", __func__);
972
0
        return false;
973
0
    }
974
975
    // Write index header
976
0
    unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block));
977
0
    fileout << GetParams().MessageStart() << nSize;
978
979
    // Write block
980
0
    long fileOutPos = fileout.tell();
981
0
    pos.nPos = (unsigned int)fileOutPos;
982
0
    fileout << TX_WITH_WITNESS(block);
983
984
0
    return true;
985
0
}
986
987
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
988
0
{
989
0
    AssertLockHeld(::cs_main);
990
0
    const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
991
0
    auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
992
993
    // Write undo information to disk
994
0
    if (block.GetUndoPos().IsNull()) {
995
0
        FlatFilePos _pos;
996
0
        if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) {
997
0
            LogError("%s: FindUndoPos failed\n", __func__);
998
0
            return false;
999
0
        }
1000
0
        if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) {
1001
0
            return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
1002
0
        }
1003
        // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
1004
        // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
1005
        // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
1006
        // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
1007
        // the FindNextBlockPos function
1008
0
        if (_pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) {
1009
            // Do not propagate the return code, a failed flush here should not
1010
            // be an indication for a failed write. If it were propagated here,
1011
            // the caller would assume the undo data not to be written, when in
1012
            // fact it is. Note though, that a failed flush might leave the data
1013
            // file untrimmed.
1014
0
            if (!FlushUndoFile(_pos.nFile, true)) {
1015
0
                LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile);
1016
0
            }
1017
0
        } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
1018
0
            cursor.undo_height = block.nHeight;
1019
0
        }
1020
        // update nUndoPos in block index
1021
0
        block.nUndoPos = _pos.nPos;
1022
0
        block.nStatus |= BLOCK_HAVE_UNDO;
1023
0
        m_dirty_blockindex.insert(&block);
1024
0
    }
1025
1026
0
    return true;
1027
0
}
1028
1029
bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const
1030
0
{
1031
0
    block.SetNull();
1032
1033
    // Open history file to read
1034
0
    AutoFile filein{OpenBlockFile(pos, true)};
1035
0
    if (filein.IsNull()) {
1036
0
        LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1037
0
        return false;
1038
0
    }
1039
1040
    // Read block
1041
0
    try {
1042
0
        filein >> TX_WITH_WITNESS(block);
1043
0
    } catch (const std::exception& e) {
1044
0
        LogError("%s: Deserialize or I/O error - %s at %s\n", __func__, e.what(), pos.ToString());
1045
0
        return false;
1046
0
    }
1047
1048
    // Check the header
1049
0
    if (!CheckProofOfWork(block.GetHash(), block.nBits, GetConsensus())) {
1050
0
        LogError("%s: Errors in block header at %s\n", __func__, pos.ToString());
1051
0
        return false;
1052
0
    }
1053
1054
    // Signet only: check block solution
1055
0
    if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1056
0
        LogError("%s: Errors in block solution at %s\n", __func__, pos.ToString());
1057
0
        return false;
1058
0
    }
1059
1060
0
    return true;
1061
0
}
1062
1063
bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const
1064
0
{
1065
0
    const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1066
1067
0
    if (!ReadBlockFromDisk(block, block_pos)) {
1068
0
        return false;
1069
0
    }
1070
0
    if (block.GetHash() != index.GetBlockHash()) {
1071
0
        LogError("%s: GetHash() doesn't match index for %s at %s\n", __func__, index.ToString(), block_pos.ToString());
1072
0
        return false;
1073
0
    }
1074
0
    return true;
1075
0
}
1076
1077
bool BlockManager::ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos) const
1078
0
{
1079
0
    FlatFilePos hpos = pos;
1080
    // If nPos is less than 8 the pos is null and we don't have the block data
1081
    // Return early to prevent undefined behavior of unsigned int underflow
1082
0
    if (hpos.nPos < 8) {
1083
0
        LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1084
0
        return false;
1085
0
    }
1086
0
    hpos.nPos -= 8; // Seek back 8 bytes for meta header
1087
0
    AutoFile filein{OpenBlockFile(hpos, true)};
1088
0
    if (filein.IsNull()) {
1089
0
        LogError("%s: OpenBlockFile failed for %s\n", __func__, pos.ToString());
1090
0
        return false;
1091
0
    }
1092
1093
0
    try {
1094
0
        MessageStartChars blk_start;
1095
0
        unsigned int blk_size;
1096
1097
0
        filein >> blk_start >> blk_size;
1098
1099
0
        if (blk_start != GetParams().MessageStart()) {
1100
0
            LogError("%s: Block magic mismatch for %s: %s versus expected %s\n", __func__, pos.ToString(),
1101
0
                         HexStr(blk_start),
1102
0
                         HexStr(GetParams().MessageStart()));
1103
0
            return false;
1104
0
        }
1105
1106
0
        if (blk_size > MAX_SIZE) {
1107
0
            LogError("%s: Block data is larger than maximum deserialization size for %s: %s versus %s\n", __func__, pos.ToString(),
1108
0
                         blk_size, MAX_SIZE);
1109
0
            return false;
1110
0
        }
1111
1112
0
        block.resize(blk_size); // Zeroing of memory is intentional here
1113
0
        filein.read(MakeWritableByteSpan(block));
1114
0
    } catch (const std::exception& e) {
1115
0
        LogError("%s: Read from block file failed: %s for %s\n", __func__, e.what(), pos.ToString());
1116
0
        return false;
1117
0
    }
1118
1119
0
    return true;
1120
0
}
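
ReadRawBlockFromDisk depends on the on-disk record layout in which every stored block is preceded by 8 bytes of metadata: the 4 network magic bytes followed by a 4-byte little-endian length, which is why it seeks back 8 bytes from the block position. A sketch of parsing such a record from a raw byte buffer (the buffer handling below is illustrative, not the AutoFile API, and it assumes a little-endian host):

// Illustrative parse of the 8-byte "magic + length" record header that
// precedes each block in blk*.dat files.
#include <array>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

int main()
{
    const std::array<uint8_t, 4> magic{0xf9, 0xbe, 0xb4, 0xd9}; // example network magic
    const std::vector<uint8_t> block_payload{0xde, 0xad, 0xbe, 0xef};

    // Build a record: [magic][length, little endian][payload]
    std::vector<uint8_t> file(magic.begin(), magic.end());
    const uint32_t len = block_payload.size();
    for (int i = 0; i < 4; ++i) file.push_back((len >> (8 * i)) & 0xff);
    file.insert(file.end(), block_payload.begin(), block_payload.end());

    // A block position points at the payload; the metadata sits 8 bytes earlier.
    const std::size_t block_pos = 8;
    const std::size_t header_pos = block_pos - 8;
    assert(std::memcmp(file.data() + header_pos, magic.data(), 4) == 0);
    uint32_t parsed_len = 0;
    std::memcpy(&parsed_len, file.data() + header_pos + 4, 4); // little-endian host assumed
    assert(parsed_len == block_payload.size());
}
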
1121
1122
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight)
1123
0
{
1124
0
    unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block));
1125
    // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total,
1126
    // defined as BLOCK_SERIALIZATION_HEADER_SIZE)
1127
0
    nBlockSize += static_cast<unsigned int>(BLOCK_SERIALIZATION_HEADER_SIZE);
1128
0
    FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())};
1129
0
    if (blockPos.IsNull()) {
1130
0
        LogError("%s: FindNextBlockPos failed\n", __func__);
1131
0
        return FlatFilePos();
1132
0
    }
1133
0
    if (!WriteBlockToDisk(block, blockPos)) {
1134
0
        m_opts.notifications.fatalError(_("Failed to write block."));
1135
0
        return FlatFilePos();
1136
0
    }
1137
0
    return blockPos;
1138
0
}
1139
1140
static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1141
0
{
1142
    // Bytes are serialized without length indicator, so this is also the exact
1143
    // size of the XOR-key file.
1144
0
    std::array<std::byte, 8> xor_key{};
1145
1146
0
    if (opts.use_xor && fs::is_empty(opts.blocks_dir)) {
1147
        // Only use random fresh key when the boolean option is set and on the
1148
        // very first start of the program.
1149
0
        FastRandomContext{}.fillrand(xor_key);
1150
0
    }
1151
1152
0
    const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1153
0
    if (fs::exists(xor_key_path)) {
1154
        // A pre-existing xor key file has priority.
1155
0
        AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1156
0
        xor_key_file >> xor_key;
1157
0
    } else {
1158
        // Create initial or missing xor key file
1159
0
        AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1160
#ifdef __MINGW64__
1161
            "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1162
#else
1163
0
            "wbx"
1164
0
#endif
1165
0
        )};
1166
0
        xor_key_file << xor_key;
1167
0
    }
1168
    // If the user disabled the key, it must be zero.
1169
0
    if (!opts.use_xor && xor_key != decltype(xor_key){}) {
1170
0
        throw std::runtime_error{
1171
0
            strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1172
0
                      "Stored key: '%s', stored path: '%s'.",
1173
0
                      HexStr(xor_key), fs::PathToString(xor_key_path)),
1174
0
        };
1175
0
    }
1176
0
    LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key));
1177
0
    return std::vector<std::byte>{xor_key.begin(), xor_key.end()};
1178
0
}
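
The 8-byte key created or read here is used to XOR-obfuscate every *.dat file in the blocks directory, and applying the same XOR twice restores the original bytes, which is why one operation serves for both reading and writing. A minimal sketch of rolling XOR with an 8-byte key; the offset-keyed indexing models the idea, while the real work happens inside AutoFile with the key returned above:

// Illustrative rolling XOR with an 8-byte key: applying it twice restores the data.
#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

void XorBuffer(std::vector<std::byte>& data, const std::array<std::byte, 8>& key, std::size_t file_offset)
{
    for (std::size_t i = 0; i < data.size(); ++i) {
        data[i] ^= key[(file_offset + i) % key.size()]; // key position follows the absolute file offset
    }
}

int main()
{
    const std::array<std::byte, 8> key{std::byte{0x11}, std::byte{0x22}, std::byte{0x33}, std::byte{0x44},
                                       std::byte{0x55}, std::byte{0x66}, std::byte{0x77}, std::byte{0x88}};
    std::vector<std::byte> data{std::byte{'b'}, std::byte{'l'}, std::byte{'k'}};
    const auto original = data;

    XorBuffer(data, key, /*file_offset=*/5); // obfuscate
    XorBuffer(data, key, /*file_offset=*/5); // de-obfuscate
    assert(data == original);
}
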
1179
1180
BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1181
0
    : m_prune_mode{opts.prune_target > 0},
1182
0
      m_xor_key{InitBlocksdirXorKey(opts)},
1183
0
      m_opts{std::move(opts)},
1184
0
      m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1185
0
      m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1186
0
      m_interrupt{interrupt} {}
1187
1188
class ImportingNow
1189
{
1190
    std::atomic<bool>& m_importing;
1191
1192
public:
1193
0
    ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1194
0
    {
1195
0
        assert(m_importing == false);
1196
0
        m_importing = true;
1197
0
    }
1198
    ~ImportingNow()
1199
0
    {
1200
0
        assert(m_importing == true);
1201
0
        m_importing = false;
1202
0
    }
1203
};
1204
1205
void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1206
0
{
1207
0
    ImportingNow imp{chainman.m_blockman.m_importing};
1208
1209
    // -reindex
1210
0
    if (!chainman.m_blockman.m_blockfiles_indexed) {
1211
0
        int nFile = 0;
1212
        // Map of disk positions for blocks with unknown parent (only used for reindex);
1213
        // parent hash -> child disk position, multiple children can have the same parent.
1214
0
        std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1215
0
        while (true) {
1216
0
            FlatFilePos pos(nFile, 0);
1217
0
            if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1218
0
                break; // No block files left to reindex
1219
0
            }
1220
0
            AutoFile file{chainman.m_blockman.OpenBlockFile(pos, true)};
1221
0
            if (file.IsNull()) {
1222
0
                break; // This error is logged in OpenBlockFile
1223
0
            }
1224
0
            LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile);
1225
0
            chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1226
0
            if (chainman.m_interrupt) {
1227
0
                LogPrintf("Interrupt requested. Exit %s\n", __func__);
1228
0
                return;
1229
0
            }
1230
0
            nFile++;
1231
0
        }
1232
0
        WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1233
0
        chainman.m_blockman.m_blockfiles_indexed = true;
1234
0
        LogPrintf("Reindexing finished\n");
1235
        // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1236
0
        chainman.ActiveChainstate().LoadGenesisBlock();
1237
0
    }
1238
1239
    // -loadblock=
1240
0
    for (const fs::path& path : import_paths) {
1241
0
        AutoFile file{fsbridge::fopen(path, "rb")};
1242
0
        if (!file.IsNull()) {
1243
0
            LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
1244
0
            chainman.LoadExternalBlockFile(file);
1245
0
            if (chainman.m_interrupt) {
1246
0
                LogPrintf("Interrupt requested. Exit %s\n", __func__);
1247
0
                return;
1248
0
            }
1249
0
        } else {
1250
0
            LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1251
0
        }
1252
0
    }
1253
1254
    // scan for better chains in the block chain database, that are not yet connected in the active best chain
1255
1256
    // We can't hold cs_main during ActivateBestChain even though we're accessing
1257
    // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1258
    // the relevant pointers before the ABC call.
1259
0
    for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1260
0
        BlockValidationState state;
1261
0
        if (!chainstate->ActivateBestChain(state, nullptr)) {
1262
0
            chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1263
0
            return;
1264
0
        }
1265
0
    }
1266
    // End scope of ImportingNow
1267
0
}
1268
1269
0
std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1270
0
    switch(type) {
1271
0
        case BlockfileType::NORMAL: os << "normal"; break;
1272
0
        case BlockfileType::ASSUMED: os << "assumed"; break;
1273
0
        default: os.setstate(std::ios_base::failbit);
1274
0
    }
1275
0
    return os;
1276
0
}
1277
1278
0
std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1279
0
    os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1280
0
    return os;
1281
0
}
1282
} // namespace node