Coverage Report

Created: 2025-09-19 18:31

/root/bitcoin/src/node/blockstorage.cpp
Line
Count
Source
1
// Copyright (c) 2011-2022 The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <node/blockstorage.h>
6
7
#include <arith_uint256.h>
8
#include <chain.h>
9
#include <consensus/params.h>
10
#include <consensus/validation.h>
11
#include <dbwrapper.h>
12
#include <flatfile.h>
13
#include <hash.h>
14
#include <kernel/blockmanager_opts.h>
15
#include <kernel/chainparams.h>
16
#include <kernel/messagestartchars.h>
17
#include <kernel/notifications_interface.h>
18
#include <logging.h>
19
#include <pow.h>
20
#include <primitives/block.h>
21
#include <primitives/transaction.h>
22
#include <random.h>
23
#include <serialize.h>
24
#include <signet.h>
25
#include <span.h>
26
#include <streams.h>
27
#include <sync.h>
28
#include <tinyformat.h>
29
#include <uint256.h>
30
#include <undo.h>
31
#include <util/batchpriority.h>
32
#include <util/check.h>
33
#include <util/fs.h>
34
#include <util/obfuscation.h>
35
#include <util/signalinterrupt.h>
36
#include <util/strencodings.h>
37
#include <util/syserror.h>
38
#include <util/translation.h>
39
#include <validation.h>
40
41
#include <cstddef>
42
#include <map>
43
#include <optional>
44
#include <unordered_map>
45
46
namespace kernel {
47
static constexpr uint8_t DB_BLOCK_FILES{'f'};
48
static constexpr uint8_t DB_BLOCK_INDEX{'b'};
49
static constexpr uint8_t DB_FLAG{'F'};
50
static constexpr uint8_t DB_REINDEX_FLAG{'R'};
51
static constexpr uint8_t DB_LAST_BLOCK{'l'};
52
// Keys used in previous version that might still be found in the DB:
53
// BlockTreeDB::DB_TXINDEX_BLOCK{'T'};
54
// BlockTreeDB::DB_TXINDEX{'t'}
55
// BlockTreeDB::ReadFlag("txindex")
56
57
bool BlockTreeDB::ReadBlockFileInfo(int nFile, CBlockFileInfo& info)
58
0
{
59
0
    return Read(std::make_pair(DB_BLOCK_FILES, nFile), info);
60
0
}
61
62
bool BlockTreeDB::WriteReindexing(bool fReindexing)
63
0
{
64
0
    if (fReindexing) {
65
0
        return Write(DB_REINDEX_FLAG, uint8_t{'1'});
66
0
    } else {
67
0
        return Erase(DB_REINDEX_FLAG);
68
0
    }
69
0
}
70
71
void BlockTreeDB::ReadReindexing(bool& fReindexing)
72
0
{
73
0
    fReindexing = Exists(DB_REINDEX_FLAG);
74
0
}
75
76
bool BlockTreeDB::ReadLastBlockFile(int& nFile)
77
0
{
78
0
    return Read(DB_LAST_BLOCK, nFile);
79
0
}
80
81
bool BlockTreeDB::WriteBatchSync(const std::vector<std::pair<int, const CBlockFileInfo*>>& fileInfo, int nLastFile, const std::vector<const CBlockIndex*>& blockinfo)
82
11
{
83
11
    CDBBatch batch(*this);
84
11
    for (const auto& [file, info] : fileInfo) {
85
1
        batch.Write(std::make_pair(DB_BLOCK_FILES, file), *info);
86
1
    }
87
11
    batch.Write(DB_LAST_BLOCK, nLastFile);
88
199
    for (const CBlockIndex* bi : blockinfo) {
89
199
        batch.Write(std::make_pair(DB_BLOCK_INDEX, bi->GetBlockHash()), CDiskBlockIndex{bi});
90
199
    }
91
11
    return WriteBatch(batch, true);
92
11
}
93
94
bool BlockTreeDB::WriteFlag(const std::string& name, bool fValue)
95
0
{
96
0
    return Write(std::make_pair(DB_FLAG, name), fValue ? uint8_t{'1'} : uint8_t{'0'});
97
0
}
98
99
bool BlockTreeDB::ReadFlag(const std::string& name, bool& fValue)
100
0
{
101
0
    uint8_t ch;
102
0
    if (!Read(std::make_pair(DB_FLAG, name), ch)) {
103
0
        return false;
104
0
    }
105
0
    fValue = ch == uint8_t{'1'};
106
0
    return true;
107
0
}
108
109
bool BlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex, const util::SignalInterrupt& interrupt)
110
0
{
111
0
    AssertLockHeld(::cs_main);
112
0
    std::unique_ptr<CDBIterator> pcursor(NewIterator());
113
0
    pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
114
115
    // Load m_block_index
116
0
    while (pcursor->Valid()) {
117
0
        if (interrupt) return false;
118
0
        std::pair<uint8_t, uint256> key;
119
0
        if (pcursor->GetKey(key) && key.first == DB_BLOCK_INDEX) {
120
0
            CDiskBlockIndex diskindex;
121
0
            if (pcursor->GetValue(diskindex)) {
122
                // Construct block index object
123
0
                CBlockIndex* pindexNew = insertBlockIndex(diskindex.ConstructBlockHash());
124
0
                pindexNew->pprev          = insertBlockIndex(diskindex.hashPrev);
125
0
                pindexNew->nHeight        = diskindex.nHeight;
126
0
                pindexNew->nFile          = diskindex.nFile;
127
0
                pindexNew->nDataPos       = diskindex.nDataPos;
128
0
                pindexNew->nUndoPos       = diskindex.nUndoPos;
129
0
                pindexNew->nVersion       = diskindex.nVersion;
130
0
                pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
131
0
                pindexNew->nTime          = diskindex.nTime;
132
0
                pindexNew->nBits          = diskindex.nBits;
133
0
                pindexNew->nNonce         = diskindex.nNonce;
134
0
                pindexNew->nStatus        = diskindex.nStatus;
135
0
                pindexNew->nTx            = diskindex.nTx;
136
137
0
                if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
138
0
                    LogError("%s: CheckProofOfWork failed: %s\n", __func__, pindexNew->ToString());
139
0
                    return false;
140
0
                }
141
142
0
                pcursor->Next();
143
0
            } else {
144
0
                LogError("%s: failed to read value\n", __func__);
145
0
                return false;
146
0
            }
147
0
        } else {
148
0
            break;
149
0
        }
150
0
    }
151
152
0
    return true;
153
0
}
154
} // namespace kernel
155
156
namespace node {
157
158
bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
159
0
{
160
    // First sort by most total work, ...
161
0
    if (pa->nChainWork > pb->nChainWork) return false;
162
0
    if (pa->nChainWork < pb->nChainWork) return true;
163
164
    // ... then by earliest time received, ...
165
0
    if (pa->nSequenceId < pb->nSequenceId) return false;
166
0
    if (pa->nSequenceId > pb->nSequenceId) return true;
167
168
    // Use pointer address as tie breaker (should only happen with blocks
169
    // loaded from disk, as those all have id 0).
170
0
    if (pa < pb) return false;
171
0
    if (pa > pb) return true;
172
173
    // Identical blocks.
174
0
    return false;
175
0
}
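
CBlockIndexWorkComparator above ranks chain-tip candidates by most accumulated work, then by earliest sequence id, and finally by pointer address, where "less" means "worse candidate" so the best tip ends up at the rear of an ordered set. A minimal self-contained sketch of that three-stage tie-break, using a hypothetical simplified Tip type rather than CBlockIndex:

// Illustrative only: a simplified stand-in for the ordering used by
// CBlockIndexWorkComparator. Tip and TipCmp are hypothetical, not Bitcoin Core types.
#include <cassert>
#include <cstdint>
#include <functional>
#include <set>

struct Tip {
    uint64_t chain_work; // stand-in for nChainWork
    int32_t sequence_id; // stand-in for nSequenceId (lower = received earlier)
};

struct TipCmp {
    bool operator()(const Tip* a, const Tip* b) const
    {
        // "Less" means "worse candidate": less work, else later sequence id,
        // else higher address, mirroring the comparator above.
        if (a->chain_work != b->chain_work) return a->chain_work < b->chain_work;
        if (a->sequence_id != b->sequence_id) return a->sequence_id > b->sequence_id;
        return std::greater<const Tip*>{}(a, b);
    }
};

int main()
{
    Tip tips_arr[3] = {{10, 1}, {20, 2}, {20, 3}}; // least work; best; same work, later id
    std::set<const Tip*, TipCmp> candidates{&tips_arr[0], &tips_arr[1], &tips_arr[2]};
    // The best candidate (most work, earliest sequence id) is the set's last element,
    // which is how an ordered candidate set can be read off from its rbegin().
    assert(*candidates.rbegin() == &tips_arr[1]);
}
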
176
177
bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const
178
0
{
179
0
    return pa->nHeight < pb->nHeight;
180
0
}
181
182
std::vector<CBlockIndex*> BlockManager::GetAllBlockIndices()
183
0
{
184
0
    AssertLockHeld(cs_main);
185
0
    std::vector<CBlockIndex*> rv;
186
0
    rv.reserve(m_block_index.size());
187
0
    for (auto& [_, block_index] : m_block_index) {
188
0
        rv.push_back(&block_index);
189
0
    }
190
0
    return rv;
191
0
}
192
193
CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash)
194
60.6k
{
195
60.6k
    AssertLockHeld(cs_main);
196
60.6k
    BlockMap::iterator it = m_block_index.find(hash);
197
60.6k
    return it == m_block_index.end() ? nullptr : &it->second;
198
60.6k
}
199
200
const CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
201
0
{
202
0
    AssertLockHeld(cs_main);
203
0
    BlockMap::const_iterator it = m_block_index.find(hash);
204
0
    return it == m_block_index.end() ? nullptr : &it->second;
205
0
}
206
207
CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block, CBlockIndex*& best_header)
208
0
{
209
0
    AssertLockHeld(cs_main);
210
211
0
    auto [mi, inserted] = m_block_index.try_emplace(block.GetHash(), block);
212
0
    if (!inserted) {
213
0
        return &mi->second;
214
0
    }
215
0
    CBlockIndex* pindexNew = &(*mi).second;
216
217
    // We assign the sequence id to blocks only when the full data is available,
218
    // to avoid miners withholding blocks but broadcasting headers, to get a
219
    // competitive advantage.
220
0
    pindexNew->nSequenceId = 0;
221
222
0
    pindexNew->phashBlock = &((*mi).first);
223
0
    BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
224
0
    if (miPrev != m_block_index.end()) {
225
0
        pindexNew->pprev = &(*miPrev).second;
226
0
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
227
0
        pindexNew->BuildSkip();
228
0
    }
229
0
    pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
230
0
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
231
0
    pindexNew->RaiseValidity(BLOCK_VALID_TREE);
232
0
    if (best_header == nullptr || best_header->nChainWork < pindexNew->nChainWork) {
233
0
        best_header = pindexNew;
234
0
    }
235
236
0
    m_dirty_blockindex.insert(pindexNew);
237
238
0
    return pindexNew;
239
0
}
240
241
void BlockManager::PruneOneBlockFile(const int fileNumber)
242
0
{
243
0
    AssertLockHeld(cs_main);
244
0
    LOCK(cs_LastBlockFile);
245
246
0
    for (auto& entry : m_block_index) {
247
0
        CBlockIndex* pindex = &entry.second;
248
0
        if (pindex->nFile == fileNumber) {
249
0
            pindex->nStatus &= ~BLOCK_HAVE_DATA;
250
0
            pindex->nStatus &= ~BLOCK_HAVE_UNDO;
251
0
            pindex->nFile = 0;
252
0
            pindex->nDataPos = 0;
253
0
            pindex->nUndoPos = 0;
254
0
            m_dirty_blockindex.insert(pindex);
255
256
            // Prune from m_blocks_unlinked -- any block we prune would have
257
            // to be downloaded again in order to consider its chain, at which
258
            // point it would be considered as a candidate for
259
            // m_blocks_unlinked or setBlockIndexCandidates.
260
0
            auto range = m_blocks_unlinked.equal_range(pindex->pprev);
261
0
            while (range.first != range.second) {
262
0
                std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
263
0
                range.first++;
264
0
                if (_it->second == pindex) {
265
0
                    m_blocks_unlinked.erase(_it);
266
0
                }
267
0
            }
268
0
        }
269
0
    }
270
271
0
    m_blockfile_info.at(fileNumber) = CBlockFileInfo{};
272
0
    m_dirty_fileinfo.insert(fileNumber);
273
0
}
274
275
void BlockManager::FindFilesToPruneManual(
276
    std::set<int>& setFilesToPrune,
277
    int nManualPruneHeight,
278
    const Chainstate& chain,
279
    ChainstateManager& chainman)
280
0
{
281
0
    assert(IsPruneMode() && nManualPruneHeight > 0);
282
283
0
    LOCK2(cs_main, cs_LastBlockFile);
284
0
    if (chain.m_chain.Height() < 0) {
285
0
        return;
286
0
    }
287
288
0
    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, nManualPruneHeight);
289
290
0
    int count = 0;
291
0
    for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
292
0
        const auto& fileinfo = m_blockfile_info[fileNumber];
293
0
        if (fileinfo.nSize == 0 || fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
294
0
            continue;
295
0
        }
296
297
0
        PruneOneBlockFile(fileNumber);
298
0
        setFilesToPrune.insert(fileNumber);
299
0
        count++;
300
0
    }
301
0
    LogInfo("[%s] Prune (Manual): prune_height=%d removed %d blk/rev pairs",
302
0
        chain.GetRole(), last_block_can_prune, count);
303
0
}
304
305
void BlockManager::FindFilesToPrune(
306
    std::set<int>& setFilesToPrune,
307
    int last_prune,
308
    const Chainstate& chain,
309
    ChainstateManager& chainman)
310
0
{
311
0
    LOCK2(cs_main, cs_LastBlockFile);
312
    // Distribute our -prune budget over all chainstates.
313
0
    const auto target = std::max(
314
0
        MIN_DISK_SPACE_FOR_BLOCK_FILES, GetPruneTarget() / chainman.GetAll().size());
315
0
    const uint64_t target_sync_height = chainman.m_best_header->nHeight;
316
317
0
    if (chain.m_chain.Height() < 0 || target == 0) {
318
0
        return;
319
0
    }
320
0
    if (static_cast<uint64_t>(chain.m_chain.Height()) <= chainman.GetParams().PruneAfterHeight()) {
321
0
        return;
322
0
    }
323
324
0
    const auto [min_block_to_prune, last_block_can_prune] = chainman.GetPruneRange(chain, last_prune);
325
326
0
    uint64_t nCurrentUsage = CalculateCurrentUsage();
327
    // We don't check to prune until after we've allocated new space for files
328
    // So we should leave a buffer under our target to account for another allocation
329
    // before the next pruning.
330
0
    uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
331
0
    uint64_t nBytesToPrune;
332
0
    int count = 0;
333
334
0
    if (nCurrentUsage + nBuffer >= target) {
335
        // On a prune event, the chainstate DB is flushed.
336
        // To avoid excessive prune events negating the benefit of high dbcache
337
        // values, we should not prune too rapidly.
338
        // So when pruning in IBD, increase the buffer to avoid a re-prune too soon.
339
0
        const auto chain_tip_height = chain.m_chain.Height();
340
0
        if (chainman.IsInitialBlockDownload() && target_sync_height > (uint64_t)chain_tip_height) {
341
            // Since this is only relevant during IBD, we assume blocks are at least 1 MB on average
342
0
            static constexpr uint64_t average_block_size = 1000000;  /* 1 MB */
343
0
            const uint64_t remaining_blocks = target_sync_height - chain_tip_height;
344
0
            nBuffer += average_block_size * remaining_blocks;
345
0
        }
346
347
0
        for (int fileNumber = 0; fileNumber < this->MaxBlockfileNum(); fileNumber++) {
348
0
            const auto& fileinfo = m_blockfile_info[fileNumber];
349
0
            nBytesToPrune = fileinfo.nSize + fileinfo.nUndoSize;
350
351
0
            if (fileinfo.nSize == 0) {
352
0
                continue;
353
0
            }
354
355
0
            if (nCurrentUsage + nBuffer < target) { // are we below our target?
356
0
                break;
357
0
            }
358
359
            // don't prune files that could have a block that's not within the allowable
360
            // prune range for the chain being pruned.
361
0
            if (fileinfo.nHeightLast > (unsigned)last_block_can_prune || fileinfo.nHeightFirst < (unsigned)min_block_to_prune) {
362
0
                continue;
363
0
            }
364
365
0
            PruneOneBlockFile(fileNumber);
366
            // Queue up the files for removal
367
0
            setFilesToPrune.insert(fileNumber);
368
0
            nCurrentUsage -= nBytesToPrune;
369
0
            count++;
370
0
        }
371
0
    }
372
373
0
    LogDebug(BCLog::PRUNE, "[%s] target=%dMiB actual=%dMiB diff=%dMiB min_height=%d max_prune_height=%d removed %d blk/rev pairs\n",
374
0
             chain.GetRole(), target / 1024 / 1024, nCurrentUsage / 1024 / 1024,
375
0
             (int64_t(target) - int64_t(nCurrentUsage)) / 1024 / 1024,
376
0
             min_block_to_prune, last_block_can_prune, count);
377
0
}
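
FindFilesToPrune splits the -prune budget across chainstates (target = max(MIN_DISK_SPACE_FOR_BLOCK_FILES, prune_target / number_of_chainstates)) and, during IBD, inflates the buffer by roughly 1 MB per block still to be downloaded so that one prune event frees enough headroom to avoid an immediate re-prune. A standalone sketch of that buffer arithmetic, with the chunk-size constants assumed to match the 16 MiB / 1 MiB values declared in blockstorage.h:

// Hedged, standalone sketch of the nBuffer arithmetic in FindFilesToPrune.
// The chunk-size constants are assumptions restated here only so the example
// compiles on its own.
#include <cstdint>
#include <cstdio>

int main()
{
    const uint64_t blockfile_chunk = 0x1000000;  // assumed BLOCKFILE_CHUNK_SIZE (16 MiB)
    const uint64_t undofile_chunk = 0x100000;    // assumed UNDOFILE_CHUNK_SIZE (1 MiB)
    const uint64_t average_block_size = 1000000; // the 1 MB IBD assumption used above

    uint64_t buffer = blockfile_chunk + undofile_chunk;

    const uint64_t chain_tip_height = 800000;
    const uint64_t target_sync_height = 800500; // best header is 500 blocks ahead
    buffer += average_block_size * (target_sync_height - chain_tip_height);

    // Pruning keeps removing files while current usage + buffer >= target, so the
    // extra ~500 MB here makes one prune event clear room for the next ~500 blocks.
    std::printf("buffer = %llu bytes\n", static_cast<unsigned long long>(buffer));
    return 0;
}
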
378
379
0
void BlockManager::UpdatePruneLock(const std::string& name, const PruneLockInfo& lock_info) {
380
0
    AssertLockHeld(::cs_main);
381
0
    m_prune_locks[name] = lock_info;
382
0
}
383
384
CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
385
0
{
386
0
    AssertLockHeld(cs_main);
387
388
0
    if (hash.IsNull()) {
389
0
        return nullptr;
390
0
    }
391
392
0
    const auto [mi, inserted]{m_block_index.try_emplace(hash)};
393
0
    CBlockIndex* pindex = &(*mi).second;
394
0
    if (inserted) {
395
0
        pindex->phashBlock = &((*mi).first);
396
0
    }
397
0
    return pindex;
398
0
}
399
400
bool BlockManager::LoadBlockIndex(const std::optional<uint256>& snapshot_blockhash)
401
0
{
402
0
    if (!m_block_tree_db->LoadBlockIndexGuts(
403
0
            GetConsensus(), [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); }, m_interrupt)) {
404
0
        return false;
405
0
    }
406
407
0
    if (snapshot_blockhash) {
408
0
        const std::optional<AssumeutxoData> maybe_au_data = GetParams().AssumeutxoForBlockhash(*snapshot_blockhash);
409
0
        if (!maybe_au_data) {
410
0
            m_opts.notifications.fatalError(strprintf(_("Assumeutxo data not found for the given blockhash '%s'."), snapshot_blockhash->ToString()));
411
0
            return false;
412
0
        }
413
0
        const AssumeutxoData& au_data = *Assert(maybe_au_data);
414
0
        m_snapshot_height = au_data.height;
415
0
        CBlockIndex* base{LookupBlockIndex(*snapshot_blockhash)};
416
417
        // Since m_chain_tx_count (responsible for estimated progress) isn't persisted
418
        // to disk, we must bootstrap the value for assumedvalid chainstates
419
        // from the hardcoded assumeutxo chainparams.
420
0
        base->m_chain_tx_count = au_data.m_chain_tx_count;
421
0
        LogInfo("[snapshot] set m_chain_tx_count=%d for %s", au_data.m_chain_tx_count, snapshot_blockhash->ToString());
422
0
    } else {
423
        // If this isn't called with a snapshot blockhash, make sure the cached snapshot height
424
        // is null. This is relevant during snapshot completion, when the blockman may be loaded
425
        // with a height that then needs to be cleared after the snapshot is fully validated.
426
0
        m_snapshot_height.reset();
427
0
    }
428
429
0
    Assert(m_snapshot_height.has_value() == snapshot_blockhash.has_value());
430
431
    // Calculate nChainWork
432
0
    std::vector<CBlockIndex*> vSortedByHeight{GetAllBlockIndices()};
433
0
    std::sort(vSortedByHeight.begin(), vSortedByHeight.end(),
434
0
              CBlockIndexHeightOnlyComparator());
435
436
0
    CBlockIndex* previous_index{nullptr};
437
0
    for (CBlockIndex* pindex : vSortedByHeight) {
438
0
        if (m_interrupt) return false;
439
0
        if (previous_index && pindex->nHeight > previous_index->nHeight + 1) {
440
0
            LogError("%s: block index is non-contiguous, index of height %d missing\n", __func__, previous_index->nHeight + 1);
441
0
            return false;
442
0
        }
443
0
        previous_index = pindex;
444
0
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
445
0
        pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
446
447
        // We can link the chain of blocks for which we've received transactions at some point, or
448
        // blocks that are assumed-valid on the basis of snapshot load (see
449
        // PopulateAndValidateSnapshot()).
450
        // Pruned nodes may have deleted the block.
451
0
        if (pindex->nTx > 0) {
452
0
            if (pindex->pprev) {
453
0
                if (m_snapshot_height && pindex->nHeight == *m_snapshot_height &&
454
0
                        pindex->GetBlockHash() == *snapshot_blockhash) {
455
                    // Should have been set above; don't disturb it with code below.
456
0
                    Assert(pindex->m_chain_tx_count > 0);
457
0
                } else if (pindex->pprev->m_chain_tx_count > 0) {
458
0
                    pindex->m_chain_tx_count = pindex->pprev->m_chain_tx_count + pindex->nTx;
459
0
                } else {
460
0
                    pindex->m_chain_tx_count = 0;
461
0
                    m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
462
0
                }
463
0
            } else {
464
0
                pindex->m_chain_tx_count = pindex->nTx;
465
0
            }
466
0
        }
467
0
        if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
468
0
            pindex->nStatus |= BLOCK_FAILED_CHILD;
469
0
            m_dirty_blockindex.insert(pindex);
470
0
        }
471
0
        if (pindex->pprev) {
472
0
            pindex->BuildSkip();
473
0
        }
474
0
    }
475
476
0
    return true;
477
0
}
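
LoadBlockIndex recomputes nChainWork in height order and, when block data is present, propagates m_chain_tx_count as the parent's cumulative count plus the block's own nTx; blocks whose parent count is still unknown get 0 and are parked in m_blocks_unlinked. The cumulative-count arithmetic, reduced to a standalone sketch:

// Simplified sketch of the m_chain_tx_count bookkeeping: a block's cumulative
// transaction count is its parent's cumulative count plus its own nTx, and it is
// only meaningful once every ancestor's data has been seen.
#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    const std::vector<uint64_t> n_tx{1, 3, 2, 5}; // per-block tx counts, genesis first
    std::vector<uint64_t> chain_tx_count(n_tx.size());
    uint64_t parent_total = 0;
    for (size_t i = 0; i < n_tx.size(); ++i) {
        chain_tx_count[i] = parent_total + n_tx[i];
        parent_total = chain_tx_count[i];
    }
    assert(chain_tx_count.back() == 11); // 1 + 3 + 2 + 5
}
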
478
479
bool BlockManager::WriteBlockIndexDB()
480
11
{
481
11
    AssertLockHeld(::cs_main);
482
11
    std::vector<std::pair<int, const CBlockFileInfo*>> vFiles;
483
11
    vFiles.reserve(m_dirty_fileinfo.size());
484
12
    for (std::set<int>::iterator it = m_dirty_fileinfo.begin(); it != m_dirty_fileinfo.end();) {
485
1
        vFiles.emplace_back(*it, &m_blockfile_info[*it]);
486
1
        m_dirty_fileinfo.erase(it++);
487
1
    }
488
11
    std::vector<const CBlockIndex*> vBlocks;
489
11
    vBlocks.reserve(m_dirty_blockindex.size());
490
210
    for (std::set<CBlockIndex*>::iterator it = m_dirty_blockindex.begin(); it != m_dirty_blockindex.end();) {
491
199
        vBlocks.push_back(*it);
492
199
        m_dirty_blockindex.erase(it++);
493
199
    }
494
11
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
495
11
    if (!m_block_tree_db->WriteBatchSync(vFiles, max_blockfile, vBlocks)) {
496
0
        return false;
497
0
    }
498
11
    return true;
499
11
}
500
501
bool BlockManager::LoadBlockIndexDB(const std::optional<uint256>& snapshot_blockhash)
502
0
{
503
0
    if (!LoadBlockIndex(snapshot_blockhash)) {
504
0
        return false;
505
0
    }
506
0
    int max_blockfile_num{0};
507
508
    // Load block file info
509
0
    m_block_tree_db->ReadLastBlockFile(max_blockfile_num);
510
0
    m_blockfile_info.resize(max_blockfile_num + 1);
511
0
    LogInfo("Loading block index db: last block file = %i", max_blockfile_num);
512
0
    for (int nFile = 0; nFile <= max_blockfile_num; nFile++) {
513
0
        m_block_tree_db->ReadBlockFileInfo(nFile, m_blockfile_info[nFile]);
514
0
    }
515
0
    LogInfo("Loading block index db: last block file info: %s", m_blockfile_info[max_blockfile_num].ToString());
516
0
    for (int nFile = max_blockfile_num + 1; true; nFile++) {
517
0
        CBlockFileInfo info;
518
0
        if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
519
0
            m_blockfile_info.push_back(info);
520
0
        } else {
521
0
            break;
522
0
        }
523
0
    }
524
525
    // Check presence of blk files
526
0
    LogInfo("Checking all blk files are present...");
527
0
    std::set<int> setBlkDataFiles;
528
0
    for (const auto& [_, block_index] : m_block_index) {
529
0
        if (block_index.nStatus & BLOCK_HAVE_DATA) {
530
0
            setBlkDataFiles.insert(block_index.nFile);
531
0
        }
532
0
    }
533
0
    for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
534
0
        FlatFilePos pos(*it, 0);
535
0
        if (OpenBlockFile(pos, /*fReadOnly=*/true).IsNull()) {
536
0
            return false;
537
0
        }
538
0
    }
539
540
0
    {
541
        // Initialize the blockfile cursors.
542
0
        LOCK(cs_LastBlockFile);
543
0
        for (size_t i = 0; i < m_blockfile_info.size(); ++i) {
544
0
            const auto last_height_in_file = m_blockfile_info[i].nHeightLast;
545
0
            m_blockfile_cursors[BlockfileTypeForHeight(last_height_in_file)] = {static_cast<int>(i), 0};
546
0
        }
547
0
    }
548
549
    // Check whether we have ever pruned block & undo files
550
0
    m_block_tree_db->ReadFlag("prunedblockfiles", m_have_pruned);
551
0
    if (m_have_pruned) {
552
0
        LogInfo("Loading block index db: Block files have previously been pruned");
553
0
    }
554
555
    // Check whether we need to continue reindexing
556
0
    bool fReindexing = false;
557
0
    m_block_tree_db->ReadReindexing(fReindexing);
558
0
    if (fReindexing) m_blockfiles_indexed = false;
559
560
0
    return true;
561
0
}
562
563
void BlockManager::ScanAndUnlinkAlreadyPrunedFiles()
564
0
{
565
0
    AssertLockHeld(::cs_main);
566
0
    int max_blockfile = WITH_LOCK(cs_LastBlockFile, return this->MaxBlockfileNum());
567
0
    if (!m_have_pruned) {
568
0
        return;
569
0
    }
570
571
0
    std::set<int> block_files_to_prune;
572
0
    for (int file_number = 0; file_number < max_blockfile; file_number++) {
573
0
        if (m_blockfile_info[file_number].nSize == 0) {
574
0
            block_files_to_prune.insert(file_number);
575
0
        }
576
0
    }
577
578
0
    UnlinkPrunedFiles(block_files_to_prune);
579
0
}
580
581
bool BlockManager::IsBlockPruned(const CBlockIndex& block) const
582
0
{
583
0
    AssertLockHeld(::cs_main);
584
0
    return m_have_pruned && !(block.nStatus & BLOCK_HAVE_DATA) && (block.nTx > 0);
585
0
}
586
587
const CBlockIndex* BlockManager::GetFirstBlock(const CBlockIndex& upper_block, uint32_t status_mask, const CBlockIndex* lower_block) const
588
0
{
589
0
    AssertLockHeld(::cs_main);
590
0
    const CBlockIndex* last_block = &upper_block;
591
0
    assert((last_block->nStatus & status_mask) == status_mask); // 'upper_block' must satisfy the status mask
592
0
    while (last_block->pprev && ((last_block->pprev->nStatus & status_mask) == status_mask)) {
593
0
        if (lower_block) {
594
            // Return if we reached the lower_block
595
0
            if (last_block == lower_block) return lower_block;
596
            // if range was surpassed, means that 'lower_block' is not part of the 'upper_block' chain
597
            // and so far this is not allowed.
598
0
            assert(last_block->nHeight >= lower_block->nHeight);
599
0
        }
600
0
        last_block = last_block->pprev;
601
0
    }
602
0
    assert(last_block != nullptr);
603
0
    return last_block;
604
0
}
605
606
bool BlockManager::CheckBlockDataAvailability(const CBlockIndex& upper_block, const CBlockIndex& lower_block)
607
0
{
608
0
    if (!(upper_block.nStatus & BLOCK_HAVE_DATA)) return false;
609
0
    return GetFirstBlock(upper_block, BLOCK_HAVE_DATA, &lower_block) == &lower_block;
610
0
}
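
GetFirstBlock walks pprev pointers backwards for as long as every ancestor still satisfies the status mask, and CheckBlockDataAvailability uses it with BLOCK_HAVE_DATA to confirm an unbroken run of stored blocks between lower_block and upper_block. A minimal sketch of the walk-back with hypothetical simplified types:

// Illustrative walk-back in the spirit of GetFirstBlock; Node and HAVE_DATA are
// hypothetical stand-ins, not Bitcoin Core's CBlockIndex or BlockStatus.
#include <cassert>
#include <cstdint>

struct Node {
    Node* pprev;
    uint32_t status;
};

const Node* FirstWithMask(const Node& upper, uint32_t mask)
{
    const Node* last = &upper;
    assert((last->status & mask) == mask); // the starting block must satisfy the mask
    while (last->pprev && (last->pprev->status & mask) == mask) {
        last = last->pprev;
    }
    return last;
}

int main()
{
    constexpr uint32_t HAVE_DATA = 1u << 3; // stand-in for BLOCK_HAVE_DATA
    Node pruned{nullptr, 0};                // ancestor whose data was pruned
    Node mid{&pruned, HAVE_DATA};
    Node tip{&mid, HAVE_DATA};
    assert(FirstWithMask(tip, HAVE_DATA) == &mid); // the walk stops before the pruned block
}
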
611
612
// If we're using -prune with -reindex, then delete block files that will be ignored by the
613
// reindex.  Since reindexing works by starting at block file 0 and looping until a blockfile
614
// is missing, do the same here to delete any later block files after a gap.  Also delete all
615
// rev files since they'll be rewritten by the reindex anyway.  This ensures that m_blockfile_info
616
// is in sync with what's actually on disk by the time we start downloading, so that pruning
617
// works correctly.
618
void BlockManager::CleanupBlockRevFiles() const
619
0
{
620
0
    std::map<std::string, fs::path> mapBlockFiles;
621
622
    // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
623
    // Remove the rev files immediately and insert the blk file paths into an
624
    // ordered map keyed by block file index.
625
0
    LogInfo("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune");
626
0
    for (fs::directory_iterator it(m_opts.blocks_dir); it != fs::directory_iterator(); it++) {
627
0
        const std::string path = fs::PathToString(it->path().filename());
628
0
        if (fs::is_regular_file(*it) &&
629
0
            path.length() == 12 &&
630
0
            path.ends_with(".dat"))
631
0
        {
632
0
            if (path.starts_with("blk")) {
633
0
                mapBlockFiles[path.substr(3, 5)] = it->path();
634
0
            } else if (path.starts_with("rev")) {
635
0
                remove(it->path());
636
0
            }
637
0
        }
638
0
    }
639
640
    // Remove all block files that aren't part of a contiguous set starting at
641
    // zero by walking the ordered map (keys are block file indices) by
642
    // keeping a separate counter.  Once we hit a gap (or if 0 doesn't exist)
643
    // start removing block files.
644
0
    int nContigCounter = 0;
645
0
    for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
646
0
        if (LocaleIndependentAtoi<int>(item.first) == nContigCounter) {
647
0
            nContigCounter++;
648
0
            continue;
649
0
        }
650
0
        remove(item.second);
651
0
    }
652
0
}
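
CleanupBlockRevFiles keeps only the blk?????.dat files that form a contiguous sequence starting at 0; the first gap (or a missing blk00000.dat) makes every later file a deletion candidate, matching how reindexing stops at the first missing file. A small sketch of that rule over an illustrative file map:

// Standalone sketch of the contiguity rule in CleanupBlockRevFiles; the file names
// below are examples, keyed by the 5-digit index substring as mapBlockFiles is above.
#include <cassert>
#include <map>
#include <string>
#include <vector>

int main()
{
    const std::map<std::string, std::string> block_files{
        {"00000", "blk00000.dat"},
        {"00001", "blk00001.dat"},
        {"00003", "blk00003.dat"}, // gap: blk00002.dat is missing
    };

    std::vector<std::string> to_remove;
    int contiguous = 0;
    for (const auto& [index, name] : block_files) {
        if (std::stoi(index) == contiguous) {
            ++contiguous;
            continue;
        }
        to_remove.push_back(name); // everything after the first gap is removed
    }
    assert(to_remove == std::vector<std::string>{"blk00003.dat"});
}
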
653
654
CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n)
655
0
{
656
0
    LOCK(cs_LastBlockFile);
657
658
0
    return &m_blockfile_info.at(n);
659
0
}
660
661
bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const
662
0
{
663
0
    const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())};
664
665
    // Open history file to read
666
0
    AutoFile file{OpenUndoFile(pos, true)};
667
0
    if (file.IsNull()) {
668
0
        LogError("OpenUndoFile failed for %s while reading block undo", pos.ToString());
669
0
        return false;
670
0
    }
671
0
    BufferedReader filein{std::move(file)};
672
673
0
    try {
674
        // Read block
675
0
        HashVerifier verifier{filein}; // Use HashVerifier, as reserializing may lose data, c.f. commit d3424243
676
677
0
        verifier << index.pprev->GetBlockHash();
678
0
        verifier >> blockundo;
679
680
0
        uint256 hashChecksum;
681
0
        filein >> hashChecksum;
682
683
        // Verify checksum
684
0
        if (hashChecksum != verifier.GetHash()) {
685
0
            LogError("Checksum mismatch at %s while reading block undo", pos.ToString());
686
0
            return false;
687
0
        }
688
0
    } catch (const std::exception& e) {
689
0
        LogError("Deserialize or I/O error - %s at %s while reading block undo", e.what(), pos.ToString());
690
0
        return false;
691
0
    }
692
693
0
    return true;
694
0
}
695
696
bool BlockManager::FlushUndoFile(int block_file, bool finalize)
697
11
{
698
11
    FlatFilePos undo_pos_old(block_file, m_blockfile_info[block_file].nUndoSize);
699
11
    if (!m_undo_file_seq.Flush(undo_pos_old, finalize)) {
700
0
        m_opts.notifications.flushError(_("Flushing undo file to disk failed. This is likely the result of an I/O error."));
701
0
        return false;
702
0
    }
703
11
    return true;
704
11
}
705
706
bool BlockManager::FlushBlockFile(int blockfile_num, bool fFinalize, bool finalize_undo)
707
11
{
708
11
    bool success = true;
709
11
    LOCK(cs_LastBlockFile);
710
711
11
    if (m_blockfile_info.size() < 1) {
712
        // Return if we haven't loaded any blockfiles yet. This happens during
713
        // chainstate init, when we call ChainstateManager::MaybeRebalanceCaches() (which
714
        // then calls FlushStateToDisk()), resulting in a call to this function before we
715
        // have populated `m_blockfile_info` via LoadBlockIndexDB().
716
0
        return true;
717
0
    }
718
11
    assert(static_cast<int>(m_blockfile_info.size()) > blockfile_num);
719
720
11
    FlatFilePos block_pos_old(blockfile_num, m_blockfile_info[blockfile_num].nSize);
721
11
    if (!m_block_file_seq.Flush(block_pos_old, fFinalize)) {
722
0
        m_opts.notifications.flushError(_("Flushing block file to disk failed. This is likely the result of an I/O error."));
723
0
        success = false;
724
0
    }
725
    // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
726
    // e.g. during IBD or a sync after a node going offline
727
11
    if (!fFinalize || finalize_undo) {
728
11
        if (!FlushUndoFile(blockfile_num, finalize_undo)) {
729
0
            success = false;
730
0
        }
731
11
    }
732
11
    return success;
733
11
}
734
735
BlockfileType BlockManager::BlockfileTypeForHeight(int height)
736
11
{
737
11
    if (!m_snapshot_height) {
738
11
        return BlockfileType::NORMAL;
739
11
    }
740
0
    return (height >= *m_snapshot_height) ? BlockfileType::ASSUMED : BlockfileType::NORMAL;
741
11
}
742
743
bool BlockManager::FlushChainstateBlockFile(int tip_height)
744
11
{
745
11
    LOCK(cs_LastBlockFile);
746
11
    auto& cursor = m_blockfile_cursors[BlockfileTypeForHeight(tip_height)];
747
    // If the cursor does not exist, it means an assumeutxo snapshot is loaded,
748
    // but no blocks past the snapshot height have been written yet, so there
749
    // is no data associated with the chainstate, and it is safe not to flush.
750
11
    if (cursor) {
751
11
        return FlushBlockFile(cursor->file_num, /*fFinalize=*/false, /*finalize_undo=*/false);
752
11
    }
753
    // No need to log warnings in this case.
754
0
    return true;
755
11
}
756
757
uint64_t BlockManager::CalculateCurrentUsage()
758
0
{
759
0
    LOCK(cs_LastBlockFile);
760
761
0
    uint64_t retval = 0;
762
0
    for (const CBlockFileInfo& file : m_blockfile_info) {
763
0
        retval += file.nSize + file.nUndoSize;
764
0
    }
765
0
    return retval;
766
0
}
767
768
void BlockManager::UnlinkPrunedFiles(const std::set<int>& setFilesToPrune) const
769
0
{
770
0
    std::error_code ec;
771
0
    for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
772
0
        FlatFilePos pos(*it, 0);
773
0
        const bool removed_blockfile{fs::remove(m_block_file_seq.FileName(pos), ec)};
774
0
        const bool removed_undofile{fs::remove(m_undo_file_seq.FileName(pos), ec)};
775
0
        if (removed_blockfile || removed_undofile) {
776
0
            LogDebug(BCLog::BLOCKSTORAGE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
777
0
        }
778
0
    }
779
0
}
780
781
AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const
782
0
{
783
0
    return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation};
784
0
}
785
786
/** Open an undo file (rev?????.dat) */
787
AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const
788
0
{
789
0
    return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation};
790
0
}
791
792
fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const
793
0
{
794
0
    return m_block_file_seq.FileName(pos);
795
0
}
796
797
FlatFilePos BlockManager::FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime)
798
0
{
799
0
    LOCK(cs_LastBlockFile);
800
801
0
    const BlockfileType chain_type = BlockfileTypeForHeight(nHeight);
802
803
0
    if (!m_blockfile_cursors[chain_type]) {
804
        // If a snapshot is loaded during runtime, we may not have initialized this cursor yet.
805
0
        assert(chain_type == BlockfileType::ASSUMED);
806
0
        const auto new_cursor = BlockfileCursor{this->MaxBlockfileNum() + 1};
807
0
        m_blockfile_cursors[chain_type] = new_cursor;
808
0
        LogDebug(BCLog::BLOCKSTORAGE, "[%s] initializing blockfile cursor to %s\n", chain_type, new_cursor);
809
0
    }
810
0
    const int last_blockfile = m_blockfile_cursors[chain_type]->file_num;
811
812
0
    int nFile = last_blockfile;
813
0
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
814
0
        m_blockfile_info.resize(nFile + 1);
815
0
    }
816
817
0
    bool finalize_undo = false;
818
0
    unsigned int max_blockfile_size{MAX_BLOCKFILE_SIZE};
819
    // Use smaller blockfiles in test-only -fastprune mode - but avoid
820
    // the possibility of having a block not fit into the block file.
821
0
    if (m_opts.fast_prune) {
822
0
        max_blockfile_size = 0x10000; // 64kiB
823
0
        if (nAddSize >= max_blockfile_size) {
824
            // dynamically adjust the blockfile size to be larger than the added size
825
0
            max_blockfile_size = nAddSize + 1;
826
0
        }
827
0
    }
828
0
    assert(nAddSize < max_blockfile_size);
829
830
0
    while (m_blockfile_info[nFile].nSize + nAddSize >= max_blockfile_size) {
831
        // when the undo file is keeping up with the block file, we want to flush it explicitly
832
        // when it is lagging behind (more blocks arrive than are being connected), we let the
833
        // undo block write case handle it
834
0
        finalize_undo = (static_cast<int>(m_blockfile_info[nFile].nHeightLast) ==
835
0
                         Assert(m_blockfile_cursors[chain_type])->undo_height);
836
837
        // Try the next unclaimed blockfile number
838
0
        nFile = this->MaxBlockfileNum() + 1;
839
        // Set to increment MaxBlockfileNum() for next iteration
840
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
841
842
0
        if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
843
0
            m_blockfile_info.resize(nFile + 1);
844
0
        }
845
0
    }
846
0
    FlatFilePos pos;
847
0
    pos.nFile = nFile;
848
0
    pos.nPos = m_blockfile_info[nFile].nSize;
849
850
0
    if (nFile != last_blockfile) {
851
0
        LogDebug(BCLog::BLOCKSTORAGE, "Leaving block file %i: %s (onto %i) (height %i)\n",
852
0
                 last_blockfile, m_blockfile_info[last_blockfile].ToString(), nFile, nHeight);
853
854
        // Do not propagate the return code. The flush concerns a previous block
855
        // and undo file that has already been written to. If a flush fails
856
        // here, and we crash, there is no expected additional block data
857
        // inconsistency arising from the flush failure here. However, the undo
858
        // data may be inconsistent after a crash if the flush is called during
859
        // a reindex. A flush error might also leave some of the data files
860
        // untrimmed.
861
0
        if (!FlushBlockFile(last_blockfile, /*fFinalize=*/true, finalize_undo)) {
862
0
            LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning,
863
0
                          "Failed to flush previous block file %05i (finalize=1, finalize_undo=%i) before opening new block file %05i\n",
864
0
                          last_blockfile, finalize_undo, nFile);
865
0
        }
866
        // No undo data yet in the new file, so reset our undo-height tracking.
867
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{nFile};
868
0
    }
869
870
0
    m_blockfile_info[nFile].AddBlock(nHeight, nTime);
871
0
    m_blockfile_info[nFile].nSize += nAddSize;
872
873
0
    bool out_of_space;
874
0
    size_t bytes_allocated = m_block_file_seq.Allocate(pos, nAddSize, out_of_space);
875
0
    if (out_of_space) {
876
0
        m_opts.notifications.fatalError(_("Disk space is too low!"));
877
0
        return {};
878
0
    }
879
0
    if (bytes_allocated != 0 && IsPruneMode()) {
880
0
        m_check_for_pruning = true;
881
0
    }
882
883
0
    m_dirty_fileinfo.insert(nFile);
884
0
    return pos;
885
0
}
886
887
void BlockManager::UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos)
888
0
{
889
0
    LOCK(cs_LastBlockFile);
890
891
    // Update the cursor so it points to the last file.
892
0
    const BlockfileType chain_type{BlockfileTypeForHeight(nHeight)};
893
0
    auto& cursor{m_blockfile_cursors[chain_type]};
894
0
    if (!cursor || cursor->file_num < pos.nFile) {
895
0
        m_blockfile_cursors[chain_type] = BlockfileCursor{pos.nFile};
896
0
    }
897
898
    // Update the file information with the current block.
899
0
    const unsigned int added_size = ::GetSerializeSize(TX_WITH_WITNESS(block));
900
0
    const int nFile = pos.nFile;
901
0
    if (static_cast<int>(m_blockfile_info.size()) <= nFile) {
902
0
        m_blockfile_info.resize(nFile + 1);
903
0
    }
904
0
    m_blockfile_info[nFile].AddBlock(nHeight, block.GetBlockTime());
905
0
    m_blockfile_info[nFile].nSize = std::max(pos.nPos + added_size, m_blockfile_info[nFile].nSize);
906
0
    m_dirty_fileinfo.insert(nFile);
907
0
}
908
909
bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
910
0
{
911
0
    pos.nFile = nFile;
912
913
0
    LOCK(cs_LastBlockFile);
914
915
0
    pos.nPos = m_blockfile_info[nFile].nUndoSize;
916
0
    m_blockfile_info[nFile].nUndoSize += nAddSize;
917
0
    m_dirty_fileinfo.insert(nFile);
918
919
0
    bool out_of_space;
920
0
    size_t bytes_allocated = m_undo_file_seq.Allocate(pos, nAddSize, out_of_space);
921
0
    if (out_of_space) {
922
0
        return FatalError(m_opts.notifications, state, _("Disk space is too low!"));
923
0
    }
924
0
    if (bytes_allocated != 0 && IsPruneMode()) {
925
0
        m_check_for_pruning = true;
926
0
    }
927
928
0
    return true;
929
0
}
930
931
bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block)
932
0
{
933
0
    AssertLockHeld(::cs_main);
934
0
    const BlockfileType type = BlockfileTypeForHeight(block.nHeight);
935
0
    auto& cursor = *Assert(WITH_LOCK(cs_LastBlockFile, return m_blockfile_cursors[type]));
936
937
    // Write undo information to disk
938
0
    if (block.GetUndoPos().IsNull()) {
939
0
        FlatFilePos pos;
940
0
        const auto blockundo_size{static_cast<uint32_t>(GetSerializeSize(blockundo))};
941
0
        if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) {
942
0
            LogError("FindUndoPos failed for %s while writing block undo", pos.ToString());
943
0
            return false;
944
0
        }
945
946
        // Open history file to append
947
0
        AutoFile file{OpenUndoFile(pos)};
948
0
        if (file.IsNull()) {
949
0
            LogError("OpenUndoFile failed for %s while writing block undo", pos.ToString());
950
0
            return FatalError(m_opts.notifications, state, _("Failed to write undo data."));
951
0
        }
952
0
        {
953
0
            BufferedWriter fileout{file};
954
955
            // Write index header
956
0
            fileout << GetParams().MessageStart() << blockundo_size;
957
0
            pos.nPos += STORAGE_HEADER_BYTES;
958
0
            {
959
                // Calculate checksum
960
0
                HashWriter hasher{};
961
0
                hasher << block.pprev->GetBlockHash() << blockundo;
962
                // Write undo data & checksum
963
0
                fileout << blockundo << hasher.GetHash();
964
0
            }
965
            // BufferedWriter will flush pending data to file when fileout goes out of scope.
966
0
        }
967
968
        // Make sure that the file is closed before we call `FlushUndoFile`.
969
0
        if (file.fclose() != 0) {
970
0
            LogError("Failed to close block undo file %s: %s", pos.ToString(), SysErrorString(errno));
971
0
            return FatalError(m_opts.notifications, state, _("Failed to close block undo file."));
972
0
        }
973
974
        // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
975
        // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
976
        // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
977
        // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
978
        // the FindNextBlockPos function
979
0
        if (pos.nFile < cursor.file_num && static_cast<uint32_t>(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) {
980
            // Do not propagate the return code, a failed flush here should not
981
            // be an indication for a failed write. If it were propagated here,
982
            // the caller would assume the undo data not to be written, when in
983
            // fact it is. Note though, that a failed flush might leave the data
984
            // file untrimmed.
985
0
            if (!FlushUndoFile(pos.nFile, true)) {
986
0
                LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile);
987
0
            }
988
0
        } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) {
989
0
            cursor.undo_height = block.nHeight;
990
0
        }
991
        // update nUndoPos in block index
992
0
        block.nUndoPos = pos.nPos;
993
0
        block.nStatus |= BLOCK_HAVE_UNDO;
994
0
        m_dirty_blockindex.insert(&block);
995
0
    }
996
997
0
    return true;
998
0
}
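
WriteBlockUndo appends a checksum computed over the parent block hash followed by the undo data, and ReadBlockUndo recomputes the same digest with HashVerifier and rejects the record on mismatch. The round trip, reduced to a sketch in which std::hash merely stands in for the real double-SHA256 hasher:

// Hedged sketch of the undo-record checksum round trip; std::hash is only a
// stand-in for the HashWriter/HashVerifier used above.
#include <cassert>
#include <cstddef>
#include <functional>
#include <string>

int main()
{
    const std::string prev_block_hash = "hash of block.pprev";
    const std::string undo_bytes = "serialized CBlockUndo";

    // Writer side: checksum over (parent hash || undo data), stored after the record.
    const std::size_t stored_checksum = std::hash<std::string>{}(prev_block_hash + undo_bytes);

    // Reader side: recompute over the same inputs and compare.
    const std::size_t recomputed = std::hash<std::string>{}(prev_block_hash + undo_bytes);
    assert(recomputed == stored_checksum);
}
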
999
1000
bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos, const std::optional<uint256>& expected_hash) const
1001
0
{
1002
0
    block.SetNull();
1003
1004
    // Open history file to read
1005
0
    std::vector<std::byte> block_data;
1006
0
    if (!ReadRawBlock(block_data, pos)) {
1007
0
        return false;
1008
0
    }
1009
1010
0
    try {
1011
        // Read block
1012
0
        SpanReader{block_data} >> TX_WITH_WITNESS(block);
1013
0
    } catch (const std::exception& e) {
1014
0
        LogError("Deserialize or I/O error - %s at %s while reading block", e.what(), pos.ToString());
1015
0
        return false;
1016
0
    }
1017
1018
0
    const auto block_hash{block.GetHash()};
1019
1020
    // Check the header
1021
0
    if (!CheckProofOfWork(block_hash, block.nBits, GetConsensus())) {
1022
0
        LogError("Errors in block header at %s while reading block", pos.ToString());
1023
0
        return false;
1024
0
    }
1025
1026
    // Signet only: check block solution
1027
0
    if (GetConsensus().signet_blocks && !CheckSignetBlockSolution(block, GetConsensus())) {
1028
0
        LogError("Errors in block solution at %s while reading block", pos.ToString());
1029
0
        return false;
1030
0
    }
1031
1032
0
    if (expected_hash && block_hash != *expected_hash) {
1033
0
        LogError("GetHash() doesn't match index at %s while reading block (%s != %s)",
1034
0
                 pos.ToString(), block_hash.ToString(), expected_hash->ToString());
1035
0
        return false;
1036
0
    }
1037
1038
0
    return true;
1039
0
}
1040
1041
bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const
1042
0
{
1043
0
    const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())};
1044
0
    return ReadBlock(block, block_pos, index.GetBlockHash());
1045
0
}
1046
1047
bool BlockManager::ReadRawBlock(std::vector<std::byte>& block, const FlatFilePos& pos) const
1048
0
{
1049
0
    if (pos.nPos < STORAGE_HEADER_BYTES) {
1050
        // If nPos is less than STORAGE_HEADER_BYTES, we can't read the header that precedes the block data
1051
        // This would cause an unsigned integer underflow when trying to position the file cursor
1052
        // This can happen after pruning or default constructed positions
1053
0
        LogError("Failed for %s while reading raw block storage header", pos.ToString());
1054
0
        return false;
1055
0
    }
1056
0
    AutoFile filein{OpenBlockFile({pos.nFile, pos.nPos - STORAGE_HEADER_BYTES}, /*fReadOnly=*/true)};
1057
0
    if (filein.IsNull()) {
1058
0
        LogError("OpenBlockFile failed for %s while reading raw block", pos.ToString());
1059
0
        return false;
1060
0
    }
1061
1062
0
    try {
1063
0
        MessageStartChars blk_start;
1064
0
        unsigned int blk_size;
1065
1066
0
        filein >> blk_start >> blk_size;
1067
1068
0
        if (blk_start != GetParams().MessageStart()) {
1069
0
            LogError("Block magic mismatch for %s: %s versus expected %s while reading raw block",
1070
0
                pos.ToString(), HexStr(blk_start), HexStr(GetParams().MessageStart()));
1071
0
            return false;
1072
0
        }
1073
1074
0
        if (blk_size > MAX_SIZE) {
1075
0
            LogError("Block data is larger than maximum deserialization size for %s: %s versus %s while reading raw block",
1076
0
                pos.ToString(), blk_size, MAX_SIZE);
1077
0
            return false;
1078
0
        }
1079
1080
0
        block.resize(blk_size); // Zeroing of memory is intentional here
1081
0
        filein.read(block);
1082
0
    } catch (const std::exception& e) {
1083
0
        LogError("Read from block file failed: %s for %s while reading raw block", e.what(), pos.ToString());
1084
0
        return false;
1085
0
    }
1086
1087
0
    return true;
1088
0
}
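
ReadRawBlock and WriteBlock frame each block on disk as a storage header (the network message-start magic plus a 4-byte little-endian payload length, STORAGE_HEADER_BYTES in total) followed by the serialized block, with FlatFilePos::nPos pointing at the payload just past that header. A sketch of the framing, assuming the 4-byte magic plus 4-byte length layout:

// Hedged sketch of the blk*.dat record framing described above; the magic value is
// the mainnet message start, and an 8-byte header (4 magic + 4 length) is assumed.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

int main()
{
    const unsigned char magic[4] = {0xf9, 0xbe, 0xb4, 0xd9};
    const std::vector<unsigned char> payload = {0xde, 0xad, 0xbe, 0xef}; // "block" bytes

    std::vector<unsigned char> record(magic, magic + 4);
    const uint32_t size = static_cast<uint32_t>(payload.size());
    for (int i = 0; i < 4; ++i) record.push_back((size >> (8 * i)) & 0xff); // little endian
    record.insert(record.end(), payload.begin(), payload.end());

    const std::size_t storage_header_bytes = 8; // assumed value of STORAGE_HEADER_BYTES
    assert(record.size() == storage_header_bytes + payload.size());
    assert(std::memcmp(record.data(), magic, sizeof(magic)) == 0);
    // A reader positioned at nPos - storage_header_bytes sees magic, length, payload.
}
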
1089
1090
FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight)
1091
0
{
1092
0
    const unsigned int block_size{static_cast<unsigned int>(GetSerializeSize(TX_WITH_WITNESS(block)))};
1093
0
    FlatFilePos pos{FindNextBlockPos(block_size + STORAGE_HEADER_BYTES, nHeight, block.GetBlockTime())};
1094
0
    if (pos.IsNull()) {
1095
0
        LogError("FindNextBlockPos failed for %s while writing block", pos.ToString());
1096
0
        return FlatFilePos();
1097
0
    }
1098
0
    AutoFile file{OpenBlockFile(pos, /*fReadOnly=*/false)};
1099
0
    if (file.IsNull()) {
1100
0
        LogError("OpenBlockFile failed for %s while writing block", pos.ToString());
1101
0
        m_opts.notifications.fatalError(_("Failed to write block."));
1102
0
        return FlatFilePos();
1103
0
    }
1104
0
    {
1105
0
        BufferedWriter fileout{file};
1106
1107
        // Write index header
1108
0
        fileout << GetParams().MessageStart() << block_size;
1109
0
        pos.nPos += STORAGE_HEADER_BYTES;
1110
        // Write block
1111
0
        fileout << TX_WITH_WITNESS(block);
1112
0
    }
1113
1114
0
    if (file.fclose() != 0) {
1115
0
        LogError("Failed to close block file %s: %s", pos.ToString(), SysErrorString(errno));
1116
0
        m_opts.notifications.fatalError(_("Failed to close file when writing block."));
1117
0
        return FlatFilePos();
1118
0
    }
1119
1120
0
    return pos;
1121
0
}
1122
1123
static auto InitBlocksdirXorKey(const BlockManager::Options& opts)
1124
0
{
1125
    // Bytes are serialized without length indicator, so this is also the exact
1126
    // size of the XOR-key file.
1127
0
    std::array<std::byte, Obfuscation::KEY_SIZE> obfuscation{};
1128
1129
    // Consider this to be the first run if the blocksdir contains only hidden
1130
    // files (those which start with a .). Checking for a fully-empty dir would
1131
    // be too aggressive as a .lock file may have already been written.
1132
0
    bool first_run = true;
1133
0
    for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) {
1134
0
        const std::string path = fs::PathToString(entry.path().filename());
1135
0
        if (!entry.is_regular_file() || !path.starts_with('.')) {
1136
0
            first_run = false;
1137
0
            break;
1138
0
        }
1139
0
    }
1140
1141
0
    if (opts.use_xor && first_run) {
1142
        // Only use random fresh key when the boolean option is set and on the
1143
        // very first start of the program.
1144
0
        FastRandomContext{}.fillrand(obfuscation);
1145
0
    }
1146
1147
0
    const fs::path xor_key_path{opts.blocks_dir / "xor.dat"};
1148
0
    if (fs::exists(xor_key_path)) {
1149
        // A pre-existing xor key file has priority.
1150
0
        AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")};
1151
0
        xor_key_file >> obfuscation;
1152
0
    } else {
1153
        // Create initial or missing xor key file
1154
0
        AutoFile xor_key_file{fsbridge::fopen(xor_key_path,
1155
#ifdef __MINGW64__
1156
            "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210
1157
#else
1158
0
            "wbx"
1159
0
#endif
1160
0
        )};
1161
0
        xor_key_file << obfuscation;
1162
0
        if (xor_key_file.fclose() != 0) {
1163
0
            throw std::runtime_error{strprintf("Error closing XOR key file %s: %s",
1164
0
                                               fs::PathToString(xor_key_path),
1165
0
                                               SysErrorString(errno))};
1166
0
        }
1167
0
    }
1168
    // If the user disabled the key, it must be zero.
1169
0
    if (!opts.use_xor && obfuscation != decltype(obfuscation){}) {
1170
0
        throw std::runtime_error{
1171
0
            strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! "
1172
0
                      "Stored key: '%s', stored path: '%s'.",
1173
0
                      HexStr(obfuscation), fs::PathToString(xor_key_path)),
1174
0
        };
1175
0
    }
1176
0
    LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation));
1177
0
    return Obfuscation{obfuscation};
1178
0
}
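
InitBlocksdirXorKey picks (or loads from xor.dat) the obfuscation key that OpenBlockFile and OpenUndoFile pass to AutoFile for every blocksdir *.dat read and write; because XOR is its own inverse, applying the same rolling key twice restores the original bytes, and a disabled key must be all zeros (the check at the end of the function). A minimal round-trip sketch with a hypothetical helper, not the util/obfuscation.h API, assuming the 8-byte key size of Obfuscation::KEY_SIZE:

// Minimal sketch of the XOR obfuscation idea behind xor.dat. XorWithKey is a
// hypothetical helper; the 8-byte key size is an assumption for illustration.
#include <array>
#include <cassert>
#include <cstddef>
#include <vector>

void XorWithKey(std::vector<std::byte>& data, const std::array<std::byte, 8>& key)
{
    for (std::size_t i = 0; i < data.size(); ++i) {
        data[i] ^= key[i % key.size()];
    }
}

int main()
{
    const std::array<std::byte, 8> key{std::byte{0xde}, std::byte{0xad}, std::byte{0xbe}, std::byte{0xef},
                                       std::byte{0x01}, std::byte{0x02}, std::byte{0x03}, std::byte{0x04}};
    std::vector<std::byte> blob{std::byte{0x11}, std::byte{0x22}, std::byte{0x33}};
    const std::vector<std::byte> original = blob;

    XorWithKey(blob, key); // obfuscate, as done when writing blk/rev data
    XorWithKey(blob, key); // de-obfuscate, as done when reading it back
    assert(blob == original);
}
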
1179
1180
BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts)
1181
0
    : m_prune_mode{opts.prune_target > 0},
1182
0
      m_obfuscation{InitBlocksdirXorKey(opts)},
1183
0
      m_opts{std::move(opts)},
1184
0
      m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}},
1185
0
      m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}},
1186
0
      m_interrupt{interrupt}
1187
0
{
1188
0
    m_block_tree_db = std::make_unique<BlockTreeDB>(m_opts.block_tree_db_params);
1189
1190
0
    if (m_opts.block_tree_db_params.wipe_data) {
1191
0
        m_block_tree_db->WriteReindexing(true);
1192
0
        m_blockfiles_indexed = false;
1193
        // If we're reindexing in prune mode, wipe away unusable block files and all undo data files
1194
0
        if (m_prune_mode) {
1195
0
            CleanupBlockRevFiles();
1196
0
        }
1197
0
    }
1198
0
}
1199
1200
class ImportingNow
1201
{
1202
    std::atomic<bool>& m_importing;
1203
1204
public:
1205
0
    ImportingNow(std::atomic<bool>& importing) : m_importing{importing}
1206
0
    {
1207
0
        assert(m_importing == false);
1208
0
        m_importing = true;
1209
0
    }
1210
    ~ImportingNow()
1211
0
    {
1212
0
        assert(m_importing == true);
1213
0
        m_importing = false;
1214
0
    }
1215
};
1216
1217
void ImportBlocks(ChainstateManager& chainman, std::span<const fs::path> import_paths)
1218
0
{
1219
0
    ImportingNow imp{chainman.m_blockman.m_importing};
1220
1221
    // -reindex
1222
0
    if (!chainman.m_blockman.m_blockfiles_indexed) {
1223
0
        int nFile = 0;
1224
        // Map of disk positions for blocks with unknown parent (only used for reindex);
1225
        // parent hash -> child disk position, multiple children can have the same parent.
1226
0
        std::multimap<uint256, FlatFilePos> blocks_with_unknown_parent;
1227
0
        while (true) {
1228
0
            FlatFilePos pos(nFile, 0);
1229
0
            if (!fs::exists(chainman.m_blockman.GetBlockPosFilename(pos))) {
1230
0
                break; // No block files left to reindex
1231
0
            }
1232
0
            AutoFile file{chainman.m_blockman.OpenBlockFile(pos, /*fReadOnly=*/true)};
1233
0
            if (file.IsNull()) {
1234
0
                break; // This error is logged in OpenBlockFile
1235
0
            }
1236
0
            LogInfo("Reindexing block file blk%05u.dat...", (unsigned int)nFile);
1237
0
            chainman.LoadExternalBlockFile(file, &pos, &blocks_with_unknown_parent);
1238
0
            if (chainman.m_interrupt) {
1239
0
                LogInfo("Interrupt requested. Exit reindexing.");
1240
0
                return;
1241
0
            }
1242
0
            nFile++;
1243
0
        }
1244
0
        WITH_LOCK(::cs_main, chainman.m_blockman.m_block_tree_db->WriteReindexing(false));
1245
0
        chainman.m_blockman.m_blockfiles_indexed = true;
1246
0
        LogInfo("Reindexing finished");
1247
        // To avoid ending up in a situation without genesis block, re-try initializing (no-op if reindexing worked):
1248
0
        chainman.ActiveChainstate().LoadGenesisBlock();
1249
0
    }
1250
1251
    // -loadblock=
1252
0
    for (const fs::path& path : import_paths) {
1253
0
        AutoFile file{fsbridge::fopen(path, "rb")};
1254
0
        if (!file.IsNull()) {
1255
0
            LogInfo("Importing blocks file %s...", fs::PathToString(path));
1256
0
            chainman.LoadExternalBlockFile(file);
1257
0
            if (chainman.m_interrupt) {
1258
0
                LogInfo("Interrupt requested. Exit block importing.");
1259
0
                return;
1260
0
            }
1261
0
        } else {
1262
0
            LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
1263
0
        }
1264
0
    }
1265
1266
    // scan for better chains in the block chain database, that are not yet connected in the active best chain
1267
1268
    // We can't hold cs_main during ActivateBestChain even though we're accessing
1269
    // the chainman unique_ptrs since ABC requires us not to be holding cs_main, so retrieve
1270
    // the relevant pointers before the ABC call.
1271
0
    for (Chainstate* chainstate : WITH_LOCK(::cs_main, return chainman.GetAll())) {
1272
0
        BlockValidationState state;
1273
0
        if (!chainstate->ActivateBestChain(state, nullptr)) {
1274
0
            chainman.GetNotifications().fatalError(strprintf(_("Failed to connect best block (%s)."), state.ToString()));
1275
0
            return;
1276
0
        }
1277
0
    }
1278
    // End scope of ImportingNow
1279
0
}
1280
1281
0
std::ostream& operator<<(std::ostream& os, const BlockfileType& type) {
1282
0
    switch(type) {
1283
0
        case BlockfileType::NORMAL: os << "normal"; break;
1284
0
        case BlockfileType::ASSUMED: os << "assumed"; break;
1285
0
        default: os.setstate(std::ios_base::failbit);
1286
0
    }
1287
0
    return os;
1288
0
}
1289
1290
0
std::ostream& operator<<(std::ostream& os, const BlockfileCursor& cursor) {
1291
0
    os << strprintf("BlockfileCursor(file_num=%d, undo_height=%d)", cursor.file_num, cursor.undo_height);
1292
0
    return os;
1293
0
}
1294
} // namespace node