Coverage Report

Created: 2025-09-19 18:31

Legend: L = next uncovered line, R = next uncovered region, B = next uncovered branch
/root/bitcoin/src/blockencodings.cpp
Line
Count
Source
1
// Copyright (c) 2016-2022 The Bitcoin Core developers
2
// Distributed under the MIT software license, see the accompanying
3
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
4
5
#include <blockencodings.h>
6
#include <chainparams.h>
7
#include <common/system.h>
8
#include <consensus/consensus.h>
9
#include <consensus/validation.h>
10
#include <crypto/sha256.h>
11
#include <crypto/siphash.h>
12
#include <logging.h>
13
#include <random.h>
14
#include <streams.h>
15
#include <txmempool.h>
16
#include <validation.h>
17
18
#include <unordered_map>
19
20
// Construct the compact-block representation of `block`: record the header,
// salt the short-ID keys with `nonce`, prefill the coinbase, and compute a
// short ID for every remaining transaction's witness hash.
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, const uint64_t nonce) :
        nonce(nonce),
        shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
    FillShortTxIDSelector();
    //TODO: Use our mempool prior to block acceptance to predictively fill more than just the coinbase
    prefilledtxn[0] = {0, block.vtx[0]};
    // The coinbase is prefilled above; every other transaction is represented
    // only by its short transaction ID.
    for (size_t pos = 1; pos < block.vtx.size(); ++pos) {
        shorttxids[pos - 1] = GetShortID(block.vtx[pos]->GetWitnessHash());
    }
}
31
32
0
void CBlockHeaderAndShortTxIDs::FillShortTxIDSelector() const {
33
0
    DataStream stream{};
34
0
    stream << header << nonce;
35
0
    CSHA256 hasher;
36
0
    hasher.Write((unsigned char*)&(*stream.begin()), stream.end() - stream.begin());
37
0
    uint256 shorttxidhash;
38
0
    hasher.Finalize(shorttxidhash.begin());
39
0
    shorttxidk0 = shorttxidhash.GetUint64(0);
40
0
    shorttxidk1 = shorttxidhash.GetUint64(1);
41
0
}
42
43
0
// Map a wtxid to its 48-bit (6-byte) short transaction ID: SipHash the hash
// with this object's keys, then keep only the low 6 bytes.
uint64_t CBlockHeaderAndShortTxIDs::GetShortID(const Wtxid& wtxid) const {
    static_assert(SHORTTXIDS_LENGTH == 6, "shorttxids calculation assumes 6-byte shorttxids");
    const uint64_t siphash{SipHashUint256(shorttxidk0, shorttxidk1, wtxid.ToUint256())};
    return siphash & 0xffffffffffffL;
}
47
48
/* Reconstructing a compact block is in the hot-path for block relay,
 * so we want to do it as quickly as possible. Because this often
 * involves iterating over the entire mempool, we put all the data we
 * need (ie the wtxid and a reference to the actual transaction data)
 * in a vector and iterate over the vector directly. This allows optimal
 * CPU caching behaviour, at a cost of only 40 bytes per transaction.
 */
// Validate a received cmpctblock message and populate txn_available with
// every transaction we can recover locally: the peer's prefilled txns, then
// mempool matches by short ID, then matches from the extra_txn cache.
// Returns READ_STATUS_INVALID for malformed messages, READ_STATUS_FAILED for
// short-ID collisions / pathological bucket distributions (caller should fall
// back to a full block request), READ_STATUS_OK otherwise.
ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<std::pair<Wtxid, CTransactionRef>>& extra_txn)
{
    LogDebug(BCLog::CMPCTBLOCK, "Initializing PartiallyDownloadedBlock for block %s using a cmpctblock of %u bytes\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));
    // A compact block must carry a header and at least one transaction.
    if (cmpctblock.header.IsNull() || (cmpctblock.shorttxids.empty() && cmpctblock.prefilledtxn.empty()))
        return READ_STATUS_INVALID;
    // Reject claims of more transactions than could fit in a valid block.
    if (cmpctblock.shorttxids.size() + cmpctblock.prefilledtxn.size() > MAX_BLOCK_WEIGHT / MIN_SERIALIZABLE_TRANSACTION_WEIGHT)
        return READ_STATUS_INVALID;

    // InitData must only run once per PartiallyDownloadedBlock.
    if (!header.IsNull() || !txn_available.empty()) return READ_STATUS_INVALID;

    header = cmpctblock.header;
    txn_available.resize(cmpctblock.BlockTxCount());

    // Prefilled entries encode their position as a delta from the previous
    // prefilled index; reconstruct absolute positions as we go.
    int32_t lastprefilledindex = -1;
    for (size_t i = 0; i < cmpctblock.prefilledtxn.size(); i++) {
        if (cmpctblock.prefilledtxn[i].tx->IsNull())
            return READ_STATUS_INVALID;

        lastprefilledindex += cmpctblock.prefilledtxn[i].index + 1; //index is a uint16_t, so can't overflow here
        if (lastprefilledindex > std::numeric_limits<uint16_t>::max())
            return READ_STATUS_INVALID;
        if ((uint32_t)lastprefilledindex > cmpctblock.shorttxids.size() + i) {
            // If we are inserting a tx at an index greater than our full list of shorttxids
            // plus the number of prefilled txn we've inserted, then we have txn for which we
            // have neither a prefilled txn or a shorttxid!
            return READ_STATUS_INVALID;
        }
        txn_available[lastprefilledindex] = cmpctblock.prefilledtxn[i].tx;
    }
    prefilled_count = cmpctblock.prefilledtxn.size();

    // Calculate map of txids -> positions and check mempool to see what we have (or don't)
    // Because well-formed cmpctblock messages will have a (relatively) uniform distribution
    // of short IDs, any highly-uneven distribution of elements can be safely treated as a
    // READ_STATUS_FAILED.
    std::unordered_map<uint64_t, uint16_t> shorttxids(cmpctblock.shorttxids.size());
    // index_offset skips over positions already taken by prefilled txns, so
    // shorttxids[i] maps to its true position within the block.
    uint16_t index_offset = 0;
    for (size_t i = 0; i < cmpctblock.shorttxids.size(); i++) {
        while (txn_available[i + index_offset])
            index_offset++;
        shorttxids[cmpctblock.shorttxids[i]] = i + index_offset;
        // To determine the chance that the number of entries in a bucket exceeds N,
        // we use the fact that the number of elements in a single bucket is
        // binomially distributed (with n = the number of shorttxids S, and p =
        // 1 / the number of buckets), that in the worst case the number of buckets is
        // equal to S (due to std::unordered_map having a default load factor of 1.0),
        // and that the chance for any bucket to exceed N elements is at most
        // buckets * (the chance that any given bucket is above N elements).
        // Thus: P(max_elements_per_bucket > N) <= S * (1 - cdf(binomial(n=S,p=1/S), N)).
        // If we assume blocks of up to 16000, allowing 12 elements per bucket should
        // only fail once per ~1 million block transfers (per peer and connection).
        if (shorttxids.bucket_size(shorttxids.bucket(cmpctblock.shorttxids[i])) > 12)
            return READ_STATUS_FAILED;
    }
    // TODO: in the shortid-collision case, we should instead request both transactions
    // which collided. Falling back to full-block-request here is overkill.
    if (shorttxids.size() != cmpctblock.shorttxids.size())
        return READ_STATUS_FAILED; // Short ID collision

    // have_txn[pos] records that a short-ID match already claimed position
    // pos, so a second match can be detected as an ambiguity below.
    std::vector<bool> have_txn(txn_available.size());
    {
    LOCK(pool->cs);
    for (const auto& [wtxid, txit] : pool->txns_randomized) {
        uint64_t shortid = cmpctblock.GetShortID(wtxid);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = txit->GetSharedTx();
                have_txn[idit->second]  = true;
                mempool_count++;
            } else {
                // If we find two mempool txn that match the short id, just request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying
                if (txn_available[idit->second]) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }
    }

    // Second pass: try the extra-transaction cache for anything the mempool
    // did not provide.
    for (size_t i = 0; i < extra_txn.size(); i++) {
        uint64_t shortid = cmpctblock.GetShortID(extra_txn[i].first);
        std::unordered_map<uint64_t, uint16_t>::iterator idit = shorttxids.find(shortid);
        if (idit != shorttxids.end()) {
            if (!have_txn[idit->second]) {
                txn_available[idit->second] = extra_txn[i].second;
                have_txn[idit->second]  = true;
                mempool_count++;
                extra_count++;
            } else {
                // If we find two mempool/extra txn that match the short id, just
                // request it.
                // This should be rare enough that the extra bandwidth doesn't matter,
                // but eating a round-trip due to FillBlock failure would be annoying
                // Note that we don't want duplication between extra_txn and mempool to
                // trigger this case, so we compare witness hashes first
                if (txn_available[idit->second] &&
                        txn_available[idit->second]->GetWitnessHash() != extra_txn[i].second->GetWitnessHash()) {
                    txn_available[idit->second].reset();
                    mempool_count--;
                    extra_count--;
                }
            }
        }
        // Though ideally we'd continue scanning for the two-txn-match-shortid case,
        // the performance win of an early exit here is too good to pass up and worth
        // the extra risk.
        if (mempool_count == shorttxids.size())
            break;
    }

    LogDebug(BCLog::CMPCTBLOCK, "Initialized PartiallyDownloadedBlock for block %s using a cmpctblock of %u bytes\n", cmpctblock.header.GetHash().ToString(), GetSerializeSize(cmpctblock));

    return READ_STATUS_OK;
}
178
179
// Report whether the transaction at `index` was recovered by InitData()
// (prefilled, matched from the mempool, or matched from the extra pool).
bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
{
    // An uninitialized (or already-consumed) block has no transactions.
    if (header.IsNull()) {
        return false;
    }

    assert(index < txn_available.size());
    return static_cast<bool>(txn_available[index]);
}
186
187
// Assemble the full block from the transactions recovered by InitData() plus
// the peer-supplied vtx_missing, which must contain exactly one transaction,
// in block order, for each index where IsTxAvailable() was false. On any
// return (success or failure) this object's state is consumed, so FillBlock
// can only be attempted once per InitData().
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active)
{
    if (header.IsNull()) return READ_STATUS_INVALID;

    uint256 hash = header.GetHash();
    block = header;
    block.vtx.resize(txn_available.size());

    unsigned int tx_missing_size = 0;
    size_t tx_missing_offset = 0;
    for (size_t i = 0; i < txn_available.size(); i++) {
        if (!txn_available[i]) {
            // Peer supplied fewer missing transactions than we have gaps.
            if (vtx_missing.size() <= tx_missing_offset)
                return READ_STATUS_INVALID;
            block.vtx[i] = vtx_missing[tx_missing_offset++];
            tx_missing_size += block.vtx[i]->GetTotalSize();
        } else
            block.vtx[i] = std::move(txn_available[i]);
    }

    // Make sure we can't call FillBlock again.
    header.SetNull();
    txn_available.clear();

    // Peer supplied more missing transactions than we had gaps for.
    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    // Check for possible mutations early now that we have a seemingly good block
    IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated};
    if (check_mutated(/*block=*/block,
                       /*check_witness_root=*/segwit_active)) {
        return READ_STATUS_FAILED; // Possible Short ID collision
    }

    LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %u txn prefilled, %u txn from mempool (incl at least %u from extra pool) and %u txn (%u bytes) requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size(), tx_missing_size);
    // Only log individual requested transactions when there are few of them,
    // to keep debug output bounded.
    if (vtx_missing.size() < 5) {
        for (const auto& tx : vtx_missing) {
            LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString());
        }
    }

    return READ_STATUS_OK;
}