/root/bitcoin/src/net_processing.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2009-2010 Satoshi Nakamoto | 
| 2 |  | // Copyright (c) 2009-present The Bitcoin Core developers | 
| 3 |  | // Distributed under the MIT software license, see the accompanying | 
| 4 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 5 |  |  | 
| 6 |  | #include <net_processing.h> | 
| 7 |  |  | 
| 8 |  | #include <addrman.h> | 
| 9 |  | #include <arith_uint256.h> | 
| 10 |  | #include <banman.h> | 
| 11 |  | #include <blockencodings.h> | 
| 12 |  | #include <blockfilter.h> | 
| 13 |  | #include <chain.h> | 
| 14 |  | #include <chainparams.h> | 
| 15 |  | #include <common/bloom.h> | 
| 16 |  | #include <consensus/amount.h> | 
| 17 |  | #include <consensus/params.h> | 
| 18 |  | #include <consensus/validation.h> | 
| 19 |  | #include <core_memusage.h> | 
| 20 |  | #include <crypto/siphash.h> | 
| 21 |  | #include <deploymentstatus.h> | 
| 22 |  | #include <flatfile.h> | 
| 23 |  | #include <headerssync.h> | 
| 24 |  | #include <index/blockfilterindex.h> | 
| 25 |  | #include <kernel/chain.h> | 
| 26 |  | #include <logging.h> | 
| 27 |  | #include <merkleblock.h> | 
| 28 |  | #include <net.h> | 
| 29 |  | #include <net_permissions.h> | 
| 30 |  | #include <netaddress.h> | 
| 31 |  | #include <netbase.h> | 
| 32 |  | #include <netmessagemaker.h> | 
| 33 |  | #include <node/blockstorage.h> | 
| 34 |  | #include <node/connection_types.h> | 
| 35 |  | #include <node/protocol_version.h> | 
| 36 |  | #include <node/timeoffsets.h> | 
| 37 |  | #include <node/txdownloadman.h> | 
| 38 |  | #include <node/txorphanage.h> | 
| 39 |  | #include <node/txreconciliation.h> | 
| 40 |  | #include <node/warnings.h> | 
| 41 |  | #include <policy/feerate.h> | 
| 42 |  | #include <policy/fees.h> | 
| 43 |  | #include <policy/packages.h> | 
| 44 |  | #include <policy/policy.h> | 
| 45 |  | #include <primitives/block.h> | 
| 46 |  | #include <primitives/transaction.h> | 
| 47 |  | #include <protocol.h> | 
| 48 |  | #include <random.h> | 
| 49 |  | #include <scheduler.h> | 
| 50 |  | #include <script/script.h> | 
| 51 |  | #include <serialize.h> | 
| 52 |  | #include <span.h> | 
| 53 |  | #include <streams.h> | 
| 54 |  | #include <sync.h> | 
| 55 |  | #include <tinyformat.h> | 
| 56 |  | #include <txmempool.h> | 
| 57 |  | #include <uint256.h> | 
| 58 |  | #include <util/check.h> | 
| 59 |  | #include <util/strencodings.h> | 
| 60 |  | #include <util/time.h> | 
| 61 |  | #include <util/trace.h> | 
| 62 |  | #include <validation.h> | 
| 63 |  |  | 
| 64 |  | #include <algorithm> | 
| 65 |  | #include <array> | 
| 66 |  | #include <atomic> | 
| 67 |  | #include <compare> | 
| 68 |  | #include <cstddef> | 
| 69 |  | #include <deque> | 
| 70 |  | #include <exception> | 
| 71 |  | #include <functional> | 
| 72 |  | #include <future> | 
| 73 |  | #include <initializer_list> | 
| 74 |  | #include <iterator> | 
| 75 |  | #include <limits> | 
| 76 |  | #include <list> | 
| 77 |  | #include <map> | 
| 78 |  | #include <memory> | 
| 79 |  | #include <optional> | 
| 80 |  | #include <queue> | 
| 81 |  | #include <ranges> | 
| 82 |  | #include <ratio> | 
| 83 |  | #include <set> | 
| 84 |  | #include <span> | 
| 85 |  | #include <typeinfo> | 
| 86 |  | #include <utility> | 
| 87 |  |  | 
| 88 |  | using namespace util::hex_literals; | 
| 89 |  |  | 
| 90 |  | TRACEPOINT_SEMAPHORE(net, inbound_message); | 
| 91 |  | TRACEPOINT_SEMAPHORE(net, misbehaving_connection); | 
| 92 |  |  | 
| 93 |  | /** Headers download timeout. | 
| 94 |  |  *  Timeout = base + per_header * (expected number of headers) */ | 
| 95 |  | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; | 
| 96 |  | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; | 
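To make the timeout formula concrete, here is a hedged standalone sketch (the ~800,000-header count is an assumption about a fresh mainnet sync, not a value from this file):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

using namespace std::chrono_literals;

int main()
{
    constexpr auto base = 15min;     // HEADERS_DOWNLOAD_TIMEOUT_BASE
    constexpr auto per_header = 1ms; // HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER
    constexpr int64_t expected_headers = 800'000; // assumed chain height

    // Timeout = base + per_header * (expected number of headers)
    const auto timeout = base + expected_headers * per_header;
    std::cout << std::chrono::duration_cast<std::chrono::minutes>(timeout).count()
              << " min\n"; // 900 s + 800 s = 1700 s, i.e. ~28 minutes
}
```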
| 97 |  | /** How long to wait for a peer to respond to a getheaders request */ | 
| 98 |  | static constexpr auto HEADERS_RESPONSE_TIME{2min}; | 
| 99 |  | /** Protect at least this many outbound peers from disconnection due to a | 
| 100 |  |  * slow or lagging headers chain. | 
| 101 |  |  */ | 
| 102 |  | static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; | 
| 103 |  | /** Timeout for (unprotected) outbound peers to sync to our chainwork */ | 
| 104 |  | static constexpr auto CHAIN_SYNC_TIMEOUT{20min}; | 
| 105 |  | /** How frequently to check for stale tips */ | 
| 106 |  | static constexpr auto STALE_CHECK_INTERVAL{10min}; | 
| 107 |  | /** How frequently to check for extra outbound peers and disconnect */ | 
| 108 |  | static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s}; | 
| 109 |  | /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */ | 
| 110 |  | static constexpr auto MINIMUM_CONNECT_TIME{30s}; | 
| 111 |  | /** SHA256("main address relay")[0:8] */ | 
| 112 |  | static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; | 
| 113 |  | /// Age after which a stale block will no longer be served if requested as | 
| 114 |  | /// protection against fingerprinting. Set to one month, denominated in seconds. | 
| 115 |  | static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; | 
| 116 |  | /// Age after which a block is considered historical for purposes of rate | 
| 117 |  | /// limiting block relay. Set to one week, denominated in seconds. | 
| 118 |  | static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; | 
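Quick sanity arithmetic for the two age constants above (one month and one week, denominated in seconds):

```cpp
static_assert(30 * 24 * 60 * 60 == 2'592'000, "STALE_RELAY_AGE_LIMIT: one month in seconds");
static_assert( 7 * 24 * 60 * 60 ==   604'800, "HISTORICAL_BLOCK_AGE: one week in seconds");
```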
| 119 |  | /** Time between pings automatically sent out for latency probing and keepalive */ | 
| 120 |  | static constexpr auto PING_INTERVAL{2min}; | 
| 121 |  | /** The maximum number of entries in a locator */ | 
| 122 |  | static const unsigned int MAX_LOCATOR_SZ = 101; | 
| 123 |  | /** The maximum number of entries in an 'inv' protocol message */ | 
| 124 |  | static const unsigned int MAX_INV_SZ = 50000; | 
| 125 |  | /** Limit to avoid sending big packets. Not enforced when processing incoming GETDATA, for compatibility */ | 
| 126 |  | static const unsigned int MAX_GETDATA_SZ = 1000; | 
| 127 |  | /** Number of blocks that can be requested at any given time from a single peer. */ | 
| 128 |  | static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16; | 
| 129 |  | /** Default time during which a peer must stall block download progress before being disconnected. | 
| 130 |  |  * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout */ | 
| 131 |  | static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s}; | 
| 132 |  | /** Maximum timeout for stalling block download. */ | 
| 133 |  | static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s}; | 
| 134 |  | /** Maximum depth of blocks we're willing to serve as compact blocks to peers | 
| 135 |  |  *  when requested. For older blocks, a regular BLOCK response will be sent. */ | 
| 136 |  | static const int MAX_CMPCTBLOCK_DEPTH = 5; | 
| 137 |  | /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */ | 
| 138 |  | static const int MAX_BLOCKTXN_DEPTH = 10; | 
| 139 |  | static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high"); | 
| 140 |  | /** Size of the "block download window": how far ahead of our current height do we fetch? | 
| 141 |  |  *  Larger windows tolerate larger download speed differences between peers, but increase the potential | 
| 142 |  |  *  degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably | 
| 143 |  |  *  want to make this a per-peer adaptive value at some point. */ | 
| 144 |  | static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024; | 
| 145 |  | /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */ | 
| 146 |  | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1; | 
| 147 |  | /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */ | 
| 148 |  | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5; | 
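A worked example of how the two multipliers combine, as a sketch assuming the 10-minute target block interval (the real computation lives in the block-fetch logic, not here):

```cpp
#include <chrono>

using namespace std::chrono_literals;

constexpr auto BLOCK_INTERVAL = 10min; // assumed target block spacing

constexpr std::chrono::minutes DownloadTimeout(int parallel_peers)
{
    // base (1.0 x interval) plus 0.5 x interval per parallel downloading peer
    return std::chrono::duration_cast<std::chrono::minutes>(
        1.0 * BLOCK_INTERVAL + 0.5 * parallel_peers * BLOCK_INTERVAL);
}

static_assert(DownloadTimeout(0) == 10min); // no parallel peers: 10 min
static_assert(DownloadTimeout(2) == 20min); // two extra peers: 10 + 2*5 min
```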
| 149 |  | /** Maximum number of headers to announce when relaying blocks with a `headers` message. */ | 
| 150 |  | static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; | 
| 151 |  | /** Minimum blocks required to signal NODE_NETWORK_LIMITED */ | 
| 152 |  | static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; | 
| 153 |  | /** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */ | 
| 154 |  | static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144; | 
| 155 |  | /** Average delay between local address broadcasts */ | 
| 156 |  | static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h}; | 
| 157 |  | /** Average delay between peer address broadcasts */ | 
| 158 |  | static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s}; | 
| 159 |  | /** Delay between rotating the peers we relay a particular address to */ | 
| 160 |  | static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h}; | 
| 161 |  | /** Average delay between trickled inventory transmissions for inbound peers. | 
| 162 |  |  *  Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ | 
| 163 |  | static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s}; | 
| 164 |  | /** Average delay between trickled inventory transmissions for outbound peers. | 
| 165 |  |  *  Use a smaller delay as there is less privacy concern for them. | 
| 166 |  |  *  Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ | 
| 167 |  | static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s}; | 
| 168 |  | /** Maximum rate of inventory items to send per second. | 
| 169 |  |  *  Limits the impact of low-fee transaction floods. */ | 
| 170 |  | static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7; | 
| 171 |  | /** Target number of tx inventory items to send per transmission. */ | 
| 172 |  | static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL); | 
| 173 |  | /** Maximum number of inventory items to send per transmission. */ | 
| 174 |  | static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000; | 
| 175 |  | static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low"); | 
| 176 |  | static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); | 
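Checking the arithmetic behind INVENTORY_BROADCAST_TARGET (7 items/s sustained over the 5 s inbound trickle interval):

```cpp
#include <chrono>

using namespace std::chrono_literals;

constexpr unsigned int PER_SECOND = 7; // INVENTORY_BROADCAST_PER_SECOND
constexpr auto INTERVAL = 5s;          // INBOUND_INVENTORY_BROADCAST_INTERVAL
constexpr auto TARGET = PER_SECOND * INTERVAL.count(); // 7 * 5
static_assert(TARGET == 35, "INVENTORY_BROADCAST_TARGET works out to 35 items");
```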
| 177 |  | /** Average delay between feefilter broadcasts. */ | 
| 178 |  | static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min}; | 
| 179 |  | /** Maximum feefilter broadcast delay after significant change. */ | 
| 180 |  | static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min}; | 
| 181 |  | /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */ | 
| 182 |  | static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; | 
| 183 |  | /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ | 
| 184 |  | static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; | 
| 185 |  | /** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */ | 
| 186 |  | static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; | 
| 187 |  | /** The maximum number of address records permitted in an ADDR message. */ | 
| 188 |  | static constexpr size_t MAX_ADDR_TO_SEND{1000}; | 
| 189 |  | /** The maximum rate of address records we're willing to process on average. Can be bypassed using | 
| 190 |  |  *  the NetPermissionFlags::Addr permission. */ | 
| 191 |  | static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1}; | 
| 192 |  | /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND | 
| 193 |  |  *  based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR | 
| 194 |  |  *  is exempt from this limit). */ | 
| 195 |  | static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; | 
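A simplified token-bucket sketch under the constants above; names and structure are illustrative, and the real accounting (including the GETADDR exemption) lives in the ADDR message handler:

```cpp
#include <algorithm>
#include <chrono>

struct AddrTokenBucketSketch {
    double tokens{1.0}; // start at 1 to permit a self-announcement
    std::chrono::microseconds last_update{0};

    void Refill(std::chrono::microseconds now)
    {
        const double elapsed = std::chrono::duration<double>(now - last_update).count();
        // Regular 0.1/s refills never push the bucket above the 1000-token
        // soft cap; only the GETADDR response path (not modeled) may exceed it.
        tokens = std::min(tokens + 0.1 * elapsed, 1000.0);
        last_update = now;
    }

    bool Consume() // one address record costs one token
    {
        if (tokens < 1.0) return false;
        tokens -= 1.0;
        return true;
    }
};
```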
| 196 |  | /** The compactblocks version we support. See BIP 152. */ | 
| 197 |  | static constexpr uint64_t CMPCTBLOCKS_VERSION{2}; | 
| 198 |  |  | 
| 199 |  | // Internal stuff | 
| 200 |  | namespace { | 
| 201 |  | /** Blocks that are in flight, and that are in the queue to be downloaded. */ | 
| 202 |  | struct QueuedBlock { | 
| 203 |  |     /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */ | 
| 204 |  |     const CBlockIndex* pindex; | 
| 205 |  |     /** Optional, used for CMPCTBLOCK downloads */ | 
| 206 |  |     std::unique_ptr<PartiallyDownloadedBlock> partialBlock; | 
| 207 |  | }; | 
| 208 |  |  | 
| 209 |  | /** | 
| 210 |  |  * Data structure for an individual peer. This struct is not protected by | 
| 211 |  |  * cs_main since it does not contain validation-critical data. | 
| 212 |  |  * | 
| 213 |  |  * Memory is owned by shared pointers and this object is destructed when | 
| 214 |  |  * the refcount drops to zero. | 
| 215 |  |  * | 
| 216 |  |  * Mutexes inside this struct must not be held when locking m_peer_mutex. | 
| 217 |  |  * | 
| 218 |  |  * TODO: move most members from CNodeState to this structure. | 
| 219 |  |  * TODO: move remaining application-layer data members from CNode to this structure. | 
| 220 |  |  */ | 
| 221 |  | struct Peer { | 
| 222 |  |     /** Same id as the CNode object for this peer */ | 
| 223 |  |     const NodeId m_id{0}; | 
| 224 |  |  | 
| 225 |  |     /** Services we offered to this peer. | 
| 226 |  |      * | 
| 227 |  |      *  This is supplied by CConnman during peer initialization. It's const | 
| 228 |  |      *  because there is no protocol defined for renegotiating services | 
| 229 |  |      *  initially offered to a peer. The set of local services we offer should | 
| 230 |  |      *  not change after initialization. | 
| 231 |  |      * | 
| 232 |  |      *  An interesting example of this is NODE_NETWORK and initial block | 
| 233 |  |      *  download: a node which starts up from scratch doesn't have any blocks | 
| 234 |  |      *  to serve, but still advertises NODE_NETWORK because it will eventually | 
| 235 |  |      *  fulfill this role after IBD completes. P2P code is written in such a | 
| 236 |  |      *  way that it can gracefully handle peers who don't make good on their | 
| 237 |  |      *  service advertisements. */ | 
| 238 |  |     const ServiceFlags m_our_services; | 
| 239 |  |     /** Services this peer offered to us. */ | 
| 240 |  |     std::atomic<ServiceFlags> m_their_services{NODE_NONE}; | 
| 241 |  |  | 
| 242 |  |     //! Whether this peer is an inbound connection | 
| 243 |  |     const bool m_is_inbound; | 
| 244 |  |  | 
| 245 |  |     /** Protects misbehavior data members */ | 
| 246 |  |     Mutex m_misbehavior_mutex; | 
| 247 |  |     /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */ | 
| 248 |  |     bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; | 
| 249 |  |  | 
| 250 |  |     /** Protects block inventory data members */ | 
| 251 |  |     Mutex m_block_inv_mutex; | 
| 252 |  |     /** List of blocks that we'll announce via an `inv` message. | 
| 253 |  |      * There is no final sorting before sending, as they are always sent | 
| 254 |  |      * immediately and in the order requested. */ | 
| 255 |  |     std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex); | 
| 256 |  |     /** Unfiltered list of blocks that we'd like to announce via a `headers` | 
| 257 |  |      * message. If we can't announce via a `headers` message, we'll fall back to | 
| 258 |  |      * announcing via `inv`. */ | 
| 259 |  |     std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex); | 
| 260 |  |     /** The final block hash that we sent in an `inv` message to this peer. | 
| 261 |  |      * When the peer requests this block, we send an `inv` message to trigger | 
| 262 |  |      * the peer to request the next sequence of block hashes. | 
| 263 |  |      * Most peers use headers-first syncing, which doesn't use this mechanism */ | 
| 264 |  |     uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {}; | 
| 265 |  |  | 
| 266 |  |     /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */ | 
| 267 |  |     bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; | 
| 268 |  |  | 
| 269 |  |     /** This peer's reported block height when we connected */ | 
| 270 |  |     std::atomic<int> m_starting_height{-1}; | 
| 271 |  |  | 
| 272 |  |     /** The pong reply we're expecting, or 0 if no pong expected. */ | 
| 273 |  |     std::atomic<uint64_t> m_ping_nonce_sent{0}; | 
| 274 |  |     /** When the last ping was sent, or 0 if no ping was ever sent */ | 
| 275 |  |     std::atomic<std::chrono::microseconds> m_ping_start{0us}; | 
| 276 |  |     /** Whether a ping has been requested by the user */ | 
| 277 |  |     std::atomic<bool> m_ping_queued{false}; | 
| 278 |  |  | 
| 279 |  |     /** Whether this peer relays txs via wtxid */ | 
| 280 |  |     std::atomic<bool> m_wtxid_relay{false}; | 
| 281 |  |     /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. | 
| 282 |  |      *  It is *not* a p2p protocol violation for the peer to send us | 
| 283 |  |      *  transactions with a lower fee rate than this. See BIP133. */ | 
| 284 |  |     CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; | 
| 285 |  |     /** Timestamp after which we will send the next BIP133 `feefilter` message | 
| 286 |  |       * to the peer. */ | 
| 287 |  |     std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; | 
| 288 |  |  | 
| 289 |  |     struct TxRelay { | 
| 290 |  |         mutable RecursiveMutex m_bloom_filter_mutex; | 
| 291 |  |         /** Whether we relay transactions to this peer. */ | 
| 292 |  |         bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false}; | 
| 293 |  |         /** A bloom filter for which transactions to announce to the peer. See BIP37. */ | 
| 294 |  |         std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr}; | 
| 295 |  |  | 
| 296 |  |         mutable RecursiveMutex m_tx_inventory_mutex; | 
| 297 |  |         /** A filter of all the (w)txids that the peer has announced to | 
| 298 |  |          *  us or we have announced to the peer. We use this to avoid announcing | 
| 299 |  |          *  the same (w)txid to a peer that already has the transaction. */ | 
| 300 |  |         CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001}; | 
| 301 |  |         /** Set of wtxids we still have to announce. For non-wtxid-relay peers, | 
| 302 |  |          *  we retrieve the txid from the corresponding mempool transaction when | 
| 303 |  |          *  constructing the `inv` message. We use the mempool to sort transactions | 
| 304 |  |          *  in dependency order before relay, so this does not have to be sorted. */ | 
| 305 |  |         std::set<Wtxid> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex); | 
| 306 |  |         /** Whether the peer has requested us to send our complete mempool. Only | 
| 307 |  |          *  permitted if the peer has NetPermissionFlags::Mempool or we advertise | 
| 308 |  |          *  NODE_BLOOM. See BIP35. */ | 
| 309 |  |         bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; | 
| 310 |  |         /** The next time after which we will send an `inv` message containing | 
| 311 |  |          *  transaction announcements to this peer. */ | 
| 312 |  |         std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0}; | 
| 313 |  |         /** The mempool sequence num at which we sent the last `inv` message to this peer. | 
| 314 |  |          *  Can relay txs with lower sequence numbers than this (see CTxMemPool::info_for_relay). */ | 
| 315 |  |         uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1}; | 
| 316 |  |  | 
| 317 |  |         /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ | 
| 318 |  |         std::atomic<CAmount> m_fee_filter_received{0}; | 
| 319 |  |     }; | 
| 320 |  |  | 
| 321 |  |     /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */ | 
| 322 |  |     TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) | 
| 323 | 0 |     { | 
| 324 | 0 |         LOCK(m_tx_relay_mutex); | 
| 325 | 0 |         Assume(!m_tx_relay); | 
| 326 | 0 |         m_tx_relay = std::make_unique<Peer::TxRelay>(); | 
| 327 | 0 |         return m_tx_relay.get(); | 
| 328 | 0 |     }; | 
| 329 |  |  | 
| 330 |  |     TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) | 
| 331 | 0 |     { | 
| 332 | 0 |         return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); | 
| 333 | 0 |     }; | 
| 334 |  |  | 
| 335 |  |     /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */ | 
| 336 |  |     std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex); | 
| 337 |  |     /** Probabilistic filter to track recent addr messages relayed with this | 
| 338 |  |      *  peer. Used to avoid relaying redundant addresses to this peer. | 
| 339 |  |      * | 
| 340 |  |      *  We initialize this filter for outbound peers (other than | 
| 341 |  |      *  block-relay-only connections) or when an inbound peer sends us an | 
| 342 |  |      *  address related message (ADDR, ADDRV2, GETADDR). | 
| 343 |  |      * | 
| 344 |  |      *  Presence of this filter must correlate with m_addr_relay_enabled. | 
| 345 |  |      **/ | 
| 346 |  |     std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex); | 
| 347 |  |     /** Whether we are participating in address relay with this connection. | 
| 348 |  |      * | 
| 349 |  |      *  We set this bool to true for outbound peers (other than | 
| 350 |  |      *  block-relay-only connections), or when an inbound peer sends us an | 
| 351 |  |      *  address related message (ADDR, ADDRV2, GETADDR). | 
| 352 |  |      * | 
| 353 |  |      *  We use this bool to decide whether a peer is eligible for gossiping | 
| 354 |  |      *  addr messages. This avoids relaying to peers that are unlikely to | 
| 355 |  |      *  forward them, effectively blackholing self announcements. Reasons | 
| 356 |  |      *  peers might support addr relay on the link include that they connected | 
| 357 |  |      *  to us as a block-relay-only peer or they are a light client. | 
| 358 |  |      * | 
| 359 |  |      *  This field must correlate with whether m_addr_known has been | 
| 360 |  |      *  initialized.*/ | 
| 361 |  |     std::atomic_bool m_addr_relay_enabled{false}; | 
| 362 |  |     /** Whether a getaddr request to this peer is outstanding. */ | 
| 363 |  |     bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; | 
| 364 |  |     /** Guards address sending timers. */ | 
| 365 |  |     mutable Mutex m_addr_send_times_mutex; | 
| 366 |  |     /** Time point to send the next ADDR message to this peer. */ | 
| 367 |  |     std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; | 
| 368 |  |     /** Time point to possibly re-announce our local address to this peer. */ | 
| 369 |  |     std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; | 
| 370 |  |     /** Whether the peer has signaled support for receiving ADDRv2 (BIP155) | 
| 371 |  |      *  messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */ | 
| 372 |  |     std::atomic_bool m_wants_addrv2{false}; | 
| 373 |  |     /** Whether this peer has already sent us a getaddr message. */ | 
| 374 |  |     bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; | 
| 375 |  |     /** Number of addresses that can be processed from this peer. Start at 1 to | 
| 376 |  |      *  permit self-announcement. */ | 
| 377 |  |     double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0}; | 
| 378 |  |     /** When m_addr_token_bucket was last updated */ | 
| 379 |  |     std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()}; | 
| 380 |  |     /** Total number of addresses that were dropped due to rate limiting. */ | 
| 381 |  |     std::atomic<uint64_t> m_addr_rate_limited{0}; | 
| 382 |  |     /** Total number of addresses that were processed (excludes rate-limited ones). */ | 
| 383 |  |     std::atomic<uint64_t> m_addr_processed{0}; | 
| 384 |  |  | 
| 385 |  |     /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ | 
| 386 |  |     bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; | 
| 387 |  |  | 
| 388 |  |     /** Protects m_getdata_requests **/ | 
| 389 |  |     Mutex m_getdata_requests_mutex; | 
| 390 |  |     /** Work queue of items requested by this peer **/ | 
| 391 |  |     std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); | 
| 392 |  |  | 
| 393 |  |     /** Time of the last getheaders message to this peer */ | 
| 394 |  |     NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){}; | 
| 395 |  |  | 
| 396 |  |     /** Protects m_headers_sync **/ | 
| 397 |  |     Mutex m_headers_sync_mutex; | 
| 398 |  |     /** Headers-sync state for this peer (eg for initial sync, or syncing large | 
| 399 |  |      * reorgs) **/ | 
| 400 |  |     std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {}; | 
| 401 |  |  | 
| 402 |  |     /** Whether we've sent our peer a sendheaders message. **/ | 
| 403 |  |     std::atomic<bool> m_sent_sendheaders{false}; | 
| 404 |  |  | 
| 405 |  |     /** When to potentially disconnect peer for stalling headers download */ | 
| 406 |  |     std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us}; | 
| 407 |  |  | 
| 408 |  |     /** Whether this peer wants invs or headers (when possible) for block announcements */ | 
| 409 |  |     bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; | 
| 410 |  |  | 
| 411 |  |     /** Time offset computed during the version handshake based on the | 
| 412 |  |      * timestamp the peer sent in the version message. */ | 
| 413 |  |     std::atomic<std::chrono::seconds> m_time_offset{0s}; | 
| 414 |  |  | 
| 415 |  |     explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound) | 
| 416 | 0 |         : m_id{id} | 
| 417 | 0 |         , m_our_services{our_services} | 
| 418 | 0 |         , m_is_inbound{is_inbound} | 
| 419 | 0 |     {} | 
| 420 |  |  | 
| 421 |  | private: | 
| 422 |  |     mutable Mutex m_tx_relay_mutex; | 
| 423 |  |  | 
| 424 |  |     /** Transaction relay data. May be a nullptr. */ | 
| 425 |  |     std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex); | 
| 426 |  | }; | 
| 427 |  |  | 
| 428 |  | using PeerRef = std::shared_ptr<Peer>; | 
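A hedged sketch of the shared-pointer ownership pattern this typedef enables: the map lock is held only long enough to copy the reference, and the Peer outlives removal from the map until the last reference drops. The types below are simplified stand-ins, not the real Peer/PeerManagerImpl:

```cpp
#include <map>
#include <memory>
#include <mutex>

struct DemoPeer { int id; };
using DemoPeerRef = std::shared_ptr<DemoPeer>;

std::mutex g_demo_peer_mutex;
std::map<int, DemoPeerRef> g_demo_peer_map;

DemoPeerRef GetDemoPeerRef(int id)
{
    std::lock_guard lock{g_demo_peer_mutex};
    auto it = g_demo_peer_map.find(id);
    return it == g_demo_peer_map.end() ? nullptr : it->second;
}

int main()
{
    g_demo_peer_map.emplace(0, std::make_shared<DemoPeer>(DemoPeer{0}));
    DemoPeerRef peer = GetDemoPeerRef(0);
    // The mutex is released here; `peer` keeps the object alive even if
    // another thread erases the map entry concurrently.
    return peer ? peer->id : -1;
}
```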
| 429 |  |  | 
| 430 |  | /** | 
| 431 |  |  * Maintain validation-specific state about nodes, protected by cs_main instead | 
| 432 |  |  * of by CNode's own locks. This simplifies asynchronous operation, where | 
| 433 |  |  * processing of incoming data is done after the ProcessMessage call returns, | 
| 434 |  |  * and we're no longer holding the node's locks. | 
| 435 |  |  */ | 
| 436 |  | struct CNodeState { | 
| 437 |  |     //! The best known block we know this peer has announced. | 
| 438 |  |     const CBlockIndex* pindexBestKnownBlock{nullptr}; | 
| 439 |  |     //! The hash of the last unknown block this peer has announced. | 
| 440 |  |     uint256 hashLastUnknownBlock{}; | 
| 441 |  |     //! The last full block we both have. | 
| 442 |  |     const CBlockIndex* pindexLastCommonBlock{nullptr}; | 
| 443 |  |     //! The best header we have sent our peer. | 
| 444 |  |     const CBlockIndex* pindexBestHeaderSent{nullptr}; | 
| 445 |  |     //! Whether we've started headers synchronization with this peer. | 
| 446 |  |     bool fSyncStarted{false}; | 
| 447 |  |     //! Since when we're stalling block download progress (in microseconds), or 0. | 
| 448 |  |     std::chrono::microseconds m_stalling_since{0us}; | 
| 449 |  |     std::list<QueuedBlock> vBlocksInFlight; | 
| 450 |  |     //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. | 
| 451 |  |     std::chrono::microseconds m_downloading_since{0us}; | 
| 452 |  |     //! Whether we consider this a preferred download peer. | 
| 453 |  |     bool fPreferredDownload{false}; | 
| 454 |  |     /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */ | 
| 455 |  |     bool m_requested_hb_cmpctblocks{false}; | 
| 456 |  |     /** Whether this peer will send us cmpctblocks if we request them. */ | 
| 457 |  |     bool m_provides_cmpctblocks{false}; | 
| 458 |  |  | 
| 459 |  |     /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. | 
| 460 |  |       * | 
| 461 |  |       * Both are only in effect for outbound, non-manual, non-protected connections. | 
| 462 |  |       * Any peer protected (m_protect = true) is not chosen for eviction. A peer is | 
| 463 |  |       * marked as protected if all of these are true: | 
| 464 |  |       *   - its connection type is IsBlockOnlyConn() == false | 
| 465 |  |       *   - it gave us a valid connecting header | 
| 466 |  |       *   - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet | 
| 467 |  |       *   - its chain tip has at least as much work as ours | 
| 468 |  |       * | 
| 469 |  |       * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, | 
| 470 |  |       * set a timeout CHAIN_SYNC_TIMEOUT in the future: | 
| 471 |  |       *   - If at timeout their best known block now has more work than our tip | 
| 472 |  |       *     when the timeout was set, then either reset the timeout or clear it | 
| 473 |  |       *     (after comparing against our current tip's work) | 
| 474 |  |       *   - If at timeout their best known block still has less work than our | 
| 475 |  |       *     tip did when the timeout was set, then send a getheaders message, | 
| 476 |  |       *     and set a shorter timeout, HEADERS_RESPONSE_TIME in the future. | 
| 477 |  |       *     If their best known block is still behind when that new timeout is | 
| 478 |  |       *     reached, disconnect. | 
| 479 |  |       * | 
| 480 |  |       * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, | 
| 481 |  |       * drop the outbound peer that least recently announced a new block to us. | 
| 482 |  |       */ | 
| 483 |  |     struct ChainSyncTimeoutState { | 
| 484 |  |         //! A timeout used for checking whether our peer has sufficiently synced | 
| 485 |  |         std::chrono::seconds m_timeout{0s}; | 
| 486 |  |         //! A header with the work we require on our peer's chain | 
| 487 |  |         const CBlockIndex* m_work_header{nullptr}; | 
| 488 |  |         //! After timeout is reached, set to true after sending getheaders | 
| 489 |  |         bool m_sent_getheaders{false}; | 
| 490 |  |         //! Whether this peer is protected from disconnection due to a bad/slow chain | 
| 491 |  |         bool m_protect{false}; | 
| 492 |  |     }; | 
| 493 |  |  | 
| 494 |  |     ChainSyncTimeoutState m_chain_sync; | 
| 495 |  |  | 
| 496 |  |     //! Time of last new block announcement | 
| 497 |  |     int64_t m_last_block_announcement{0}; | 
| 498 |  | }; | 
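A condensed sketch of the CHAIN_SYNC_TIMEOUT flow documented in the comment above. All names are illustrative; the real logic is in PeerManagerImpl::ConsiderEviction further down:

```cpp
#include <chrono>

using namespace std::chrono_literals;

enum class Action { None, SendGetheaders, Disconnect };

struct SyncSketchState {
    std::chrono::seconds timeout{0s};
    bool sent_getheaders{false};
    bool protect{false};
};

Action Step(SyncSketchState& st, std::chrono::seconds now, bool peer_has_enough_work)
{
    if (st.protect) return Action::None;          // protected peers are never evicted
    if (peer_has_enough_work) {
        st.timeout = 0s;                          // caught up: clear the timeout
        st.sent_getheaders = false;
        return Action::None;
    }
    if (st.timeout == 0s) {
        st.timeout = now + 20min;                 // CHAIN_SYNC_TIMEOUT
    } else if (now > st.timeout) {
        if (!st.sent_getheaders) {
            st.sent_getheaders = true;
            st.timeout = now + 2min;              // HEADERS_RESPONSE_TIME grace
            return Action::SendGetheaders;
        }
        return Action::Disconnect;                // still behind after the grace period
    }
    return Action::None;
}
```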
| 499 |  |  | 
| 500 |  | class PeerManagerImpl final : public PeerManager | 
| 501 |  | { | 
| 502 |  | public: | 
| 503 |  |     PeerManagerImpl(CConnman& connman, AddrMan& addrman, | 
| 504 |  |                     BanMan* banman, ChainstateManager& chainman, | 
| 505 |  |                     CTxMemPool& pool, node::Warnings& warnings, Options opts); | 
| 506 |  |  | 
| 507 |  |     /** Overridden from CValidationInterface. */ | 
| 508 |  |     void ActiveTipChange(const CBlockIndex& new_tip, bool) override | 
| 509 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); | 
| 510 |  |     void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override | 
| 511 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); | 
| 512 |  |     void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override | 
| 513 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); | 
| 514 |  |     void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override | 
| 515 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 516 |  |     void BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state) override | 
| 517 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 518 |  |     void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override | 
| 519 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); | 
| 520 |  |  | 
| 521 |  |     /** Implement NetEventsInterface */ | 
| 522 |  |     void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex); | 
| 523 |  |     void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex); | 
| 524 |  |     bool HasAllDesirableServiceFlags(ServiceFlags services) const override; | 
| 525 |  |     bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override | 
| 526 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); | 
| 527 |  |     bool SendMessages(CNode* pto) override | 
| 528 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex); | 
| 529 |  |  | 
| 530 |  |     /** Implement PeerManager */ | 
| 531 |  |     void StartScheduledTasks(CScheduler& scheduler) override; | 
| 532 |  |     void CheckForStaleTipAndEvictPeers() override; | 
| 533 |  |     std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override | 
| 534 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 535 |  |     bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 536 |  |     std::vector<node::TxOrphanage::OrphanInfo> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); | 
| 537 |  |     PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 538 |  |     void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 539 |  |     void RelayTransaction(const Txid& txid, const Wtxid& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 540 |  |     void SetBestBlock(int height, std::chrono::seconds time) override | 
| 541 | 0 |     { | 
| 542 | 0 |         m_best_height = height; | 
| 543 | 0 |         m_best_block_time = time; | 
| 544 | 0 |     }; | 
| 545 | 0 |     void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); }; | 
| 546 |  |     void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, | 
| 547 |  |                         const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override | 
| 548 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); | 
| 549 |  |     void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; | 
| 550 |  |     ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override; | 
| 551 |  |  | 
| 552 |  | private: | 
| 553 |  |     /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ | 
| 554 |  |     void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex); | 
| 555 |  |  | 
| 556 |  |     /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ | 
| 557 |  |     void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 558 |  |  | 
| 559 |  |     /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ | 
| 560 |  |     void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 561 |  |  | 
| 562 |  |     /** Get a shared pointer to the Peer object. | 
| 563 |  |      *  May return an empty shared_ptr if the Peer object can't be found. */ | 
| 564 |  |     PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 565 |  |  | 
| 566 |  |     /** Get a shared pointer to the Peer object and remove it from m_peer_map. | 
| 567 |  |      *  May return an empty shared_ptr if the Peer object can't be found. */ | 
| 568 |  |     PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 569 |  |  | 
| 570 |  |     /** Mark a peer as misbehaving, which will cause it to be disconnected and its | 
| 571 |  |      *  address discouraged. */ | 
| 572 |  |     void Misbehaving(Peer& peer, const std::string& message); | 
| 573 |  |  | 
| 574 |  |     /** | 
| 575 |  |      * Potentially mark a node discouraged based on the contents of a BlockValidationState object | 
| 576 |  |      * | 
| 577 |  |      * @param[in] via_compact_block this bool is passed in because net_processing should | 
| 578 |  |      * punish peers differently depending on whether the data was provided in a compact | 
| 579 |  |      * block message or not. If the compact block had a valid header, but contained invalid | 
| 580 |  |      * txs, the peer should not be punished. See BIP 152. | 
| 581 |  |      */ | 
| 582 |  |     void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, | 
| 583 |  |                                  bool via_compact_block, const std::string& message = "") | 
| 584 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); | 
| 585 |  |  | 
| 586 |  |     /** Maybe disconnect a peer and discourage future connections from its address. | 
| 587 |  |      * | 
| 588 |  |      * @param[in]   pnode     The node to check. | 
| 589 |  |      * @param[in]   peer      The peer object to check. | 
| 590 |  |      * @return                True if the peer was marked for disconnection in this function | 
| 591 |  |      */ | 
| 592 |  |     bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); | 
| 593 |  |  | 
| 594 |  |     /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID. | 
| 595 |  |      * @param[in]   first_time_failure            Whether we should consider inserting into vExtraTxnForCompact, adding | 
| 596 |  |      *                                            a new orphan to resolve, or looking for a package to submit. | 
| 597 |  |      *                                            Set to true for transactions just received over p2p. | 
| 598 |  |      *                                            Set to false if the tx has already been rejected before, | 
| 599 |  |      *                                            e.g. is already in the orphanage, to avoid adding duplicate entries. | 
| 600 |  |      * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. | 
| 601 |  |      * | 
| 602 |  |      * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found, | 
| 603 |  |      * or std::nullopt otherwise. | 
| 604 |  |      */ | 
| 605 |  |     std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, | 
| 606 |  |                                                       bool first_time_failure) | 
| 607 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); | 
| 608 |  |  | 
| 609 |  |     /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID. | 
| 610 |  |      * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */ | 
| 611 |  |     void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) | 
| 612 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); | 
| 613 |  |  | 
| 614 |  |     /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for | 
| 615 |  |      * individual transactions, and caches rejection for the package as a group. | 
| 616 |  |      */ | 
| 617 |  |     void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) | 
| 618 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); | 
| 619 |  |  | 
| 620 |  |     /** | 
| 621 |  |      * Reconsider orphan transactions after a parent has been accepted to the mempool. | 
| 622 |  |      * | 
| 623 |  |      * @param[in]  peer     The peer whose orphan transactions we will reconsider. Generally only | 
| 624 |  |      *                     one orphan will be reconsidered on each call of this function. If an | 
| 625 |  |      *                     accepted orphan has orphaned children, those will need to be | 
| 626 |  |      *                     reconsidered, creating more work, possibly for other peers. | 
| 627 |  |      * @return             True if meaningful work was done (an orphan was accepted/rejected). | 
| 628 |  |      *                     If no meaningful work was done, then the work set for this peer | 
| 629 |  |      *                     will be empty. | 
| 630 |  |      */ | 
| 631 |  |     bool ProcessOrphanTx(Peer& peer) | 
| 632 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex); | 
| 633 |  |  | 
| 634 |  |     /** Process a single headers message from a peer. | 
| 635 |  |      * | 
| 636 |  |      * @param[in]   pfrom     CNode of the peer | 
| 637 |  |      * @param[in]   peer      The peer sending us the headers | 
| 638 |  |      * @param[in]   headers   The headers received. Note that this may be modified within ProcessHeadersMessage. | 
| 639 |  |      * @param[in]   via_compact_block   Whether this header came in via compact block handling. | 
| 640 |  |     */ | 
| 641 |  |     void ProcessHeadersMessage(CNode& pfrom, Peer& peer, | 
| 642 |  |                                std::vector<CBlockHeader>&& headers, | 
| 643 |  |                                bool via_compact_block) | 
| 644 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); | 
| 645 |  |     /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ | 
| 646 |  |     /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */ | 
| 647 |  |     bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer); | 
| 648 |  |     /** Calculate an anti-DoS work threshold for headers chains */ | 
| 649 |  |     arith_uint256 GetAntiDoSWorkThreshold(); | 
| 650 |  |     /** Deal with state tracking and headers sync for peers that send | 
| 651 |  |      * non-connecting headers (this can happen due to BIP 130 headers | 
| 652 |  |      * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ | 
| 653 |  |     void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 654 |  |     /** Return true if the headers connect to each other, false otherwise */ | 
| 655 |  |     bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const; | 
| 656 |  |     /** Try to continue a low-work headers sync that has already begun. | 
| 657 |  |      * Assumes the caller has already verified the headers connect, and has | 
| 658 |  |      * checked that each header satisfies the proof-of-work target included in | 
| 659 |  |      * the header. | 
| 660 |  |      *  @param[in]  peer                            The peer we're syncing with. | 
| 661 |  |      *  @param[in]  pfrom                           CNode of the peer | 
| 662 |  |      *  @param[in,out] headers                      The headers to be processed. | 
| 663 |  |      *  @return     True if the passed in headers were successfully processed | 
| 664 |  |      *              as the continuation of a low-work headers sync in progress; | 
| 665 |  |      *              false otherwise. | 
| 666 |  |      *              If false, the passed in headers will be returned back to | 
| 667 |  |      *              the caller. | 
| 668 |  |      *              If true, the returned headers may be empty, indicating | 
| 669 |  |      *              there is no more work for the caller to do; or the headers | 
| 670 |  |      *              may be populated with entries that have passed anti-DoS | 
| 671 |  |      *              checks (and therefore may be validated for block index | 
| 672 |  |      *              acceptance by the caller). | 
| 673 |  |      */ | 
| 674 |  |     bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, | 
| 675 |  |             std::vector<CBlockHeader>& headers) | 
| 676 |  |         EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex); | 
| 677 |  |     /** Check work on a headers chain to be processed, and if insufficient, | 
| 678 |  |      * initiate our anti-DoS headers sync mechanism. | 
| 679 |  |      * | 
| 680 |  |      * @param[in]   peer                The peer whose headers we're processing. | 
| 681 |  |      * @param[in]   pfrom               CNode of the peer | 
| 682 |  |      * @param[in]   chain_start_header  Where these headers connect in our index. | 
| 683 |  |      * @param[in,out]   headers             The headers to be processed. | 
| 684 |  |      * | 
| 685 |  |      * @return      True if chain was low work (headers will be empty after | 
| 686 |  |      *              calling); false otherwise. | 
| 687 |  |      */ | 
| 688 |  |     bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, | 
| 689 |  |                                   const CBlockIndex* chain_start_header, | 
| 690 |  |                                   std::vector<CBlockHeader>& headers) | 
| 691 |  |         EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); | 
| 692 |  |  | 
| 693 |  |     /** Return true if the given header is an ancestor of | 
| 694 |  |      *  m_chainman.m_best_header or our current tip */ | 
| 695 |  |     bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 696 |  |  | 
| 697 |  |     /** Request further headers from this peer with a given locator. | 
| 698 |  |      * We don't issue a getheaders message if we have a recent one outstanding. | 
| 699 |  |      * This returns true if a getheaders is actually sent, and false otherwise. | 
| 700 |  |      */ | 
| 701 |  |     bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 702 |  |     /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ | 
| 703 |  |     void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header); | 
| 704 |  |     /** Update peer state based on received headers message */ | 
| 705 |  |     void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) | 
| 706 |  |         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 707 |  |  | 
| 708 |  |     void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); | 
| 709 |  |  | 
| 710 |  |     /** Send a message to a peer */ | 
| 711 | 0 |     void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); } | 
| 712 |  |     template <typename... Args> | 
| 713 |  |     void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const | 
| 714 | 0 |     { | 
| 715 | 0 |         m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); | 
| 716 | 0 | } | 
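A hedged, standalone sketch of the variadic pattern MakeAndPushMessage implements (stand-in types, not the real NetMsg/CSerializedNetMsg API): serialize all payload arguments behind a message type name.

```cpp
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <utility>

// Stand-in for CSerializedNetMsg: a message type plus serialized payload.
struct DemoNetMsg { std::string type; std::string data; };

template <typename... Args>
DemoNetMsg DemoMake(std::string type, Args&&... args)
{
    std::ostringstream payload;
    (payload << ... << std::forward<Args>(args)); // fold each argument into the payload
    return {std::move(type), payload.str()};
}

int main()
{
    const DemoNetMsg msg = DemoMake("ping", uint64_t{42});
    std::cout << msg.type << ": " << msg.data << "\n"; // prints "ping: 42"
}
```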
| 717 |  |  | 
| 718 |  |     /** Send a version message to a peer */ | 
| 719 |  |     void PushNodeVersion(CNode& pnode, const Peer& peer); | 
| 720 |  |  | 
| 721 |  |     /** Send a ping message every PING_INTERVAL or if requested via RPC. May | 
| 722 |  |      *  mark the peer to be disconnected if a ping has timed out. | 
| 723 |  |      *  We use mockable time for ping timeouts, so setmocktime may cause pings | 
| 724 |  |      *  to time out. */ | 
| 725 |  |     void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); | 
| 726 |  |  | 
| 727 |  |     /** Send `addr` messages on a regular schedule. */ | 
| 728 |  |     void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 729 |  |  | 
| 730 |  |     /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */ | 
| 731 |  |     void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 732 |  |  | 
| 733 |  |     /** Relay (gossip) an address to a few randomly chosen nodes. | 
| 734 |  |      * | 
| 735 |  |      * @param[in] originator   The id of the peer that sent us the address. We don't want to relay it back. | 
| 736 |  |      * @param[in] addr         Address to relay. | 
| 737 |  |      * @param[in] fReachable   Whether the address' network is reachable. We relay unreachable | 
| 738 |  |      *                         addresses less. | 
| 739 |  |      */ | 
| 740 |  |     void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); | 
| 741 |  |  | 
| 742 |  |     /** Send `feefilter` message. */ | 
| 743 |  |     void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 744 |  |  | 
| 745 |  |     FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); | 
| 746 |  |  | 
| 747 |  |     FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex); | 
| 748 |  |  | 
| 749 |  |     const CChainParams& m_chainparams; | 
| 750 |  |     CConnman& m_connman; | 
| 751 |  |     AddrMan& m_addrman; | 
| 752 |  |     /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ | 
| 753 |  |     BanMan* const m_banman; | 
| 754 |  |     ChainstateManager& m_chainman; | 
| 755 |  |     CTxMemPool& m_mempool; | 
| 756 |  |  | 
| 757 |  |     /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage. | 
| 758 |  |      * Lock invariants: | 
| 759 |  |      * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage. | 
| 760 |  |      * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects. | 
| 761 |  |      * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable. | 
| 762 |  |      * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions. | 
| 763 |  |      * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). | 
| 764 |  |      */ | 
| 765 |  |     Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); | 
| 766 |  |     node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex); | 
| 767 |  |  | 
| 768 |  |     std::unique_ptr<TxReconciliationTracker> m_txreconciliation; | 
| 769 |  |  | 
| 770 |  |     /** The height of the best chain */ | 
| 771 |  |     std::atomic<int> m_best_height{-1}; | 
| 772 |  |     /** The time of the best chain tip block */ | 
| 773 |  |     std::atomic<std::chrono::seconds> m_best_block_time{0s}; | 
| 774 |  |  | 
| 775 |  |     /** Next time to check for stale tip */ | 
| 776 |  |     std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; | 
| 777 |  |  | 
| 778 |  |     node::Warnings& m_warnings; | 
| 779 |  |     TimeOffsets m_outbound_time_offsets{m_warnings}; | 
| 780 |  |  | 
| 781 |  |     const Options m_opts; | 
| 782 |  |  | 
| 783 |  |     bool RejectIncomingTxs(const CNode& peer) const; | 
| 784 |  |  | 
| 785 |  |     /** Whether we've completed initial sync yet, for determining when to turn | 
| 786 |  |       * on extra block-relay-only peers. */ | 
| 787 |  |     bool m_initial_sync_finished GUARDED_BY(cs_main){false}; | 
| 788 |  |  | 
| 789 |  |     /** Protects m_peer_map. This mutex must not be locked while holding a lock | 
| 790 |  |      *  on any of the mutexes inside a Peer object. */ | 
| 791 |  |     mutable Mutex m_peer_mutex; | 
| 792 |  |     /** | 
| 793 |  |      * Map of all Peer objects, keyed by peer id. This map is protected | 
| 794 |  |      * by the m_peer_mutex. Once a shared pointer reference is | 
| 795 |  |      * taken, the lock may be released. Individual fields are protected by | 
| 796 |  |      * their own locks. | 
| 797 |  |      */ | 
| 798 |  |     std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); | 
| 799 |  |  | 
| 800 |  |     /** Map maintaining per-node state. */ | 
| 801 |  |     std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main); | 
| 802 |  |  | 
| 803 |  |     /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ | 
| 804 |  |     const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 805 |  |     /** Get a pointer to a mutable CNodeState. */ | 
| 806 |  |     CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 807 |  |  | 
| 808 |  |     uint32_t GetFetchFlags(const Peer& peer) const; | 
| 809 |  |  | 
| 810 |  |     std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us}; | 
| 811 |  |  | 
| 812 |  |     /** Number of nodes with fSyncStarted. */ | 
| 813 |  |     int nSyncStarted GUARDED_BY(cs_main) = 0; | 
| 814 |  |  | 
| 815 |  |     /** Hash of the last block we received via INV */ | 
| 816 |  |     uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){}; | 
| 817 |  |  | 
| 818 |  |     /** | 
| 819 |  |      * Sources of received blocks, saved so that we can punish them when | 
| 820 |  |      * processing happens afterwards. | 
| 821 |  |      * Set mapBlockSource[hash].second to false if the node should not be | 
| 822 |  |      * punished if the block is invalid. | 
| 823 |  |      */ | 
| 824 |  |     std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); | 
| 825 |  |  | 
| 826 |  |     /** Number of peers with wtxid relay. */ | 
| 827 |  |     std::atomic<int> m_wtxid_relay_peers{0}; | 
| 828 |  |  | 
| 829 |  |     /** Number of outbound peers with m_chain_sync.m_protect. */ | 
| 830 |  |     int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; | 
| 831 |  |  | 
| 832 |  |     /** Number of preferable block download peers. */ | 
| 833 |  |     int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; | 
| 834 |  |  | 
| 835 |  |     /** Stalling timeout for blocks in IBD */ | 
| 836 |  |     std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; | 
| 837 |  |  | 
| 838 |  |     /** | 
| 839 |  |      * For sending `inv`s to inbound peers, we use a single (exponentially | 
| 840 |  |      * distributed) timer for all peers. If we used a separate timer for each | 
| 841 |  |      * peer, a spy node could make multiple inbound connections to us to | 
| 842 |  |      * accurately determine when we received the transaction (and potentially | 
| 843 |  |      * determine the transaction's origin). */ | 
| 844 |  |     std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, | 
| 845 |  |                                                 std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 846 |  |  | 
| 847 |  |  | 
| 848 |  |     // All of the following cache a recent block, and are protected by m_most_recent_block_mutex | 
| 849 |  |     Mutex m_most_recent_block_mutex; | 
| 850 |  |     std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); | 
| 851 |  |     std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); | 
| 852 |  |     uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); | 
| 853 |  |     std::unique_ptr<const std::map<GenTxid, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex); | 
| 854 |  |  | 
| 855 |  |     // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates. | 
| 856 |  |     /** Mutex guarding the other m_headers_presync_* variables. */ | 
| 857 |  |     Mutex m_headers_presync_mutex; | 
| 858 |  |     /** A type to represent statistics about a peer's low-work headers sync. | 
| 859 |  |      * | 
| 860 |  |      * - The first field is the total verified amount of work in that synchronization. | 
| 861 |  |      * - The second is: | 
| 862 |  |      *   - nullopt: the sync is in REDOWNLOAD phase (phase 2). | 
| 863 |  |      *   - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1). | 
| 864 |  |      */ | 
| 865 |  |     using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>; | 
| 866 |  |     /** Statistics for all peers in low-work headers sync. */ | 
| 867 |  |     std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {}; | 
| 868 |  |     /** The peer with the most-work entry in m_headers_presync_stats. */ | 
| 869 |  |     NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1}; | 
| 870 |  |     /** Whether m_headers_presync_stats improved and needs signalling. | 
| 871 |  |     std::atomic_bool m_headers_presync_should_signal{false}; | 
| 872 |  |  | 
| 873 |  |     /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ | 
| 874 |  |     int m_highest_fast_announce GUARDED_BY(::cs_main){0}; | 
| 875 |  |  | 
| 876 |  |     /** Have we requested this block from a peer */ | 
| 877 |  |     bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 878 |  |  | 
| 879 |  |     /** Have we requested this block from an outbound peer */ | 
| 880 |  |     bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); | 
| 881 |  |  | 
| 882 |  |     /** Remove this block from our tracked requested blocks. Called if: | 
| 883 |  |      *  - the block has been received from a peer | 
| 884 |  |      *  - the request for the block has timed out | 
| 885 |  |      * If "from_peer" is specified, then only remove the block if it is in | 
| 886 |  |      * flight from that peer (to prevent one peer's network traffic from | 
| 887 |  |      * affecting another's state). | 
| 888 |  |      */ | 
| 889 |  |     void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 890 |  |  | 
| 891 |  |     /* Mark a block as in flight. | 
| 892 |  |      * Returns false (while still setting pit) if the block was already in flight from the same peer. | 
| 893 |  |      * pit will only be valid as long as the same cs_main lock is being held. | 
| 894 |  |      */ | 
| 895 |  |     bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 896 |  |  | 
| 897 |  |     bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 898 |  |  | 
| 899 |  |     /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has | 
| 900 |  |      *  at most count entries. | 
| 901 |  |      */ | 
| 902 |  |     void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 903 |  |  | 
| 904 |  |     /** Request blocks for the background chainstate, if one is in use. */ | 
| 905 |  |     void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 906 |  |  | 
| 907 |  |     /** | 
| 908 |  |     * \brief Find next blocks to download from a peer after a starting block. | 
| 909 |  |     * | 
| 910 |  |     * \param vBlocks      Vector of blocks to download which will be appended to. | 
| 911 |  |     * \param peer         Peer which blocks will be downloaded from. | 
| 912 |  |     * \param state        Pointer to the state of the peer. | 
| 913 |  |     * \param pindexWalk   Pointer to the starting block to add to vBlocks. | 
| 914 |  |     * \param count        Maximum number of blocks to allow in vBlocks. No more | 
| 915 |  |     *                     blocks will be added if it reaches this size. | 
| 916 |  |     * \param nWindowEnd   Maximum height of blocks to allow in vBlocks. No | 
| 917 |  |     *                     blocks will be added above this height. | 
| 918 |  |     * \param activeChain  Optional pointer to a chain to compare against. If | 
| 919 |  |     *                     provided, any next blocks which are already contained | 
| 920 |  |     *                     in this chain will not be appended to vBlocks, but | 
| 921 |  |     *                     instead will be used to update the | 
| 922 |  |     *                     state->pindexLastCommonBlock pointer. | 
| 923 |  |     * \param nodeStaller  Optional pointer to a NodeId variable that will receive | 
| 924 |  |     *                     the ID of another peer that might be causing this peer | 
| 925 |  |     *                     to stall. This is set to the ID of the peer which | 
| 926 |  |     *                     first requested the first in-flight block in the | 
| 927 |  |     *                     download window. It is only set if vBlocks is empty at | 
| 928 |  |     *                     the end of this function call and if increasing | 
| 929 |  |     *                     nWindowEnd by 1 would cause it to be non-empty (which | 
| 930 |  |     *                     indicates the download might be stalled because every | 
| 931 |  |     *                     block in the window is in flight and no other peer is | 
| 932 |  |     *                     trying to download the next block). | 
| 933 |  |     */ | 
| 934 |  |     void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 935 |  |  | 
| 936 |  |     /* Multimap used to preserve insertion order */ | 
| 937 |  |     typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap; | 
| 938 |  |     BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); | 
| 939 |  |  | 
| 940 |  |     /** When our tip was last updated. */ | 
| 941 |  |     std::atomic<std::chrono::seconds> m_last_tip_update{0s}; | 
| 942 |  |  | 
| 943 |  |     /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ | 
| 944 |  |     CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) | 
| 945 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex); | 
| 946 |  |  | 
| 947 |  |     void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) | 
| 948 |  |         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) | 
| 949 |  |         LOCKS_EXCLUDED(::cs_main); | 
| 950 |  |  | 
| 951 |  |     /** Process a new block. Perform any post-processing housekeeping */ | 
| 952 |  |     void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked); | 
| 953 |  |  | 
| 954 |  |     /** Process compact block txns  */ | 
| 955 |  |     void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) | 
| 956 |  |         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); | 
| 957 |  |  | 
| 958 |  |     /** | 
| 959 |  |      * When a peer sends us a valid block, instruct it to announce blocks to us | 
| 960 |  |      * using CMPCTBLOCK if possible by adding its nodeid to the end of | 
| 961 |  |      * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by | 
| 962 |  |      * removing the first element if necessary. | 
| 963 |  |      */ | 
| 964 |  |     void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); | 
| 965 |  |  | 
| 966 |  |     /** Stack of nodes which we have set to announce using compact blocks */ | 
| 967 |  |     std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); | 
| 968 |  |  | 
| 969 |  |     /** Number of peers from which we're downloading blocks. */ | 
| 970 |  |     int m_peers_downloading_from GUARDED_BY(cs_main) = 0; | 
| 971 |  |  | 
| 972 |  |     void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 973 |  |  | 
| 974 |  |     /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. | 
| 975 |  |      *  The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of | 
| 976 |  |      *  these are kept in a ring buffer */ | 
| 977 |  |     std::vector<std::pair<Wtxid, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex); | 
| 978 |  |     /** Offset into vExtraTxnForCompact to insert the next tx */ | 
| 979 |  |     size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0; | 
| 980 |  |  | 
| 981 |  |     /** Check whether the last unknown block a peer advertised is not yet known. */ | 
| 982 |  |     void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 983 |  |     /** Update tracking information about which blocks a peer is assumed to have. */ | 
| 984 |  |     void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 985 |  |     bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 986 |  |  | 
| 987 |  |     /** | 
| 988 |  |      * Estimates the distance, in blocks, between the best-known block and the network chain tip. | 
| 989 |  |      * Utilizes the best-block time and the chainparams blocks spacing to approximate it. | 
| 990 |  |      */ | 
| 991 |  |     int64_t ApproximateBestBlockDepth() const; | 
| 992 |  |  | 
| 993 |  |     /** | 
| 994 |  |      * To prevent fingerprinting attacks, only send blocks/headers outside of | 
| 995 |  |      * the active chain if they are no more than a month older (both in time | 
| 996 |  |      * and in best-equivalent proof of work) than the best header chain we know | 
| 997 |  |      * about, and only if we fully validated them at some point. | 
| 998 |  |      */ | 
| 999 |  |     bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 1000 |  |     bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); | 
| 1001 |  |     void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) | 
| 1002 |  |         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); | 
| 1003 |  |  | 
| 1004 |  |     /** | 
| 1005 |  |      * Validation logic for compact filters request handling. | 
| 1006 |  |      * | 
| 1007 |  |      * May disconnect from the peer in the case of a bad request. | 
| 1008 |  |      * | 
| 1009 |  |      * @param[in]   node            The node that we received the request from | 
| 1010 |  |      * @param[in]   peer            The peer that we received the request from | 
| 1011 |  |      * @param[in]   filter_type     The filter type the request is for. Must be basic filters. | 
| 1012 |  |      * @param[in]   start_height    The start height for the request | 
| 1013 |  |      * @param[in]   stop_hash       The stop_hash for the request | 
| 1014 |  |      * @param[in]   max_height_diff The maximum number of items permitted to request, as specified in BIP 157 | 
| 1015 |  |      * @param[out]  stop_index      The CBlockIndex for the stop_hash block, if the request can be serviced. | 
| 1016 |  |      * @param[out]  filter_index    The filter index, if the request can be serviced. | 
| 1017 |  |      * @return                      True if the request can be serviced. | 
| 1018 |  |      */ | 
| 1019 |  |     bool PrepareBlockFilterRequest(CNode& node, Peer& peer, | 
| 1020 |  |                                    BlockFilterType filter_type, uint32_t start_height, | 
| 1021 |  |                                    const uint256& stop_hash, uint32_t max_height_diff, | 
| 1022 |  |                                    const CBlockIndex*& stop_index, | 
| 1023 |  |                                    BlockFilterIndex*& filter_index); | 
| 1024 |  |  | 
| 1025 |  |     /** | 
| 1026 |  |      * Handle a cfilters request. | 
| 1027 |  |      * | 
| 1028 |  |      * May disconnect from the peer in the case of a bad request. | 
| 1029 |  |      * | 
| 1030 |  |      * @param[in]   node            The node that we received the request from | 
| 1031 |  |      * @param[in]   peer            The peer that we received the request from | 
| 1032 |  |      * @param[in]   vRecv           The raw message received | 
| 1033 |  |      */ | 
| 1034 |  |     void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv); | 
| 1035 |  |  | 
| 1036 |  |     /** | 
| 1037 |  |      * Handle a cfheaders request. | 
| 1038 |  |      * | 
| 1039 |  |      * May disconnect from the peer in the case of a bad request. | 
| 1040 |  |      * | 
| 1041 |  |      * @param[in]   node            The node that we received the request from | 
| 1042 |  |      * @param[in]   peer            The peer that we received the request from | 
| 1043 |  |      * @param[in]   vRecv           The raw message received | 
| 1044 |  |      */ | 
| 1045 |  |     void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv); | 
| 1046 |  |  | 
| 1047 |  |     /** | 
| 1048 |  |      * Handle a getcfcheckpt request. | 
| 1049 |  |      * | 
| 1050 |  |      * May disconnect from the peer in the case of a bad request. | 
| 1051 |  |      * | 
| 1052 |  |      * @param[in]   node            The node that we received the request from | 
| 1053 |  |      * @param[in]   peer            The peer that we received the request from | 
| 1054 |  |      * @param[in]   vRecv           The raw message received | 
| 1055 |  |      */ | 
| 1056 |  |     void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv); | 
| 1057 |  |  | 
| 1058 |  |     /** Checks if address relay is permitted with this peer. If needed, initializes | 
| 1059 |  |      * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. | 
| 1060 |  |      * | 
| 1061 |  |      *  @return   True if address relay is enabled with peer | 
| 1062 |  |      *            False if address relay is disallowed | 
| 1063 |  |      */ | 
| 1064 |  |     bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 1065 |  |  | 
| 1066 |  |     void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 1067 |  |     void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); | 
| 1068 |  |  | 
| 1069 |  |     void LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block); | 
| 1070 |  | }; | 
| 1071 |  |  | 
| 1072 |  | const CNodeState* PeerManagerImpl::State(NodeId pnode) const | 
| 1073 | 0 | { | 
| 1074 | 0 |     std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode); | 
| 1075 | 0 |     if (it == m_node_states.end()) | 
| 1076 | 0 |         return nullptr; | 
| 1077 | 0 |     return &it->second; | 
| 1078 | 0 | } | 
| 1079 |  |  | 
| 1080 |  | CNodeState* PeerManagerImpl::State(NodeId pnode) | 
| 1081 | 0 | { | 
| 1082 | 0 |     return const_cast<CNodeState*>(std::as_const(*this).State(pnode)); | 
| 1083 | 0 | } | 
| 1084 |  |  | 
| 1085 |  | /** | 
| 1086 |  |  * Whether the peer supports the address. For example, a peer that does not | 
| 1087 |  |  * implement BIP155 cannot receive Tor v3 addresses because it requires | 
| 1088 |  |  * ADDRv2 (BIP155) encoding. | 
| 1089 |  |  */ | 
| 1090 |  | static bool IsAddrCompatible(const Peer& peer, const CAddress& addr) | 
| 1091 | 0 | { | 
| 1092 | 0 |     return peer.m_wants_addrv2 || addr.IsAddrV1Compatible(); | 
| 1093 | 0 | } | 
| 1094 |  |  | 
| 1095 |  | void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) | 
| 1096 | 0 | { | 
| 1097 | 0 |     assert(peer.m_addr_known); | 
| 1098 | 0 |     peer.m_addr_known->insert(addr.GetKey()); | 
| 1099 | 0 | } | 
| 1100 |  |  | 
| 1101 |  | void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr) | 
| 1102 | 0 | { | 
| 1103 |  |     // The "known" check here only saves space by dropping duplicates. | 
| 1104 |  |     // Before sending, we'll filter it again for known addresses that were | 
| 1105 |  |     // added after addresses were pushed. | 
| 1106 | 0 |     assert(peer.m_addr_known); | 
| 1107 | 0 |     if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) { | 
| 1108 | 0 |         if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) { | 
| 1109 | 0 |             peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr; | 
| 1110 | 0 |         } else { | 
| 1111 | 0 |             peer.m_addrs_to_send.push_back(addr); | 
| 1112 | 0 |         } | 
| 1113 | 0 |     } | 
| 1114 | 0 | } | 
| 1115 |  |  | 
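
The branch above implements a bounded send queue: once m_addrs_to_send reaches MAX_ADDR_TO_SEND, a uniformly random existing slot is overwritten instead of the vector growing. A minimal standalone sketch of the same idea; the Addr struct and the constant's value here are illustrative stand-ins, not Core's definitions:

```cpp
#include <cstdint>
#include <random>
#include <string>
#include <vector>

struct Addr { std::string host; uint16_t port; }; // stand-in for CAddress

constexpr size_t MAX_ADDR_TO_SEND{1000};          // illustrative bound

void PushAddr(std::vector<Addr>& to_send, const Addr& addr, std::mt19937& rng)
{
    if (to_send.size() >= MAX_ADDR_TO_SEND) {
        // Buffer full: overwrite a random slot so late arrivals still have a
        // chance of being relayed while the queue stays bounded.
        std::uniform_int_distribution<size_t> pick(0, to_send.size() - 1);
        to_send[pick(rng)] = addr;
    } else {
        to_send.push_back(addr);
    }
}
```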
| 1116 |  | static void AddKnownTx(Peer& peer, const uint256& hash) | 
| 1117 | 0 | { | 
| 1118 | 0 |     auto tx_relay = peer.GetTxRelay(); | 
| 1119 | 0 |     if (!tx_relay) return; | 
| 1120 |  |  | 
| 1121 | 0 |     LOCK(tx_relay->m_tx_inventory_mutex); | 
| 1122 | 0 |     tx_relay->m_tx_inventory_known_filter.insert(hash); | 
| 1123 | 0 | } | 
| 1124 |  |  | 
| 1125 |  | /** Whether this peer can serve us blocks. */ | 
| 1126 |  | static bool CanServeBlocks(const Peer& peer) | 
| 1127 | 0 | { | 
| 1128 | 0 |     return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED); | 
| 1129 | 0 | } | 
| 1130 |  |  | 
| 1131 |  | /** Whether this peer can only serve limited recent blocks (e.g. because | 
| 1132 |  |  *  it prunes old blocks) */ | 
| 1133 |  | static bool IsLimitedPeer(const Peer& peer) | 
| 1134 | 0 | { | 
| 1135 | 0 |     return (!(peer.m_their_services & NODE_NETWORK) && | 
| 1136 | 0 |              (peer.m_their_services & NODE_NETWORK_LIMITED)); | 
| 1137 | 0 | } | 
| 1138 |  |  | 
| 1139 |  | /** Whether this peer can serve us witness data */ | 
| 1140 |  | static bool CanServeWitnesses(const Peer& peer) | 
| 1141 | 0 | { | 
| 1142 | 0 |     return peer.m_their_services & NODE_WITNESS; | 
| 1143 | 0 | } | 
| 1144 |  |  | 
| 1145 |  | std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, | 
| 1146 |  |                                                              std::chrono::seconds average_interval) | 
| 1147 | 0 | { | 
| 1148 | 0 |     if (m_next_inv_to_inbounds.load() < now) { | 
| 1149 |  |         // If this function were called from multiple threads simultaneously | 
| 1150 |  |         // it would be possible that both update the next-send variable and return different results to their callers. | 
| 1151 |  |         // This is not possible in practice as only the net processing thread invokes this function. | 
| 1152 | 0 |         m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); | 
| 1153 | 0 |     } | 
| 1154 | 0 |     return m_next_inv_to_inbounds; | 
| 1155 | 0 | } | 
| 1156 |  |  | 
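
As the comment notes, a single timer shared across all inbound peers (rather than one per peer) keeps a spy with many inbound connections from narrowing down when a transaction arrived. A minimal sketch of such a shared, exponentially distributed timer; the shared state is passed explicitly here rather than held as a member, and the names are illustrative:

```cpp
#include <chrono>
#include <random>

std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
                                            std::chrono::microseconds& next_send, // shared across all inbound peers
                                            std::chrono::seconds average_interval,
                                            std::mt19937& rng)
{
    if (next_send < now) {
        // Exponentially distributed gaps make the announcements a Poisson
        // process with the given mean interval.
        std::exponential_distribution<double> gap{
            1.0 / std::chrono::duration<double>(average_interval).count()};
        next_send = now + std::chrono::duration_cast<std::chrono::microseconds>(
                              std::chrono::duration<double>{gap(rng)});
    }
    return next_send;
}
```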
| 1157 |  | bool PeerManagerImpl::IsBlockRequested(const uint256& hash) | 
| 1158 | 0 | { | 
| 1159 | 0 |     return mapBlocksInFlight.count(hash); | 
| 1160 | 0 | } | 
| 1161 |  |  | 
| 1162 |  | bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash) | 
| 1163 | 0 | { | 
| 1164 | 0 |     for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { | 
| 1165 | 0 |         auto [nodeid, block_it] = range.first->second; | 
| 1166 | 0 |         PeerRef peer{GetPeerRef(nodeid)}; | 
| 1167 | 0 |         if (peer && !peer->m_is_inbound) return true; | 
| 1168 | 0 |     } | 
| 1169 |  |  | 
| 1170 | 0 |     return false; | 
| 1171 | 0 | } | 
| 1172 |  |  | 
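
mapBlocksInFlight is a multimap precisely so that one block hash can be in flight from several peers at once; equal_range, as used above, visits exactly the entries for a given hash. A small self-contained illustration of the pattern with stand-in types:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using NodeId = int64_t;

int main()
{
    std::multimap<std::string, NodeId> blocks_in_flight;
    blocks_in_flight.emplace("blockhash_a", 3);
    blocks_in_flight.emplace("blockhash_a", 7); // same block, second peer
    blocks_in_flight.emplace("blockhash_b", 7);

    // Visit only the entries for "blockhash_a".
    for (auto range = blocks_in_flight.equal_range("blockhash_a");
         range.first != range.second; ++range.first) {
        std::cout << "blockhash_a in flight from peer=" << range.first->second << '\n';
    }
}
```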
| 1173 |  | void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) | 
| 1174 | 0 | { | 
| 1175 | 0 |     auto range = mapBlocksInFlight.equal_range(hash); | 
| 1176 | 0 |     if (range.first == range.second) { | 
| 1177 |  |         // Block was not requested from any peer | 
| 1178 | 0 |         return; | 
| 1179 | 0 |     } | 
| 1180 |  |  | 
| 1181 |  |     // We should not have too many in-flight requests for this block | 
| 1182 | 0 |     Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); | 
| 1183 |  |  | 
| 1184 | 0 |     while (range.first != range.second) { | 
| 1185 | 0 |         const auto& [node_id, list_it]{range.first->second}; | 
| 1186 |  |  | 
| 1187 | 0 |         if (from_peer && *from_peer != node_id) { | 
| 1188 | 0 |             range.first++; | 
| 1189 | 0 |             continue; | 
| 1190 | 0 |         } | 
| 1191 |  |  | 
| 1192 | 0 |         CNodeState& state = *Assert(State(node_id)); | 
| 1193 |  |  | 
| 1194 | 0 |         if (state.vBlocksInFlight.begin() == list_it) { | 
| 1195 |  |             // First block on the queue was received, update the start download time for the next one | 
| 1196 | 0 |             state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>()); | 
| 1197 | 0 |         } | 
| 1198 | 0 |         state.vBlocksInFlight.erase(list_it); | 
| 1199 |  |  | 
| 1200 | 0 |         if (state.vBlocksInFlight.empty()) { | 
| 1201 |  |             // Last validated block on the queue for this peer was received. | 
| 1202 | 0 |             m_peers_downloading_from--; | 
| 1203 | 0 |         } | 
| 1204 | 0 |         state.m_stalling_since = 0us; | 
| 1205 |  |  | 
| 1206 | 0 |         range.first = mapBlocksInFlight.erase(range.first); | 
| 1207 | 0 |     } | 
| 1208 | 0 | } | 
| 1209 |  |  | 
| 1210 |  | bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit) | 
| 1211 | 0 | { | 
| 1212 | 0 |     const uint256& hash{block.GetBlockHash()}; | 
| 1213 |  |  | 
| 1214 | 0 |     CNodeState *state = State(nodeid); | 
| 1215 | 0 |     assert(state != nullptr); | 
| 1216 |  |  | 
| 1217 | 0 |     Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); | 
| 1218 |  |  | 
| 1219 |  |     // Short-circuit most of the work if the block is already in flight from this same node | 
| 1220 | 0 |     for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { | 
| 1221 | 0 |         if (range.first->second.first == nodeid) { | 
| 1222 | 0 |             if (pit) { | 
| 1223 | 0 |                 *pit = &range.first->second.second; | 
| 1224 | 0 |             } | 
| 1225 | 0 |             return false; | 
| 1226 | 0 |         } | 
| 1227 | 0 |     } | 
| 1228 |  |  | 
| 1229 |  |     // Make sure it's not being fetched already from same peer. | 
| 1230 | 0 |     RemoveBlockRequest(hash, nodeid); | 
| 1231 |  |  | 
| 1232 | 0 |     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), | 
| 1233 | 0 |             {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); | 
| 1234 | 0 |     if (state->vBlocksInFlight.size() == 1) { | 
| 1235 |  |         // We're starting a block download (batch) from this peer. | 
| 1236 | 0 |         state->m_downloading_since = GetTime<std::chrono::microseconds>(); | 
| 1237 | 0 |         m_peers_downloading_from++; | 
| 1238 | 0 |     } | 
| 1239 | 0 |     auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))); | 
| 1240 | 0 |     if (pit) { | 
| 1241 | 0 |         *pit = &itInFlight->second.second; | 
| 1242 | 0 |     } | 
| 1243 | 0 |     return true; | 
| 1244 | 0 | } | 
| 1245 |  |  | 
| 1246 |  | void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) | 
| 1247 | 0 | { | 
| 1248 | 0 |     AssertLockHeld(cs_main); | 
| 1249 |  |  | 
| 1250 |  |     // When in -blocksonly mode, never request high-bandwidth mode from peers. Our | 
| 1251 |  |     // mempool will not contain the transactions necessary to reconstruct the | 
| 1252 |  |     // compact block. | 
| 1253 | 0 |     if (m_opts.ignore_incoming_txs) return; | 
| 1254 |  |  | 
| 1255 | 0 |     CNodeState* nodestate = State(nodeid); | 
| 1256 | 0 |     PeerRef peer{GetPeerRef(nodeid)}; | 
| 1257 | 0 |     if (!nodestate || !nodestate->m_provides_cmpctblocks) { | 
| 1258 |  |         // Don't request compact blocks if the peer has not signalled support | 
| 1259 | 0 |         return; | 
| 1260 | 0 |     } | 
| 1261 |  |  | 
| 1262 | 0 |     int num_outbound_hb_peers = 0; | 
| 1263 | 0 |     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { | 
| 1264 | 0 |         if (*it == nodeid) { | 
| 1265 | 0 |             lNodesAnnouncingHeaderAndIDs.erase(it); | 
| 1266 | 0 |             lNodesAnnouncingHeaderAndIDs.push_back(nodeid); | 
| 1267 | 0 |             return; | 
| 1268 | 0 |         } | 
| 1269 | 0 |         PeerRef peer_ref{GetPeerRef(*it)}; | 
| 1270 | 0 |         if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers; | 
| 1271 | 0 |     } | 
| 1272 | 0 |     if (peer && peer->m_is_inbound) { | 
| 1273 |  |         // If we're adding an inbound HB peer, make sure we're not removing | 
| 1274 |  |         // our last outbound HB peer in the process. | 
| 1275 | 0 |         if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { | 
| 1276 | 0 |             PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())}; | 
| 1277 | 0 |             if (remove_peer && !remove_peer->m_is_inbound) { | 
| 1278 |  |                 // Put the HB outbound peer in the second slot, so that it | 
| 1279 |  |                 // doesn't get removed. | 
| 1280 | 0 |                 std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); | 
| 1281 | 0 |             } | 
| 1282 | 0 |         } | 
| 1283 | 0 |     } | 
| 1284 | 0 |     m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 1285 | 0 |         AssertLockHeld(::cs_main); | 
| 1286 | 0 |         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { | 
| 1287 |  |             // As per BIP152, we only get 3 of our peers to announce | 
| 1288 |  |             // blocks using compact encodings. | 
| 1289 | 0 |             m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){ | 
| 1290 | 0 |                 MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); | 
| 1291 |  |                 // save BIP152 bandwidth state: we select peer to be low-bandwidth | 
| 1292 | 0 |                 pnodeStop->m_bip152_highbandwidth_to = false; | 
| 1293 | 0 |                 return true; | 
| 1294 | 0 |             }); | 
| 1295 | 0 |             lNodesAnnouncingHeaderAndIDs.pop_front(); | 
| 1296 | 0 |         } | 
| 1297 | 0 |         MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION); | 
| 1298 |  |         // save BIP152 bandwidth state: we select peer to be high-bandwidth | 
| 1299 | 0 |         pfrom->m_bip152_highbandwidth_to = true; | 
| 1300 | 0 |         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); | 
| 1301 | 0 |         return true; | 
| 1302 | 0 |     }); | 
| 1303 | 0 | } | 
| 1304 |  |  | 
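
The core of the function above is a three-slot rotation over lNodesAnnouncingHeaderAndIDs: re-selecting a peer moves it to the back, and adding a new one evicts the front (oldest) entry. A minimal sketch of just that rotation, omitting the outbound-peer protection and the SENDCMPCT messages of the real logic:

```cpp
#include <algorithm>
#include <cstdint>
#include <list>

using NodeId = int64_t;

void SelectHighBandwidthPeer(std::list<NodeId>& hb_peers, NodeId nodeid)
{
    auto it = std::find(hb_peers.begin(), hb_peers.end(), nodeid);
    if (it != hb_peers.end()) {
        // Already high-bandwidth: refresh its position, set unchanged.
        hb_peers.erase(it);
        hb_peers.push_back(nodeid);
        return;
    }
    if (hb_peers.size() >= 3) {
        // BIP152: at most three high-bandwidth peers; demote the oldest.
        hb_peers.pop_front();
    }
    hb_peers.push_back(nodeid);
}
```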
| 1305 |  | bool PeerManagerImpl::TipMayBeStale() | 
| 1306 | 0 | { | 
| 1307 | 0 |     AssertLockHeld(cs_main); | 
| 1308 | 0 |     const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); | 
| 1309 | 0 |     if (m_last_tip_update.load() == 0s) { | 
| 1310 | 0 |         m_last_tip_update = GetTime<std::chrono::seconds>(); | 
| 1311 | 0 |     } | 
| 1312 | 0 |     return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty(); | 
| 1313 | 0 | } | 
| 1314 |  |  | 
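
The staleness predicate above reduces to: no tip update for three target spacings (30 minutes on mainnet) and no block download in flight that could explain the silence. A standalone restatement under those assumptions, with the state passed in explicitly:

```cpp
#include <chrono>

bool TipMayBeStale(std::chrono::seconds last_tip_update,
                   std::chrono::seconds now,
                   std::chrono::seconds target_spacing, // 600s on mainnet
                   bool any_blocks_in_flight)
{
    return last_tip_update < now - 3 * target_spacing && !any_blocks_in_flight;
}
```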
| 1315 |  | int64_t PeerManagerImpl::ApproximateBestBlockDepth() const | 
| 1316 | 0 | { | 
| 1317 | 0 |     return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing; | 
| 1318 | 0 | } | 
| 1319 |  |  | 
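
As a worked example of the estimate above, assuming mainnet's 600-second target spacing: a best-block time two hours in the past yields 7200 / 600 = 12, i.e. the node guesses it is roughly twelve blocks behind the network tip.

```cpp
#include <cassert>
#include <cstdint>

// Worked example under mainnet parameters (600s spacing): two hours behind
// corresponds to roughly twelve blocks of depth. Timestamps are illustrative.
int main()
{
    constexpr int64_t now{1'700'007'200}, best_block_time{1'700'000'000};
    assert((now - best_block_time) / 600 == 12);
}
```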
| 1320 |  | bool PeerManagerImpl::CanDirectFetch() | 
| 1321 | 0 | { | 
| 1322 | 0 |     return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20; | 
| 1323 | 0 | } | 
| 1324 |  |  | 
| 1325 |  | static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) | 
| 1326 | 0 | { | 
| 1327 | 0 |     if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) | 
| 1328 | 0 |         return true; | 
| 1329 | 0 |     if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) | 
| 1330 | 0 |         return true; | 
| 1331 | 0 |     return false; | 
| 1332 | 0 | } | 
| 1333 |  |  | 
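
Both branches above rely on the same idiom: a header at height h lies on a peer's best (or best-sent) chain iff walking that chain back to height h lands exactly on it. A minimal sketch of the idea; the real CBlockIndex::GetAncestor uses a skip-list for O(log n) jumps, while this stand-in walks pprev linearly:

```cpp
struct BlockIndex {                  // simplified stand-in for CBlockIndex
    int height{0};
    const BlockIndex* pprev{nullptr};

    const BlockIndex* GetAncestor(int h) const
    {
        const BlockIndex* walk{this};
        while (walk && walk->height > h) walk = walk->pprev;
        return walk;
    }
};

bool PeerHasHeader(const BlockIndex* best_known, const BlockIndex* pindex)
{
    return best_known && pindex == best_known->GetAncestor(pindex->height);
}
```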
| 1334 | 0 | void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) { | 
| 1335 | 0 |     CNodeState *state = State(nodeid); | 
| 1336 | 0 |     assert(state != nullptr); | 
| 1337 |  |  | 
| 1338 | 0 |     if (!state->hashLastUnknownBlock.IsNull()) { | 
| 1339 | 0 |         const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock); | 
| 1340 | 0 |         if (pindex && pindex->nChainWork > 0) { | 
| 1341 | 0 |             if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { | 
| 1342 | 0 |                 state->pindexBestKnownBlock = pindex; | 
| 1343 | 0 |             } | 
| 1344 | 0 |             state->hashLastUnknownBlock.SetNull(); | 
| 1345 | 0 |         } | 
| 1346 | 0 |     } | 
| 1347 | 0 | } | 
| 1348 |  |  | 
| 1349 | 0 | void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { | 
| 1350 | 0 |     CNodeState *state = State(nodeid); | 
| 1351 | 0 |     assert(state != nullptr); | 
| 1352 |  |  | 
| 1353 | 0 |     ProcessBlockAvailability(nodeid); | 
| 1354 |  |  | 
| 1355 | 0 |     const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); | 
| 1356 | 0 |     if (pindex && pindex->nChainWork > 0) { | 
| 1357 |  |         // An actually better block was announced. | 
| 1358 | 0 |         if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { | 
| 1359 | 0 |             state->pindexBestKnownBlock = pindex; | 
| 1360 | 0 |         } | 
| 1361 | 0 |     } else { | 
| 1362 |  |         // An unknown block was announced; just assume that the latest one is the best one. | 
| 1363 | 0 |         state->hashLastUnknownBlock = hash; | 
| 1364 | 0 |     } | 
| 1365 | 0 | } | 
| 1366 |  |  | 
| 1367 |  | // Logic for calculating which blocks to download from a given peer, given our current tip. | 
| 1368 |  | void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) | 
| 1369 | 0 | { | 
| 1370 | 0 |     if (count == 0) | 
| 1371 | 0 |         return; | 
| 1372 |  |  | 
| 1373 | 0 |     vBlocks.reserve(vBlocks.size() + count); | 
| 1374 | 0 |     CNodeState *state = State(peer.m_id); | 
| 1375 | 0 |     assert(state != nullptr); | 
| 1376 |  |  | 
| 1377 |  |     // Make sure pindexBestKnownBlock is up to date, we'll need it. | 
| 1378 | 0 |     ProcessBlockAvailability(peer.m_id); | 
| 1379 |  |  | 
| 1380 | 0 |     if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { | 
| 1381 |  |         // This peer has nothing interesting. | 
| 1382 | 0 |         return; | 
| 1383 | 0 |     } | 
| 1384 |  |  | 
| 1385 |  |     // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort: | 
| 1386 |  |     // We can't reorg to this chain due to missing undo data until the background sync has finished, | 
| 1387 |  |     // so downloading blocks from it would be futile. | 
| 1388 | 0 |     const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()}; | 
| 1389 | 0 |     if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) { | 
| 1390 | 0 |         LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id); | 
| 1391 | 0 |         return; | 
| 1392 | 0 |     } | 
| 1393 |  |  | 
| 1394 |  |     // Bootstrap quickly by guessing a parent of our best tip is the forking point. | 
| 1395 |  |     // Guessing wrong in either direction is not a problem. | 
| 1396 |  |     // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download. | 
| 1397 | 0 |     if (state->pindexLastCommonBlock == nullptr || | 
| 1398 | 0 |         (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) { | 
| 1399 | 0 |         state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())]; | 
| 1400 | 0 |     } | 
| 1401 |  |  | 
| 1402 |  |     // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor | 
| 1403 |  |     // of its current tip anymore. Go back enough to fix that. | 
| 1404 | 0 |     state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); | 
| 1405 | 0 |     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) | 
| 1406 | 0 |         return; | 
| 1407 |  |  | 
| 1408 | 0 |     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; | 
| 1409 |  |     // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last | 
| 1410 |  |     // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to | 
| 1411 |  |     // download that next block if the window were 1 larger. | 
| 1412 | 0 |     int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; | 
| 1413 |  |  | 
| 1414 | 0 |     FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); | 
| 1415 | 0 | } | 
| 1416 |  |  | 
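
The window arithmetic at the end is worth spelling out: requests never extend more than BLOCK_DOWNLOAD_WINDOW heights past the last block shared with the peer, but the walk is allowed one extra height so a completely full window (a likely stall) can be distinguished from a peer with nothing left to offer. A sketch of that computation, with an illustrative constant:

```cpp
#include <algorithm>

constexpr int BLOCK_DOWNLOAD_WINDOW{1024}; // illustrative value

int MaxHeightToWalk(int last_common_height, int peer_best_height)
{
    const int window_end{last_common_height + BLOCK_DOWNLOAD_WINDOW};
    // The +1 probe: if the only fetchable block sits just past the window,
    // the download is stalled on whoever holds the first in-flight block.
    return std::min(peer_best_height, window_end + 1);
}
```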
| 1417 |  | void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) | 
| 1418 | 0 | { | 
| 1419 | 0 |     Assert(from_tip); | 
| 1420 | 0 |     Assert(target_block); | 
| 1421 |  |  | 
| 1422 | 0 |     if (vBlocks.size() >= count) { | 
| 1423 | 0 |         return; | 
| 1424 | 0 |     } | 
| 1425 |  |  | 
| 1426 | 0 |     vBlocks.reserve(count); | 
| 1427 | 0 |     CNodeState *state = Assert(State(peer.m_id)); | 
| 1428 |  |  | 
| 1429 | 0 |     if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { | 
| 1430 |  |         // This peer can't provide us the complete series of blocks leading up to the | 
| 1431 |  |         // assumeutxo snapshot base. | 
| 1432 |  |         // | 
| 1433 |  |         // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we | 
| 1434 |  |         // will eventually crash when we try to reorg to it. Let other logic | 
| 1435 |  |         // deal with whether we disconnect this peer. | 
| 1436 |  |         // | 
| 1437 |  |         // TODO at some point in the future, we might choose to request what blocks | 
| 1438 |  |         // this peer does have from the historical chain, despite it not having a | 
| 1439 |  |         // complete history beneath the snapshot base. | 
| 1440 | 0 |         return; | 
| 1441 | 0 |     } | 
| 1442 |  |  | 
| 1443 | 0 |     FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); | 
| 1444 | 0 | } | 
| 1445 |  |  | 
| 1446 |  | void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) | 
| 1447 | 0 | { | 
| 1448 | 0 |     std::vector<const CBlockIndex*> vToFetch; | 
| 1449 | 0 |     int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); | 
| 1450 | 0 |     bool is_limited_peer = IsLimitedPeer(peer); | 
| 1451 | 0 |     NodeId waitingfor = -1; | 
| 1452 | 0 |     while (pindexWalk->nHeight < nMaxHeight) { | 
| 1453 |  |         // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards | 
| 1454 |  |         // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive | 
| 1455 |  |         // as iterating over ~100 CBlockIndex* entries anyway. | 
| 1456 | 0 |         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); | 
| 1457 | 0 |         vToFetch.resize(nToFetch); | 
| 1458 | 0 |         pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); | 
| 1459 | 0 |         vToFetch[nToFetch - 1] = pindexWalk; | 
| 1460 | 0 |         for (unsigned int i = nToFetch - 1; i > 0; i--) { | 
| 1461 | 0 |             vToFetch[i - 1] = vToFetch[i]->pprev; | 
| 1462 | 0 |         } | 
| 1463 |  |  | 
| 1464 |  |         // Iterate over those blocks in vToFetch (in forward direction), adding the ones that | 
| 1465 |  |         // are not yet downloaded and not in flight to vBlocks. In the meantime, update | 
| 1466 |  |         // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's | 
| 1467 |  |         // already part of our chain (and therefore don't need it even if pruned). | 
| 1468 | 0 |         for (const CBlockIndex* pindex : vToFetch) { | 
| 1469 | 0 |             if (!pindex->IsValid(BLOCK_VALID_TREE)) { | 
| 1470 |  |                 // We consider the chain that this peer is on invalid. | 
| 1471 | 0 |                 return; | 
| 1472 | 0 |             } | 
| 1473 |  |  | 
| 1474 | 0 |             if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) { | 
| 1475 |  |                 // We wouldn't download this block or its descendants from this peer. | 
| 1476 | 0 |                 return; | 
| 1477 | 0 |             } | 
| 1478 |  |  | 
| 1479 | 0 |             if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) { | 
| 1480 | 0 |                 if (activeChain && pindex->HaveNumChainTxs()) { | 
| 1481 | 0 |                     state->pindexLastCommonBlock = pindex; | 
| 1482 | 0 |                 } | 
| 1483 | 0 |                 continue; | 
| 1484 | 0 |             } | 
| 1485 |  |  | 
| 1486 |  |             // Is block in-flight? | 
| 1487 | 0 |             if (IsBlockRequested(pindex->GetBlockHash())) { | 
| 1488 | 0 |                 if (waitingfor == -1) { | 
| 1489 |  |                     // This is the first already-in-flight block. | 
| 1490 | 0 |                     waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first; | 
| 1491 | 0 |                 } | 
| 1492 | 0 |                 continue; | 
| 1493 | 0 |             } | 
| 1494 |  |  | 
| 1495 |  |             // The block is not already downloaded, and not yet in flight. | 
| 1496 | 0 |             if (pindex->nHeight > nWindowEnd) { | 
| 1497 |  |                 // We reached the end of the window. | 
| 1498 | 0 |                 if (vBlocks.size() == 0 && waitingfor != peer.m_id) { | 
| 1499 |  |                     // We aren't able to fetch anything, but we would be if the download window were one larger. | 
| 1500 | 0 |                     if (nodeStaller) *nodeStaller = waitingfor; | 
| 1501 | 0 |                 } | 
| 1502 | 0 |                 return; | 
| 1503 | 0 |             } | 
| 1504 |  |  | 
| 1505 |  |             // Don't request blocks that go further than what limited peers can provide | 
| 1506 | 0 |             if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) { | 
| 1507 | 0 |                 continue; | 
| 1508 | 0 |             } | 
| 1509 |  |  | 
| 1510 | 0 |             vBlocks.push_back(pindex); | 
| 1511 | 0 |             if (vBlocks.size() == count) { | 
| 1512 | 0 |                 return; | 
| 1513 | 0 |             } | 
| 1514 | 0 |         } | 
| 1515 | 0 |     } | 
| 1516 | 0 | } | 
| 1517 |  |  | 
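
The inner loop above amortizes the cost of CBlockIndex::GetAncestor by jumping forward up to 128 heights in one call and then filling the batch backwards through pprev, a constant-time step. A standalone sketch of the batching over a simplified BlockIndex stand-in; precondition, as in the real code: max_height does not exceed the peer's best-known height:

```cpp
#include <algorithm>
#include <vector>

struct BlockIndex {
    int height{0};
    const BlockIndex* pprev{nullptr};
    const BlockIndex* GetAncestor(int h) const
    {
        const BlockIndex* walk{this};
        while (walk && walk->height > h) walk = walk->pprev;
        return walk;
    }
};

std::vector<const BlockIndex*> NextBatch(const BlockIndex* best_known,
                                         const BlockIndex* walk,
                                         int max_height, int want)
{
    if (walk->height >= max_height) return {};
    const int n{std::min(max_height - walk->height, std::max(want, 128))};
    std::vector<const BlockIndex*> batch(n);
    batch[n - 1] = best_known->GetAncestor(walk->height + n); // one jump forward
    for (int i = n - 1; i > 0; --i) batch[i - 1] = batch[i]->pprev; // cheap steps back
    return batch;
}
```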
| 1518 |  | } // namespace | 
| 1519 |  |  | 
| 1520 |  | void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) | 
| 1521 | 0 | { | 
| 1522 | 0 |     uint64_t my_services{peer.m_our_services}; | 
| 1523 | 0 |     const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())}; | 
| 1524 | 0 |     uint64_t nonce = pnode.GetLocalNonce(); | 
| 1525 | 0 |     const int nNodeStartingHeight{m_best_height}; | 
| 1526 | 0 |     NodeId nodeid = pnode.GetId(); | 
| 1527 | 0 |     CAddress addr = pnode.addr; | 
| 1528 |  |  | 
| 1529 | 0 |     CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService(); | 
| 1530 | 0 |     uint64_t your_services{addr.nServices}; | 
| 1531 |  |  | 
| 1532 | 0 |     const bool tx_relay{!RejectIncomingTxs(pnode)}; | 
| 1533 | 0 |     MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime, | 
| 1534 | 0 |             your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) | 
| 1535 | 0 |             my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) | 
| 1536 | 0 |             nonce, strSubVersion, nNodeStartingHeight, tx_relay); | 
| 1537 |  |  | 
| 1538 | 0 |     if (fLogIPs) { | 
| 1539 | 0 |         LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid); | 
| 1540 | 0 |     } else { | 
| 1541 | 0 |         LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid); | 
| 1542 | 0 |     } | 
| 1543 | 0 | } | 
| 1544 |  |  | 
| 1545 |  | void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) | 
| 1546 | 0 | { | 
| 1547 | 0 |     LOCK(cs_main); | 
| 1548 | 0 |     CNodeState *state = State(node); | 
| 1549 | 0 |     if (state) state->m_last_block_announcement = time_in_seconds; | 
| 1550 | 0 | } | 
| 1551 |  |  | 
| 1552 |  | void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services) | 
| 1553 | 0 | { | 
| 1554 | 0 |     NodeId nodeid = node.GetId(); | 
| 1555 | 0 |     { | 
| 1556 | 0 |         LOCK(cs_main); // For m_node_states | 
| 1557 | 0 |         m_node_states.try_emplace(m_node_states.end(), nodeid); | 
| 1558 | 0 |     } | 
| 1559 | 0 |     WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid)); | 
| 1560 |  |  | 
| 1561 | 0 |     if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) { | 
| 1562 | 0 |         our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM); | 
| 1563 | 0 |     } | 
| 1564 |  |  | 
| 1565 | 0 |     PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn()); | 
| 1566 | 0 |     { | 
| 1567 | 0 |         LOCK(m_peer_mutex); | 
| 1568 | 0 |         m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); | 
| 1569 | 0 |     } | 
| 1570 | 0 | } | 
| 1571 |  |  | 
| 1572 |  | void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) | 
| 1573 | 0 | { | 
| 1574 | 0 |     std::set<Txid> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); | 
| 1575 |  |  | 
| 1576 | 0 |     for (const auto& txid : unbroadcast_txids) { | 
| 1577 | 0 |         CTransactionRef tx = m_mempool.get(txid); | 
| 1578 |  |  | 
| 1579 | 0 |         if (tx != nullptr) { | 
| 1580 | 0 |             RelayTransaction(txid, tx->GetWitnessHash()); | 
| 1581 | 0 |         } else { | 
| 1582 | 0 |             m_mempool.RemoveUnbroadcastTx(txid, true); | 
| 1583 | 0 |         } | 
| 1584 | 0 |     } | 
| 1585 |  |  | 
| 1586 |  |     // Schedule next run for 10-15 minutes in the future. | 
| 1587 |  |     // We add randomness on every cycle to avoid the possibility of P2P fingerprinting. | 
| 1588 | 0 |     const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); | 
| 1589 | 0 |     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); | 
| 1590 | 0 | } | 
| 1591 |  |  | 
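
The delay computed above is a fixed ten-minute floor plus up to five minutes of randomness drawn fresh on each cycle, so the rebroadcast rhythm never becomes a stable fingerprint of the node. A minimal sketch of that jitter calculation:

```cpp
#include <chrono>
#include <cstdint>
#include <random>

using namespace std::chrono_literals;

std::chrono::milliseconds NextRebroadcastDelay(std::mt19937& rng)
{
    // 10min floor + uniform jitter in [0, 5min), re-drawn every cycle.
    std::uniform_int_distribution<int64_t> jitter{0, std::chrono::milliseconds{5min}.count()};
    return std::chrono::milliseconds{10min} + std::chrono::milliseconds{jitter(rng)};
}
```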
| 1592 |  | void PeerManagerImpl::FinalizeNode(const CNode& node) | 
| 1593 | 0 | { | 
| 1594 | 0 |     NodeId nodeid = node.GetId(); | 
| 1595 | 0 |     { | 
| 1596 | 0 |     LOCK(cs_main); | 
| 1597 | 0 |     { | 
| 1598 |  |         // We remove the PeerRef from m_peer_map here, but we don't always | 
| 1599 |  |         // destruct the Peer. Sometimes another thread is still holding a | 
| 1600 |  |         // PeerRef, so the refcount is >= 1. Be careful not to do any | 
| 1601 |  |         // processing here that assumes Peer won't be changed before it's | 
| 1602 |  |         // destructed. | 
| 1603 | 0 |         PeerRef peer = RemovePeer(nodeid); | 
| 1604 | 0 |         assert(peer != nullptr); | 
| 1605 | 0 |         m_wtxid_relay_peers -= peer->m_wtxid_relay; | 
| 1606 | 0 |         assert(m_wtxid_relay_peers >= 0); | 
| 1607 | 0 |     } | 
| 1608 | 0 |     CNodeState *state = State(nodeid); | 
| 1609 | 0 |     assert(state != nullptr); | 
| 1610 |  |  | 
| 1611 | 0 |     if (state->fSyncStarted) | 
| 1612 | 0 |         nSyncStarted--; | 
| 1613 |  |  | 
| 1614 | 0 |     for (const QueuedBlock& entry : state->vBlocksInFlight) { | 
| 1615 | 0 |         auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash()); | 
| 1616 | 0 |         while (range.first != range.second) { | 
| 1617 | 0 |             auto [node_id, list_it] = range.first->second; | 
| 1618 | 0 |             if (node_id != nodeid) { | 
| 1619 | 0 |                 range.first++; | 
| 1620 | 0 |             } else { | 
| 1621 | 0 |                 range.first = mapBlocksInFlight.erase(range.first); | 
| 1622 | 0 |             } | 
| 1623 | 0 |         } | 
| 1624 | 0 |     } | 
| 1625 | 0 |     { | 
| 1626 | 0 |         LOCK(m_tx_download_mutex); | 
| 1627 | 0 |         m_txdownloadman.DisconnectedPeer(nodeid); | 
| 1628 | 0 |     } | 
| 1629 | 0 |     if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); | 
| 1630 | 0 |     m_num_preferred_download_peers -= state->fPreferredDownload; | 
| 1631 | 0 |     m_peers_downloading_from -= (!state->vBlocksInFlight.empty()); | 
| 1632 | 0 |     assert(m_peers_downloading_from >= 0); | 
| 1633 | 0 |     m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; | 
| 1634 | 0 |     assert(m_outbound_peers_with_protect_from_disconnect >= 0); | 
| 1635 |  |  | 
| 1636 | 0 |     m_node_states.erase(nodeid); | 
| 1637 |  |  | 
| 1638 | 0 |     if (m_node_states.empty()) { | 
| 1639 |  |         // Do a consistency check after the last peer is removed. | 
| 1640 | 0 |         assert(mapBlocksInFlight.empty()); | 
| 1641 | 0 |         assert(m_num_preferred_download_peers == 0); | 
| 1642 | 0 |         assert(m_peers_downloading_from == 0); | 
| 1643 | 0 |         assert(m_outbound_peers_with_protect_from_disconnect == 0); | 
| 1644 | 0 |         assert(m_wtxid_relay_peers == 0); | 
| 1645 | 0 |         WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty()); | 
| 1646 | 0 |     } | 
| 1647 | 0 |     } // cs_main | 
| 1648 | 0 |     if (node.fSuccessfullyConnected && | 
| 1649 | 0 |         !node.IsBlockOnlyConn() && !node.IsInboundConn()) { | 
| 1650 |  |         // Only change visible addrman state for full outbound peers.  We don't | 
| 1651 |  |         // call Connected() for feeler connections since they don't have | 
| 1652 |  |         // fSuccessfullyConnected set. | 
| 1653 | 0 |         m_addrman.Connected(node.addr); | 
| 1654 | 0 |     } | 
| 1655 | 0 |     { | 
| 1656 | 0 |         LOCK(m_headers_presync_mutex); | 
| 1657 | 0 |         m_headers_presync_stats.erase(nodeid); | 
| 1658 | 0 |     } | 
| 1659 | 0 |     LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); | 
| 1660 | 0 | } | 
| 1661 |  |  | 
| 1662 |  | bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const | 
| 1663 | 0 | { | 
| 1664 |  |     // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services) | 
| 1665 | 0 |     return !(GetDesirableServiceFlags(services) & (~services)); | 
| 1666 | 0 | } | 
| 1667 |  |  | 
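
The shortcut in the comment holds because "the peer offers every desirable flag" is equivalent to "no desirable flag is missing from services", i.e. desirable & ~services is empty. A tiny compile-time check of the equivalence with stand-in flag values:

```cpp
#include <cstdint>

constexpr uint64_t NODE_NETWORK{1 << 0};
constexpr uint64_t NODE_WITNESS{1 << 3};

constexpr bool HasAllDesirable(uint64_t services, uint64_t desirable)
{
    return !(desirable & ~services); // no desirable bit missing
}

static_assert(HasAllDesirable(NODE_NETWORK | NODE_WITNESS, NODE_NETWORK | NODE_WITNESS));
static_assert(!HasAllDesirable(NODE_WITNESS, NODE_NETWORK | NODE_WITNESS));
```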
| 1668 |  | ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const | 
| 1669 | 0 | { | 
| 1670 | 0 |     if (services & NODE_NETWORK_LIMITED) { | 
| 1671 |  |         // Limited peers are desirable when we are close to the tip. | 
| 1672 | 0 |         if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { | 
| 1673 | 0 |             return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); | 
| 1674 | 0 |         } | 
| 1675 | 0 |     } | 
| 1676 | 0 |     return ServiceFlags(NODE_NETWORK | NODE_WITNESS); | 
| 1677 | 0 | } | 
| 1678 |  |  | 
| 1679 |  | PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const | 
| 1680 | 0 | { | 
| 1681 | 0 |     LOCK(m_peer_mutex); | 
| 1682 | 0 |     auto it = m_peer_map.find(id); | 
| 1683 | 0 |     return it != m_peer_map.end() ? it->second : nullptr; | 
| 1684 | 0 | } | 
| 1685 |  |  | 
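
The pattern above matches the m_peer_map documentation earlier in the file: take the shared_ptr under the map mutex, then release the lock; the returned PeerRef keeps the Peer alive even if another thread erases the map entry in the meantime. A self-contained sketch with globals standing in for the members:

```cpp
#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

using NodeId = int64_t;
struct Peer { NodeId id; };
using PeerRef = std::shared_ptr<Peer>;

std::mutex g_map_mutex;            // stand-in for m_peer_mutex
std::map<NodeId, PeerRef> g_peers; // stand-in for m_peer_map

PeerRef GetPeerRef(NodeId id)
{
    std::lock_guard lock{g_map_mutex}; // lock held only for the lookup
    auto it = g_peers.find(id);
    return it != g_peers.end() ? it->second : nullptr;
}
```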
| 1686 |  | PeerRef PeerManagerImpl::RemovePeer(NodeId id) | 
| 1687 | 0 | { | 
| 1688 | 0 |     PeerRef ret; | 
| 1689 | 0 |     LOCK(m_peer_mutex); | 
| 1690 | 0 |     auto it = m_peer_map.find(id); | 
| 1691 | 0 |     if (it != m_peer_map.end()) { | 
| 1692 | 0 |         ret = std::move(it->second); | 
| 1693 | 0 |         m_peer_map.erase(it); | 
| 1694 | 0 |     } | 
| 1695 | 0 |     return ret; | 
| 1696 | 0 | } | 
| 1697 |  |  | 
| 1698 |  | bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const | 
| 1699 | 0 | { | 
| 1700 | 0 |     { | 
| 1701 | 0 |         LOCK(cs_main); | 
| 1702 | 0 |         const CNodeState* state = State(nodeid); | 
| 1703 | 0 |         if (state == nullptr) | 
| 1704 | 0 |             return false; | 
| 1705 | 0 |         stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; | 
| 1706 | 0 |         stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1; | 
| 1707 | 0 |         for (const QueuedBlock& queue : state->vBlocksInFlight) { | 
| 1708 | 0 |             if (queue.pindex) | 
| 1709 | 0 |                 stats.vHeightInFlight.push_back(queue.pindex->nHeight); | 
| 1710 | 0 |         } | 
| 1711 | 0 |     } | 
| 1712 |  |  | 
| 1713 | 0 |     PeerRef peer = GetPeerRef(nodeid); | 
| 1714 | 0 |     if (peer == nullptr) return false; | 
| 1715 | 0 |     stats.their_services = peer->m_their_services; | 
| 1716 | 0 |     stats.m_starting_height = peer->m_starting_height; | 
| 1717 |  |     // It is common for nodes with good ping times to suddenly become lagged, | 
| 1718 |  |     // due to a new block arriving or other large transfer. | 
| 1719 |  |     // Merely reporting pingtime might fool the caller into thinking the node was still responsive, | 
| 1720 |  |     // since pingtime does not update until the ping is complete, which might take a while. | 
| 1721 |  |     // So we additionally report ping_wait: if a ping has been in flight | 
| 1722 |  |     // for an unusually long time, the caller can detect that immediately. | 
| 1723 | 0 |     auto ping_wait{0us}; | 
| 1724 | 0 |     if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) { | 
| 1725 | 0 |         ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); | 
| 1726 | 0 |     } | 
| 1727 |  |  | 
| 1728 | 0 |     if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 1729 | 0 |         stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); | 
| 1730 | 0 |         stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); | 
| 1731 | 0 |     } else { | 
| 1732 | 0 |         stats.m_relay_txs = false; | 
| 1733 | 0 |         stats.m_fee_filter_received = 0; | 
| 1734 | 0 |     } | 
| 1735 |  |  | 
| 1736 | 0 |     stats.m_ping_wait = ping_wait; | 
| 1737 | 0 |     stats.m_addr_processed = peer->m_addr_processed.load(); | 
| 1738 | 0 |     stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); | 
| 1739 | 0 |     stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); | 
| 1740 | 0 |     { | 
| 1741 | 0 |         LOCK(peer->m_headers_sync_mutex); | 
| 1742 | 0 |         if (peer->m_headers_sync) { | 
| 1743 | 0 |             stats.presync_height = peer->m_headers_sync->GetPresyncHeight(); | 
| 1744 | 0 |         } | 
| 1745 | 0 |     } | 
| 1746 | 0 |     stats.time_offset = peer->m_time_offset; | 
| 1747 |  |  | 
| 1748 | 0 |     return true; | 
| 1749 | 0 | } | 
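The ping_wait computation above reports how long an outstanding ping has been in flight. A minimal sketch with hypothetical values, using steady_clock as a stand-in for the node's clock:

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>

int main()
{
    using namespace std::chrono;
    const uint64_t ping_nonce_sent{0x1234};          // nonzero: a ping is outstanding
    const auto ping_start{steady_clock::now() - 3s}; // hypothetical: sent 3s ago

    microseconds ping_wait{0};
    if (ping_nonce_sent != 0) {
        ping_wait = duration_cast<microseconds>(steady_clock::now() - ping_start);
    }
    std::cout << ping_wait.count() << "us in flight\n"; // roughly 3,000,000
}
```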
| 1750 |  |  | 
| 1751 |  | std::vector<node::TxOrphanage::OrphanInfo> PeerManagerImpl::GetOrphanTransactions() | 
| 1752 | 0 | { | 
| 1753 | 0 |     LOCK(m_tx_download_mutex); | 
| 1754 | 0 |     return m_txdownloadman.GetOrphanTransactions(); | 
| 1755 | 0 | } | 
| 1756 |  |  | 
| 1757 |  | PeerManagerInfo PeerManagerImpl::GetInfo() const | 
| 1758 | 0 | { | 
| 1759 | 0 |     return PeerManagerInfo{ | 
| 1760 | 0 |         .median_outbound_time_offset = m_outbound_time_offsets.Median(), | 
| 1761 | 0 |         .ignores_incoming_txs = m_opts.ignore_incoming_txs, | 
| 1762 | 0 |     }; | 
| 1763 | 0 | } | 
| 1764 |  |  | 
| 1765 |  | void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) | 
| 1766 | 0 | { | 
| 1767 | 0 |     if (m_opts.max_extra_txs <= 0) | 
| 1768 | 0 |         return; | 
| 1769 | 0 |     if (!vExtraTxnForCompact.size()) | 
| 1770 | 0 |         vExtraTxnForCompact.resize(m_opts.max_extra_txs); | 
| 1771 | 0 |     vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx); | 
| 1772 | 0 |     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs; | 
| 1773 | 0 | } | 
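AddToCompactExtraTransactions keeps a fixed-capacity ring buffer that is allocated lazily on first insert and overwrites the oldest slot once full. A minimal sketch of the same pattern, with a hypothetical string entry standing in for the (wtxid, tx) pairs:

```cpp
#include <cstddef>
#include <string>
#include <vector>

class ExtraTxnRing
{
    std::vector<std::string> m_buf; // allocated lazily on first insert
    std::size_t m_next{0};
    const std::size_t m_capacity;

public:
    explicit ExtraTxnRing(std::size_t capacity) : m_capacity{capacity} {}

    void Add(const std::string& entry)
    {
        if (m_capacity == 0) return;                 // feature disabled
        if (m_buf.empty()) m_buf.resize(m_capacity); // first use: allocate all slots
        m_buf[m_next] = entry;                       // overwrite the oldest slot
        m_next = (m_next + 1) % m_capacity;          // advance and wrap around
    }
};

int main()
{
    ExtraTxnRing ring{3};
    for (const char* e : {"a", "b", "c", "d"}) ring.Add(e); // "d" overwrites "a"
}
```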
| 1774 |  |  | 
| 1775 |  | void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message) | 
| 1776 | 0 | { | 
| 1777 | 0 |     LOCK(peer.m_misbehavior_mutex); | 
| 1778 |  |  | 
| 1779 | 0 |     const std::string message_prefixed = message.empty() ? "" : (": " + message); | 
| 1780 | 0 |     peer.m_should_discourage = true; | 
| 1781 | 0 |     LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed); | 
| 1782 | 0 |     TRACEPOINT(net, misbehaving_connection, | 
| 1783 | 0 |         peer.m_id, | 
| 1784 | 0 |         message.c_str() | 
| 1785 | 0 |     ); | 
| 1786 | 0 | } | 
| 1787 |  |  | 
| 1788 |  | void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, | 
| 1789 |  |                                               bool via_compact_block, const std::string& message) | 
| 1790 | 0 | { | 
| 1791 | 0 |     PeerRef peer{GetPeerRef(nodeid)}; | 
| 1792 | 0 |     switch (state.GetResult()) { | 
| 1793 | 0 |     case BlockValidationResult::BLOCK_RESULT_UNSET: | 
| 1794 | 0 |         break; | 
| 1795 | 0 |     case BlockValidationResult::BLOCK_HEADER_LOW_WORK: | 
| 1796 |  |         // We didn't try to process the block because the header chain may have | 
| 1797 |  |         // too little work. | 
| 1798 | 0 |         break; | 
| 1799 |  |     // The node is providing invalid data: | 
| 1800 | 0 |     case BlockValidationResult::BLOCK_CONSENSUS: | 
| 1801 | 0 |     case BlockValidationResult::BLOCK_MUTATED: | 
| 1802 | 0 |         if (!via_compact_block) { | 
| 1803 | 0 |             if (peer) Misbehaving(*peer, message); | 
| 1804 | 0 |             return; | 
| 1805 | 0 |         } | 
| 1806 | 0 |         break; | 
| 1807 | 0 |     case BlockValidationResult::BLOCK_CACHED_INVALID: | 
| 1808 | 0 |         { | 
| 1809 |  |             // Discourage outbound (but not inbound) peers if on an invalid chain. | 
| 1810 |  |             // Exempt HB compact block peers. Manual connections are always protected from discouragement. | 
| 1811 | 0 |             if (peer && !via_compact_block && !peer->m_is_inbound) { | 
| 1812 | 0 |                 if (peer) Misbehaving(*peer, message); | 
| 1813 | 0 |                 return; | 
| 1814 | 0 |             } | 
| 1815 | 0 |             break; | 
| 1816 | 0 |         } | 
| 1817 | 0 |     case BlockValidationResult::BLOCK_INVALID_HEADER: | 
| 1818 | 0 |     case BlockValidationResult::BLOCK_INVALID_PREV: | 
| 1819 | 0 |         if (peer) Misbehaving(*peer, message); | 
| 1820 | 0 |         return; | 
| 1821 |  |     // Conflicting (but not necessarily invalid) data or different policy: | 
| 1822 | 0 |     case BlockValidationResult::BLOCK_MISSING_PREV: | 
| 1823 | 0 |         if (peer) Misbehaving(*peer, message); | 
| 1824 | 0 |         return; | 
| 1825 | 0 |     case BlockValidationResult::BLOCK_TIME_FUTURE: | 
| 1826 | 0 |         break; | 
| 1827 | 0 |     } | 
| 1828 | 0 |     if (message != "") { | 
| 1829 | 0 |         LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message); | 
| 1830 | 0 |     } | 
| 1831 | 0 | } | 
| 1832 |  |  | 
| 1833 |  | bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) | 
| 1834 | 0 | { | 
| 1835 | 0 |     AssertLockHeld(cs_main); | 
| 1836 | 0 |     if (m_chainman.ActiveChain().Contains(pindex)) return true; | 
| 1837 | 0 |     return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) && | 
| 1838 | 0 |            (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && | 
| 1839 | 0 |            (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); | 
| 1840 | 0 | } | 
| 1841 |  |  | 
| 1842 |  | std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) | 
| 1843 | 0 | { | 
| 1844 | 0 |     if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ..."; | 
| 1845 |  |  | 
| 1846 |  |     // Ensure this peer exists and hasn't been disconnected | 
| 1847 | 0 |     PeerRef peer = GetPeerRef(peer_id); | 
| 1848 | 0 |     if (peer == nullptr) return "Peer does not exist"; | 
| 1849 |  |  | 
| 1850 |  |     // Ignore pre-segwit peers | 
| 1851 | 0 |     if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer"; | 
| 1852 |  |  | 
| 1853 | 0 |     LOCK(cs_main); | 
| 1854 |  |  | 
| 1855 |  |     // Forget about all prior requests | 
| 1856 | 0 |     RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt); | 
| 1857 |  |  | 
| 1858 |  |     // Mark block as in-flight | 
| 1859 | 0 |     if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer"; | 
| 1860 |  |  | 
| 1861 |  |     // Construct message to request the block | 
| 1862 | 0 |     const uint256& hash{block_index.GetBlockHash()}; | 
| 1863 | 0 |     std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)}; | 
| 1864 |  |  | 
| 1865 |  |     // Send block request message to the peer | 
| 1866 | 0 |     bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) { | 
| 1867 | 0 |         this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs); | 
| 1868 | 0 |         return true; | 
| 1869 | 0 |     }); | 
| 1870 |  |  | 
| 1871 | 0 |     if (!success) return "Peer not fully connected"; | 
| 1872 |  |  | 
| 1873 | 0 |     LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", | 
| 1874 | 0 |                  hash.ToString(), peer_id); | 
| 1875 | 0 |     return std::nullopt; | 
| 1876 | 0 | } | 
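FetchBlock signals success as std::nullopt and failure as a human-readable string. A minimal sketch of that return convention, with a hypothetical FetchBlockStub standing in for the real call:

```cpp
#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in: nullopt on success, error text on failure.
std::optional<std::string> FetchBlockStub(bool peer_exists)
{
    if (!peer_exists) return "Peer does not exist";
    return std::nullopt; // request sent; the block arrives asynchronously
}

int main()
{
    if (const auto err{FetchBlockStub(false)}) {
        std::cout << "fetch failed: " << *err << '\n';
    }
}
```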
| 1877 |  |  | 
| 1878 |  | std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman, | 
| 1879 |  |                                                BanMan* banman, ChainstateManager& chainman, | 
| 1880 |  |                                                CTxMemPool& pool, node::Warnings& warnings, Options opts) | 
| 1881 | 0 | { | 
| 1882 | 0 |     return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts); | 
| 1883 | 0 | } | 
| 1884 |  |  | 
| 1885 |  | PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, | 
| 1886 |  |                                  BanMan* banman, ChainstateManager& chainman, | 
| 1887 |  |                                  CTxMemPool& pool, node::Warnings& warnings, Options opts) | 
| 1888 | 0 |     : m_rng{opts.deterministic_rng}, | 
| 1889 | 0 |       m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng}, | 
| 1890 | 0 |       m_chainparams(chainman.GetParams()), | 
| 1891 | 0 |       m_connman(connman), | 
| 1892 | 0 |       m_addrman(addrman), | 
| 1893 | 0 |       m_banman(banman), | 
| 1894 | 0 |       m_chainman(chainman), | 
| 1895 | 0 |       m_mempool(pool), | 
| 1896 | 0 |       m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.deterministic_rng}), | 
| 1897 | 0 |       m_warnings{warnings}, | 
| 1898 | 0 |       m_opts{opts} | 
| 1899 | 0 | { | 
| 1900 |  |     // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation. | 
| 1901 |  |     // This argument can go away after Erlay support is complete. | 
| 1902 | 0 |     if (opts.reconcile_txs) { | 
| 1903 | 0 |         m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION); | 
| 1904 | 0 |     } | 
| 1905 | 0 | } | 
| 1906 |  |  | 
| 1907 |  | void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) | 
| 1908 | 0 | { | 
| 1909 |  |     // Stale tip checking and peer eviction are on two different timers, but we | 
| 1910 |  |     // don't want them to get out of sync due to drift in the scheduler, so we | 
| 1911 |  |     // combine them in one function and schedule at the quicker (peer-eviction) | 
| 1912 |  |     // timer. | 
| 1913 | 0 |     static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); | 
| 1914 | 0 |     scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); | 
| 1915 |  |  | 
| 1916 |  |     // schedule next run for 10-15 minutes in the future | 
| 1917 | 0 |     const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); | 
| 1918 | 0 |     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); | 
| 1919 | 0 | } | 
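The broadcast reattempt above is rescheduled with jitter: a fixed 10-minute base plus a uniform random offset below 5 minutes, so runs land 10-15 minutes apart rather than in lockstep. A minimal sketch of that computation using the standard library's RNG (the real code uses FastRandomContext):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <random>

int main()
{
    using namespace std::chrono;
    std::mt19937_64 rng{std::random_device{}()};
    // Uniform in [0, 5min), mirroring randrange<milliseconds>(5min).
    std::uniform_int_distribution<int64_t> dist{0, duration_cast<milliseconds>(5min).count() - 1};
    const auto delta = 10min + milliseconds{dist(rng)}; // 10-15 minutes
    std::cout << duration_cast<seconds>(delta).count() << "s until next run\n";
}
```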
| 1920 |  |  | 
| 1921 |  | void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) | 
| 1922 | 0 | { | 
| 1923 |  |     // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding | 
| 1924 |  |     // m_tx_download_mutex waits on the mempool mutex. | 
| 1925 | 0 |     AssertLockNotHeld(m_mempool.cs); | 
| 1926 | 0 |     AssertLockNotHeld(m_tx_download_mutex); | 
| 1927 |  |  | 
| 1928 | 0 |     if (!is_ibd) { | 
| 1929 | 0 |         LOCK(m_tx_download_mutex); | 
| 1930 |  |         // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due | 
| 1931 |  |         // to a timelock. Reset the rejection filters to give those transactions another chance if we | 
| 1932 |  |         // see them again. | 
| 1933 | 0 |         m_txdownloadman.ActiveTipChange(); | 
| 1934 | 0 |     } | 
| 1935 | 0 | } | 
| 1936 |  |  | 
| 1937 |  | /** | 
| 1938 |  |  * Evict orphan txn pool entries based on a newly connected | 
| 1939 |  |  * block, remember the recently confirmed transactions, and delete tracked | 
| 1940 |  |  * announcements for them. Also save the time of the last tip update and | 
| 1941 |  |  * possibly reduce dynamic block stalling timeout. | 
| 1942 |  |  * possibly reduce the dynamic block stalling timeout. | 
| 1943 |  | void PeerManagerImpl::BlockConnected( | 
| 1944 |  |     ChainstateRole role, | 
| 1945 |  |     const std::shared_ptr<const CBlock>& pblock, | 
| 1946 |  |     const CBlockIndex* pindex) | 
| 1947 | 0 | { | 
| 1948 |  |     // Update this for all chainstate roles so that we don't mistakenly see peers | 
| 1949 |  |     // helping us do background IBD as having a stale tip. | 
| 1950 | 0 |     m_last_tip_update = GetTime<std::chrono::seconds>(); | 
| 1951 |  |  | 
| 1952 |  |     // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value | 
| 1953 | 0 |     auto stalling_timeout = m_block_stalling_timeout.load(); | 
| 1954 | 0 |     Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); | 
| 1955 | 0 |     if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { | 
| 1956 | 0 |         const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); | 
| 1957 | 0 |         if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { | 
| 1958 | 0 |             LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); | 
| 1959 | 0 |         } | 
| 1960 | 0 |     } | 
| 1961 |  |  | 
| 1962 |  |     // The following task can be skipped since we don't maintain a mempool for | 
| 1963 |  |     // the ibd/background chainstate. | 
| 1964 | 0 |     if (role == ChainstateRole::BACKGROUND) { | 
| 1965 | 0 |         return; | 
| 1966 | 0 |     } | 
| 1967 | 0 |     LOCK(m_tx_download_mutex); | 
| 1968 | 0 |     m_txdownloadman.BlockConnected(pblock); | 
| 1969 | 0 | } | 
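The 0.85 decay above shrinks a previously doubled stalling timeout back toward its default one connected block at a time, truncating via duration_cast and clamping with std::max. A worked sketch with hypothetical values (a 2-second default and a timeout previously grown to 64 seconds):

```cpp
#include <algorithm>
#include <chrono>
#include <iostream>

int main()
{
    using std::chrono::seconds;
    const seconds default_timeout{2}; // hypothetical default
    seconds timeout{64};              // hypothetical: doubled several times
    int blocks{0};
    while (timeout != default_timeout) {
        // Same shape as the code above: scale by 0.85, truncate, clamp.
        timeout = std::max(std::chrono::duration_cast<seconds>(timeout * 0.85), default_timeout);
        ++blocks;
    }
    std::cout << "back to default after " << blocks << " connected blocks\n"; // 16
}
```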
| 1970 |  |  | 
| 1971 |  | void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) | 
| 1972 | 0 | { | 
| 1973 | 0 |     LOCK(m_tx_download_mutex); | 
| 1974 | 0 |     m_txdownloadman.BlockDisconnected(); | 
| 1975 | 0 | } | 
| 1976 |  |  | 
| 1977 |  | /** | 
| 1978 |  |  * Maintain state about the best-seen block and fast-announce a compact block | 
| 1979 |  |  * to compatible peers. | 
| 1980 |  |  */ | 
| 1981 |  | void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) | 
| 1982 | 0 | { | 
| 1983 | 0 |     auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64()); | 
| 1984 |  |  | 
| 1985 | 0 |     LOCK(cs_main); | 
| 1986 |  |  | 
| 1987 | 0 |     if (pindex->nHeight <= m_highest_fast_announce) | 
| 1988 | 0 |         return; | 
| 1989 | 0 |     m_highest_fast_announce = pindex->nHeight; | 
| 1990 |  |  | 
| 1991 | 0 |     if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return; | 
| 1992 |  |  | 
| 1993 | 0 |     uint256 hashBlock(pblock->GetHash()); | 
| 1994 | 0 |     const std::shared_future<CSerializedNetMsg> lazy_ser{ | 
| 1995 | 0 |         std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })}; | 
| 1996 |  |  | 
| 1997 | 0 |     { | 
| 1998 | 0 |         auto most_recent_block_txs = std::make_unique<std::map<GenTxid, CTransactionRef>>(); | 
| 1999 | 0 |         for (const auto& tx : pblock->vtx) { | 
| 2000 | 0 |             most_recent_block_txs->emplace(tx->GetHash(), tx); | 
| 2001 | 0 |             most_recent_block_txs->emplace(tx->GetWitnessHash(), tx); | 
| 2002 | 0 |         } | 
| 2003 |  |  | 
| 2004 | 0 |         LOCK(m_most_recent_block_mutex); | 
| 2005 | 0 |         m_most_recent_block_hash = hashBlock; | 
| 2006 | 0 |         m_most_recent_block = pblock; | 
| 2007 | 0 |         m_most_recent_compact_block = pcmpctblock; | 
| 2008 | 0 |         m_most_recent_block_txs = std::move(most_recent_block_txs); | 
| 2009 | 0 |     } | 
| 2010 |  |  | 
| 2011 | 0 |     m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 2012 | 0 |         AssertLockHeld(::cs_main); | 
| 2013 |  |  | 
| 2014 | 0 |         if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) | 
| 2015 | 0 |             return; | 
| 2016 | 0 |         ProcessBlockAvailability(pnode->GetId()); | 
| 2017 | 0 |         CNodeState &state = *State(pnode->GetId()); | 
| 2018 |  |         // If the peer already has, or we already announced to them, the previous block, | 
| 2019 |  |         // but we don't think they have this one, go ahead and announce it | 
| 2020 | 0 |         if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { | 
| 2021 |  |  | 
| 2022 | 0 |             LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", | 
| 2023 | 0 |                     hashBlock.ToString(), pnode->GetId()); | 
| 2024 |  |  | 
| 2025 | 0 |             const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()}; | 
| 2026 | 0 |             PushMessage(*pnode, ser_cmpctblock.Copy()); | 
| 2027 | 0 |             state.pindexBestHeaderSent = pindex; | 
| 2028 | 0 |         } | 
| 2029 | 0 |     }); | 
| 2030 | 0 | } | 
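The std::async(std::launch::deferred) + shared_future combination above makes compact-block serialization lazy: it runs at most once, on the first get(), and only if some peer actually qualifies for the fast announcement. A minimal sketch of the same pattern with a hypothetical ExpensiveSerialize:

```cpp
#include <future>
#include <iostream>
#include <string>

// Hypothetical expensive step; runs at most once below.
std::string ExpensiveSerialize()
{
    std::cout << "serializing once\n";
    return "cmpctblock-bytes";
}

int main()
{
    const std::shared_future<std::string> lazy{
        std::async(std::launch::deferred, ExpensiveSerialize)};

    for (int peer = 0; peer < 3; ++peer) {
        if (peer == 0) continue;            // peer doesn't qualify: no work done
        const std::string& msg{lazy.get()}; // first get() runs the deferred task
        std::cout << "send to peer " << peer << ": " << msg << '\n';
    }
}
```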
| 2031 |  |  | 
| 2032 |  | /** | 
| 2033 |  |  * Update our best height and announce any block hashes which weren't previously | 
| 2034 |  |  * in m_chainman.ActiveChain() to our peers. | 
| 2035 |  |  */ | 
| 2036 |  | void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) | 
| 2037 | 0 | { | 
| 2038 | 0 |     SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()}); | 
| 2039 |  |  | 
| 2040 |  |     // Don't relay inventory during initial block download. | 
| 2041 | 0 |     if (fInitialDownload) return; | 
| 2042 |  |  | 
| 2043 |  |     // Find the hashes of all blocks that weren't previously in the best chain. | 
| 2044 | 0 |     std::vector<uint256> vHashes; | 
| 2045 | 0 |     const CBlockIndex *pindexToAnnounce = pindexNew; | 
| 2046 | 0 |     while (pindexToAnnounce != pindexFork) { | 
| 2047 | 0 |         vHashes.push_back(pindexToAnnounce->GetBlockHash()); | 
| 2048 | 0 |         pindexToAnnounce = pindexToAnnounce->pprev; | 
| 2049 | 0 |         if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { | 
| 2050 |  |             // Limit announcements in case of a huge reorganization. | 
| 2051 |  |             // Rely on the peer's synchronization mechanism in that case. | 
| 2052 | 0 |             break; | 
| 2053 | 0 |         } | 
| 2054 | 0 |     } | 
| 2055 |  |  | 
| 2056 | 0 |     { | 
| 2057 | 0 |         LOCK(m_peer_mutex); | 
| 2058 | 0 |         for (auto& it : m_peer_map) { | 
| 2059 | 0 |             Peer& peer = *it.second; | 
| 2060 | 0 |             LOCK(peer.m_block_inv_mutex); | 
| 2061 | 0 |             for (const uint256& hash : vHashes | std::views::reverse) { | 
| 2062 | 0 |                 peer.m_blocks_for_headers_relay.push_back(hash); | 
| 2063 | 0 |             } | 
| 2064 | 0 |         } | 
| 2065 | 0 |     } | 
| 2066 |  |  | 
| 2067 | 0 |     m_connman.WakeMessageHandler(); | 
| 2068 | 0 | } | 
| 2069 |  |  | 
| 2070 |  | /** | 
| 2071 |  |  * Handle invalid block rejection and consequent peer discouragement, maintain which | 
| 2072 |  |  * peers announce compact blocks. | 
| 2073 |  |  */ | 
| 2074 |  | void PeerManagerImpl::BlockChecked(const std::shared_ptr<const CBlock>& block, const BlockValidationState& state) | 
| 2075 | 0 | { | 
| 2076 | 0 |     LOCK(cs_main); | 
| 2077 |  |  | 
| 2078 | 0 |     const uint256 hash(block->GetHash()); | 
| 2079 | 0 |     std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash); | 
| 2080 |  |  | 
| 2081 |  |     // If the block failed validation, we know where it came from and we're still connected | 
| 2082 |  |     // to that peer, maybe punish. | 
| 2083 | 0 |     if (state.IsInvalid() && | 
| 2084 | 0 |         it != mapBlockSource.end() && | 
| 2085 | 0 |         State(it->second.first)) { | 
| 2086 | 0 |             MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second); | 
| 2087 | 0 |     } | 
| 2088 |  |     // Check that: | 
| 2089 |  |     // 1. The block is valid | 
| 2090 |  |     // 2. We're not in initial block download | 
| 2091 |  |     // 3. This is currently the best block we're aware of. We haven't updated | 
| 2092 |  |     //    the tip yet so we have no way to check this directly here. Instead we | 
| 2093 |  |     //    just check that there are currently no other blocks in flight. | 
| 2094 | 0 |     else if (state.IsValid() && | 
| 2095 | 0 |              !m_chainman.IsInitialBlockDownload() && | 
| 2096 | 0 |              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { | 
| 2097 | 0 |         if (it != mapBlockSource.end()) { | 
| 2098 | 0 |             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); | 
| 2099 | 0 |         } | 
| 2100 | 0 |     } | 
| 2101 | 0 |     if (it != mapBlockSource.end()) | 
| 2102 | 0 |         mapBlockSource.erase(it); | 
| 2103 | 0 | } | 
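The condition `mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()` above holds exactly when every in-flight request is for this block (the same block can be requested from several peers) or when nothing is in flight at all. A minimal sketch using a multimap, as mapBlocksInFlight is:

```cpp
#include <cassert>
#include <map>
#include <string>

int main()
{
    std::multimap<std::string, int> in_flight; // hash -> requesting peer

    assert(in_flight.count("h1") == in_flight.size()); // empty: trivially equal

    in_flight.emplace("h1", /*peer=*/0);
    in_flight.emplace("h1", /*peer=*/1);
    assert(in_flight.count("h1") == in_flight.size()); // only h1 is in flight

    in_flight.emplace("h2", /*peer=*/2);
    assert(in_flight.count("h1") != in_flight.size()); // some other block in flight
}
```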
| 2104 |  |  | 
| 2105 |  | ////////////////////////////////////////////////////////////////////////////// | 
| 2106 |  | // | 
| 2107 |  | // Messages | 
| 2108 |  | // | 
| 2109 |  |  | 
| 2110 |  | bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) | 
| 2111 | 0 | { | 
| 2112 | 0 |     return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; | 
| 2113 | 0 | } | 
| 2114 |  |  | 
| 2115 |  | void PeerManagerImpl::SendPings() | 
| 2116 | 0 | { | 
| 2117 | 0 |     LOCK(m_peer_mutex); | 
| 2118 | 0 |     for(auto& it : m_peer_map) it.second->m_ping_queued = true; | 
| 2119 | 0 | } | 
| 2120 |  |  | 
| 2121 |  | void PeerManagerImpl::RelayTransaction(const Txid& txid, const Wtxid& wtxid) | 
| 2122 | 0 | { | 
| 2123 | 0 |     LOCK(m_peer_mutex); | 
| 2124 | 0 |     for(auto& it : m_peer_map) { | 
| 2125 | 0 |         Peer& peer = *it.second; | 
| 2126 | 0 |         auto tx_relay = peer.GetTxRelay(); | 
| 2127 | 0 |         if (!tx_relay) continue; | 
| 2128 |  |  | 
| 2129 | 0 |         LOCK(tx_relay->m_tx_inventory_mutex); | 
| 2130 |  |         // Only queue transactions for announcement once the version handshake | 
| 2131 |  |         // is completed. The time of arrival for these transactions is | 
| 2132 |  |         // otherwise at risk of leaking to a spy, if the spy is able to | 
| 2133 |  |         // distinguish transactions received during the handshake from the rest | 
| 2134 |  |         // in the announcement. | 
| 2135 | 0 |         if (tx_relay->m_next_inv_send_time == 0s) continue; | 
| 2136 |  |  | 
| 2137 | 0 |         const uint256& hash{peer.m_wtxid_relay ? wtxid.ToUint256() : txid.ToUint256()}; | 
| 2138 | 0 |         if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) { | 
| 2139 | 0 |             tx_relay->m_tx_inventory_to_send.insert(wtxid); | 
| 2140 | 0 |         } | 
| 2141 | 0 |     } | 
| 2142 | 0 | } | 
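RelayTransaction checks the peer's known-inventory filter under whichever identifier the peer announces with (wtxid after BIP 339 negotiation, txid otherwise), while the to-send set is always keyed by wtxid. A minimal sketch of that choice, with hypothetical stand-in types:

```cpp
#include <cassert>
#include <set>
#include <string>

struct TxIds { std::string txid, wtxid; }; // hypothetical stand-in

void QueueForRelay(const TxIds& ids, bool peer_wtxid_relay,
                   const std::set<std::string>& peer_known,
                   std::set<std::string>& to_send)
{
    // Check the peer's filter under the id the peer announces with...
    const std::string& hash{peer_wtxid_relay ? ids.wtxid : ids.txid};
    // ...but the to-send set is always keyed by wtxid.
    if (!peer_known.count(hash)) to_send.insert(ids.wtxid);
}

int main()
{
    std::set<std::string> known{"tx-abc"}, to_send;
    QueueForRelay({"tx-abc", "wtx-abc"}, /*peer_wtxid_relay=*/false, known, to_send);
    assert(to_send.empty()); // peer already knows the txid
}
```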
| 2143 |  |  | 
| 2144 |  | void PeerManagerImpl::RelayAddress(NodeId originator, | 
| 2145 |  |                                    const CAddress& addr, | 
| 2146 |  |                                    bool fReachable) | 
| 2147 | 0 | { | 
| 2148 |  |     // We choose the same nodes within a given 24h window (if the list of connected | 
| 2149 |  |     // nodes does not change) and we don't relay to nodes that already know an | 
| 2150 |  |     // address. So within 24h we will likely relay a given address once. This is to | 
| 2151 |  |     // prevent a peer from unjustly giving their address better propagation by sending | 
| 2152 |  |     // it to us repeatedly. | 
| 2153 |  |  | 
| 2154 | 0 |     if (!fReachable && !addr.IsRelayable()) return; | 
| 2155 |  |  | 
| 2156 |  |     // Relay to a limited number of other nodes | 
| 2157 |  |     // Use deterministic randomness to send to the same nodes for 24 hours | 
| 2158 |  |     // at a time so the m_addr_knowns of the chosen nodes prevent repeats | 
| 2159 | 0 |     const uint64_t hash_addr{CServiceHash(0, 0)(addr)}; | 
| 2160 | 0 |     const auto current_time{GetTime<std::chrono::seconds>()}; | 
| 2161 |  |     // Adding address hash makes exact rotation time different per address, while preserving periodicity. | 
| 2162 | 0 |     const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)}; | 
| 2163 | 0 |     const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) | 
| 2164 | 0 |                                 .Write(hash_addr) | 
| 2165 | 0 |                                 .Write(time_addr)}; | 
| 2166 |  |  | 
| 2167 |  |     // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. | 
| 2168 | 0 |     unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1; | 
| 2169 |  |  | 
| 2170 | 0 |     std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}}; | 
| 2171 | 0 |     assert(nRelayNodes <= best.size()); | 
| 2172 |  |  | 
| 2173 | 0 |     LOCK(m_peer_mutex); | 
| 2174 |  |  | 
| 2175 | 0 |     for (auto& [id, peer] : m_peer_map) { | 
| 2176 | 0 |         if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) { | 
| 2177 | 0 |             uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize(); | 
| 2178 | 0 |             for (unsigned int i = 0; i < nRelayNodes; i++) { | 
| 2179 | 0 |                  if (hashKey > best[i].first) { | 
| 2180 | 0 |                      std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); | 
| 2181 | 0 |                      best[i] = std::make_pair(hashKey, peer.get()); | 
| 2182 | 0 |                      break; | 
| 2183 | 0 |                  } | 
| 2184 | 0 |             } | 
| 2185 | 0 |         } | 
| 2186 | 0 |     }; | 
| 2187 |  |  | 
| 2188 | 0 |     for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { | 
| 2189 | 0 |         PushAddress(*best[i].second, addr); | 
| 2190 | 0 |     } | 
| 2191 | 0 | } | 
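The selection above keeps the up-to-two peers with the highest per-peer hash keys by maintaining a small array sorted in descending key order and shifting lower entries down on insert. A minimal sketch of that top-N insertion (the shift here is written as an explicit backward copy):

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <utility>

int main()
{
    constexpr unsigned N{2};
    std::array<std::pair<uint64_t, int>, N> best{{{0, -1}, {0, -1}}};

    const std::pair<uint64_t, int> candidates[]{{5, 100}, {9, 101}, {7, 102}};
    for (const auto& [key, peer] : candidates) {
        for (unsigned i = 0; i < N; ++i) {
            if (key > best[i].first) {
                // Shift lower-ranked entries down one slot, dropping the last.
                for (unsigned j = N - 1; j > i; --j) best[j] = best[j - 1];
                best[i] = {key, peer};
                break;
            }
        }
    }
    assert(best[0].second == 101 && best[1].second == 102); // keys 9 and 7 win
}
```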
| 2192 |  |  | 
| 2193 |  | void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) | 
| 2194 | 0 | { | 
| 2195 | 0 |     std::shared_ptr<const CBlock> a_recent_block; | 
| 2196 | 0 |     std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; | 
| 2197 | 0 |     { | 
| 2198 | 0 |         LOCK(m_most_recent_block_mutex); | 
| 2199 | 0 |         a_recent_block = m_most_recent_block; | 
| 2200 | 0 |         a_recent_compact_block = m_most_recent_compact_block; | 
| 2201 | 0 |     } | 
| 2202 |  |  | 
| 2203 | 0 |     bool need_activate_chain = false; | 
| 2204 | 0 |     { | 
| 2205 | 0 |         LOCK(cs_main); | 
| 2206 | 0 |         const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); | 
| 2207 | 0 |         if (pindex) { | 
| 2208 | 0 |             if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && | 
| 2209 | 0 |                     pindex->IsValid(BLOCK_VALID_TREE)) { | 
| 2210 |  |                 // If we have the block and all of its parents, but have not yet validated it, | 
| 2211 |  |                 // we might be in the middle of connecting it (ie in the unlock of cs_main | 
| 2212 |  |                 // before ActivateBestChain but after AcceptBlock). | 
| 2213 |  |                 // In this case, we need to run ActivateBestChain prior to checking the relay | 
| 2214 |  |                 // conditions below. | 
| 2215 | 0 |                 need_activate_chain = true; | 
| 2216 | 0 |             } | 
| 2217 | 0 |         } | 
| 2218 | 0 |     } // release cs_main before calling ActivateBestChain | 
| 2219 | 0 |     if (need_activate_chain) { | 
| 2220 | 0 |         BlockValidationState state; | 
| 2221 | 0 |         if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { | 
| 2222 | 0 |             LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); | 
| 2223 | 0 |         } | 
| 2224 | 0 |     } | 
| 2225 |  |  | 
| 2226 | 0 |     const CBlockIndex* pindex{nullptr}; | 
| 2227 | 0 |     const CBlockIndex* tip{nullptr}; | 
| 2228 | 0 |     bool can_direct_fetch{false}; | 
| 2229 | 0 |     FlatFilePos block_pos{}; | 
| 2230 | 0 |     { | 
| 2231 | 0 |         LOCK(cs_main); | 
| 2232 | 0 |         pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); | 
| 2233 | 0 |         if (!pindex) { | 
| 2234 | 0 |             return; | 
| 2235 | 0 |         } | 
| 2236 | 0 |         if (!BlockRequestAllowed(pindex)) { | 
| 2237 | 0 |             LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId()); | 
| 2238 | 0 |             return; | 
| 2239 | 0 |         } | 
| 2240 |  |         // disconnect node in case we have reached the outbound limit for serving historical blocks | 
| 2241 | 0 |         if (m_connman.OutboundTargetReached(true) && | 
| 2242 | 0 |             (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && | 
| 2243 | 0 |             !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target | 
| 2244 | 0 |         ) { | 
| 2245 | 0 |             LogDebug(BCLog::NET, "historical block serving limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2246 | 0 |             pfrom.fDisconnect = true; | 
| 2247 | 0 |             return; | 
| 2248 | 0 |         } | 
| 2249 | 0 |         tip = m_chainman.ActiveChain().Tip(); | 
| 2250 |  |         // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold | 
| 2251 | 0 |         if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && ( | 
| 2252 | 0 |                 (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) | 
| 2253 | 0 |            )) { | 
| 2254 | 0 |             LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2255 |  |             // disconnect the node and prevent it from stalling (it would otherwise wait for the missing block) | 
| 2256 | 0 |             pfrom.fDisconnect = true; | 
| 2257 | 0 |             return; | 
| 2258 | 0 |         } | 
| 2259 |  |         // Pruned nodes may have deleted the block, so check whether | 
| 2260 |  |         // it's available before trying to send. | 
| 2261 | 0 |         if (!(pindex->nStatus & BLOCK_HAVE_DATA)) { | 
| 2262 | 0 |             return; | 
| 2263 | 0 |         } | 
| 2264 | 0 |         can_direct_fetch = CanDirectFetch(); | 
| 2265 | 0 |         block_pos = pindex->GetBlockPos(); | 
| 2266 | 0 |     } | 
| 2267 |  |  | 
| 2268 | 0 |     std::shared_ptr<const CBlock> pblock; | 
| 2269 | 0 |     if (a_recent_block && a_recent_block->GetHash() == inv.hash) { | 
| 2270 | 0 |         pblock = a_recent_block; | 
| 2271 | 0 |     } else if (inv.IsMsgWitnessBlk()) { | 
| 2272 |  |         // Fast-path: in this case it is possible to serve the block directly from disk, | 
| 2273 |  |         // as the network format matches the format on disk | 
| 2274 | 0 |         std::vector<std::byte> block_data; | 
| 2275 | 0 |         if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) { | 
| 2276 | 0 |             if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { | 
| 2277 | 0 |                 LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2278 | 0 |             } else { | 
| 2279 | 0 |                 LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2280 | 0 |             } | 
| 2281 | 0 |             pfrom.fDisconnect = true; | 
| 2282 | 0 |             return; | 
| 2283 | 0 |         } | 
| 2284 | 0 |         MakeAndPushMessage(pfrom, NetMsgType::BLOCK, std::span{block_data}); | 
| 2285 |  |         // Don't set pblock as we've sent the block | 
| 2286 | 0 |     } else { | 
| 2287 |  |         // Send block from disk | 
| 2288 | 0 |         std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); | 
| 2289 | 0 |         if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos, inv.hash)) { | 
| 2290 | 0 |             if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { | 
| 2291 | 0 |                 LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2292 | 0 |             } else { | 
| 2293 | 0 |                 LogError("Cannot load block from disk, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2294 | 0 |             } | 
| 2295 | 0 |             pfrom.fDisconnect = true; | 
| 2296 | 0 |             return; | 
| 2297 | 0 |         } | 
| 2298 | 0 |         pblock = pblockRead; | 
| 2299 | 0 |     } | 
| 2300 | 0 |     if (pblock) { | 
| 2301 | 0 |         if (inv.IsMsgBlk()) { | 
| 2302 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock)); | 
| 2303 | 0 |         } else if (inv.IsMsgWitnessBlk()) { | 
| 2304 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); | 
| 2305 | 0 |         } else if (inv.IsMsgFilteredBlk()) { | 
| 2306 | 0 |             bool sendMerkleBlock = false; | 
| 2307 | 0 |             CMerkleBlock merkleBlock; | 
| 2308 | 0 |             if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { | 
| 2309 | 0 |                 LOCK(tx_relay->m_bloom_filter_mutex); | 
| 2310 | 0 |                 if (tx_relay->m_bloom_filter) { | 
| 2311 | 0 |                     sendMerkleBlock = true; | 
| 2312 | 0 |                     merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter); | 
| 2313 | 0 |                 } | 
| 2314 | 0 |             } | 
| 2315 | 0 |             if (sendMerkleBlock) { | 
| 2316 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock); | 
| 2317 |  |                 // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see | 
| 2318 |  |                 // This avoids hurting performance by pointlessly requiring a round-trip | 
| 2319 |  |                 // Note that there is currently no way for a node to request any single transactions we didn't send here - | 
| 2320 |  |                 // they must either disconnect and retry or request the full block. | 
| 2321 |  |                 // Thus, the protocol spec as written allows us to provide duplicate txn here, | 
| 2322 |  |                 // however we MUST always provide at least what the remote peer needs | 
| 2323 | 0 |                 for (const auto& [tx_idx, _] : merkleBlock.vMatchedTxn) | 
| 2324 | 0 |                     MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[tx_idx])); | 
| 2325 | 0 |             } | 
| 2326 |  |             // else | 
| 2327 |  |             // no response | 
| 2328 | 0 |         } else if (inv.IsMsgCmpctBlk()) { | 
| 2329 |  |             // If a peer is asking for old blocks, we're almost guaranteed | 
| 2330 |  |             // they won't have a useful mempool to match against a compact block, | 
| 2331 |  |             // and we don't feel like constructing the object for them, so | 
| 2332 |  |             // instead we respond with the full, non-compact block. | 
| 2333 | 0 |             if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) { | 
| 2334 | 0 |                 if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == inv.hash) { | 
| 2335 | 0 |                     MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); | 
| 2336 | 0 |                 } else { | 
| 2337 | 0 |                     CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; | 
| 2338 | 0 |                     MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock); | 
| 2339 | 0 |                 } | 
| 2340 | 0 |             } else { | 
| 2341 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); | 
| 2342 | 0 |             } | 
| 2343 | 0 |         } | 
| 2344 | 0 |     } | 
| 2345 |  |  | 
| 2346 | 0 |     { | 
| 2347 | 0 |         LOCK(peer.m_block_inv_mutex); | 
| 2348 |  |         // Trigger the peer node to send a getblocks request for the next batch of inventory | 
| 2349 | 0 |         if (inv.hash == peer.m_continuation_block) { | 
| 2350 |  |             // Send immediately. This must be sent even if redundant, | 
| 2351 |  |             // and we want it right after the last block so they don't | 
| 2352 |  |             // wait for other stuff first. | 
| 2353 | 0 |             std::vector<CInv> vInv; | 
| 2354 | 0 |             vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash()); | 
| 2355 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::INV, vInv); | 
| 2356 | 0 |             peer.m_continuation_block.SetNull(); | 
| 2357 | 0 |         } | 
| 2358 | 0 |     } | 
| 2359 | 0 | } | 
| 2360 |  |  | 
| 2361 |  | CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) | 
| 2362 | 0 | { | 
| 2363 |  |     // If a tx was in the mempool prior to the last INV for this peer, permit the request. | 
| 2364 | 0 |     auto txinfo{std::visit( | 
| 2365 | 0 |         [&](const auto& id) EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex) { | 
| 2366 | 0 |             return m_mempool.info_for_relay(id, tx_relay.m_last_inv_sequence); | 
| 2367 | 0 |         }, | 
| 2368 | 0 |         gtxid)}; | 
| 2369 | 0 |     if (txinfo.tx) { | 
| 2370 | 0 |         return std::move(txinfo.tx); | 
| 2371 | 0 |     } | 
| 2372 |  |  | 
| 2373 |  |     // Or it might be from the most recent block | 
| 2374 | 0 |     { | 
| 2375 | 0 |         LOCK(m_most_recent_block_mutex); | 
| 2376 | 0 |         if (m_most_recent_block_txs != nullptr) { | 
| 2377 | 0 |             auto it = m_most_recent_block_txs->find(gtxid); | 
| 2378 | 0 |             if (it != m_most_recent_block_txs->end()) return it->second; | 
| 2379 | 0 |         } | 
| 2380 | 0 |     } | 
| 2381 |  |  | 
| 2382 | 0 |     return {}; | 
| 2383 | 0 | } | 
| 2384 |  |  | 
| 2385 |  | void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) | 
| 2386 | 0 | { | 
| 2387 | 0 |     AssertLockNotHeld(cs_main); | 
| 2388 |  |  | 
| 2389 | 0 |     auto tx_relay = peer.GetTxRelay(); | 
| 2390 |  |  | 
| 2391 | 0 |     std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); | 
| 2392 | 0 |     std::vector<CInv> vNotFound; | 
| 2393 |  |  | 
| 2394 |  |     // Process as many TX items from the front of the getdata queue as | 
| 2395 |  |     // possible, since they're common and it's efficient to batch process | 
| 2396 |  |     // them. | 
| 2397 | 0 |     while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { | 
| 2398 | 0 |         if (interruptMsgProc) return; | 
| 2399 |  |         // The send buffer provides backpressure. If there's no space in | 
| 2400 |  |         // the buffer, pause processing until the next call. | 
| 2401 | 0 |         if (pfrom.fPauseSend) break; | 
| 2402 |  |  | 
| 2403 | 0 |         const CInv &inv = *it++; | 
| 2404 |  |  | 
| 2405 | 0 |         if (tx_relay == nullptr) { | 
| 2406 |  |             // Ignore GETDATA requests for transactions from block-relay-only | 
| 2407 |  |             // peers and peers that asked us not to announce transactions. | 
| 2408 | 0 |             continue; | 
| 2409 | 0 |         } | 
| 2410 |  |  | 
| 2411 | 0 |         if (auto tx{FindTxForGetData(*tx_relay, ToGenTxid(inv))}) { | 
| 2412 |  |             // WTX and WITNESS_TX imply we serialize with witness | 
| 2413 | 0 |             const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS); | 
| 2414 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx)); | 
| 2415 | 0 |             m_mempool.RemoveUnbroadcastTx(tx->GetHash()); | 
| 2416 | 0 |         } else { | 
| 2417 | 0 |             vNotFound.push_back(inv); | 
| 2418 | 0 |         } | 
| 2419 | 0 |     } | 
| 2420 |  |  | 
| 2421 |  |     // Only process one BLOCK item per call, since they're uncommon and can be | 
| 2422 |  |     // expensive to process. | 
| 2423 | 0 |     if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { | 
| 2424 | 0 |         const CInv &inv = *it++; | 
| 2425 | 0 |         if (inv.IsGenBlkMsg()) { | 
| 2426 | 0 |             ProcessGetBlockData(pfrom, peer, inv); | 
| 2427 | 0 |         } | 
| 2428 |  |         // else: If the first item on the queue is an unknown type, we erase it | 
| 2429 |  |         // and continue processing the queue on the next call. | 
| 2430 |  |         // NOTE: previously we wouldn't erase it, so a peer sending us a malformed GETDATA could | 
| 2431 |  |         // keep us from ever making progress, leaving this thread using 100% of its allocated CPU. See | 
| 2432 |  |         // https://bitcoincore.org/en/2024/07/03/disclose-getdata-cpu. | 
| 2433 | 0 |     } | 
| 2434 |  |  | 
| 2435 | 0 |     peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it); | 
| 2436 |  |  | 
| 2437 | 0 |     if (!vNotFound.empty()) { | 
| 2438 |  |         // Let the peer know that we didn't find what it asked for, so it doesn't | 
| 2439 |  |         // have to wait around forever. | 
| 2440 |  |         // SPV clients care about this message: it's needed when they are | 
| 2441 |  |         // recursively walking the dependencies of relevant unconfirmed | 
| 2442 |  |         // transactions. SPV clients want to do that because they want to know | 
| 2443 |  |         // about (and store and rebroadcast and risk analyze) the dependencies | 
| 2444 |  |         // of transactions relevant to them, without having to download the | 
| 2445 |  |         // entire memory pool. | 
| 2446 |  |         // Also, other nodes can use these messages to automatically request a | 
| 2447 |  |         // transaction from some other peer that announced it, and stop | 
| 2448 |  |         // waiting for us to respond. | 
| 2449 |  |         // In normal operation, we often send NOTFOUND messages for parents of | 
| 2450 |  |         // transactions that we relay; if a peer is missing a parent, they may | 
| 2451 |  |         // assume we have them and request the parents from us. | 
| 2452 | 0 |         MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound); | 
| 2453 | 0 |     } | 
| 2454 | 0 | } | 
| 2455 |  |  | 
| 2456 |  | uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const | 
| 2457 | 0 | { | 
| 2458 | 0 |     uint32_t nFetchFlags = 0; | 
| 2459 | 0 |     if (CanServeWitnesses(peer)) { | 
| 2460 | 0 |         nFetchFlags |= MSG_WITNESS_FLAG; | 
| 2461 | 0 |     } | 
| 2462 | 0 |     return nFetchFlags; | 
| 2463 | 0 | } | 
| 2464 |  |  | 
| 2465 |  | void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req) | 
| 2466 | 0 | { | 
| 2467 | 0 |     BlockTransactions resp(req); | 
| 2468 | 0 |     unsigned int tx_requested_size = 0; | 
| 2469 | 0 |     for (size_t i = 0; i < req.indexes.size(); i++) { | 
| 2470 | 0 |         if (req.indexes[i] >= block.vtx.size()) { | 
| 2471 | 0 |             Misbehaving(peer, "getblocktxn with out-of-bounds tx indices"); | 
| 2472 | 0 |             return; | 
| 2473 | 0 |         } | 
| 2474 | 0 |         resp.txn[i] = block.vtx[req.indexes[i]]; | 
| 2475 | 0 |         tx_requested_size += resp.txn[i]->GetTotalSize(); | 
| 2476 | 0 |     } | 
| 2477 |  |  | 
| 2478 | 0 |     LogDebug(BCLog::CMPCTBLOCK, "Peer %d sent us a GETBLOCKTXN for block %s, sending a BLOCKTXN with %u txns. (%u bytes)\n", pfrom.GetId(), block.GetHash().ToString(), resp.txn.size(), tx_requested_size); | 
| 2479 | 0 |     MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp); | 
| 2480 | 0 | } | 
| 2481 |  |  | 
| 2482 |  | bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer) | 
| 2483 | 0 | { | 
| 2484 |  |     // Do these headers have proof-of-work matching what's claimed? | 
| 2485 | 0 |     if (!HasValidProofOfWork(headers, consensusParams)) { | 
| 2486 | 0 |         Misbehaving(peer, "header with invalid proof of work"); | 
| 2487 | 0 |         return false; | 
| 2488 | 0 |     } | 
| 2489 |  |  | 
| 2490 |  |     // Are these headers connected to each other? | 
| 2491 | 0 |     if (!CheckHeadersAreContinuous(headers)) { | 
| 2492 | 0 |         Misbehaving(peer, "non-continuous headers sequence"); | 
| 2493 | 0 |         return false; | 
| 2494 | 0 |     } | 
| 2495 | 0 |     return true; | 
| 2496 | 0 | } | 
| 2497 |  |  | 
| 2498 |  | arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() | 
| 2499 | 0 | { | 
| 2500 | 0 |     arith_uint256 near_chaintip_work = 0; | 
| 2501 | 0 |     LOCK(cs_main); | 
| 2502 | 0 |     if (m_chainman.ActiveChain().Tip() != nullptr) { | 
| 2503 | 0 |         const CBlockIndex *tip = m_chainman.ActiveChain().Tip(); | 
| 2504 |  |         // Use a 144 block buffer, so that we'll accept headers that fork from | 
| 2505 |  |         // near our tip. | 
| 2506 | 0 |         near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork); | 
| 2507 | 0 |     } | 
| 2508 | 0 |     return std::max(near_chaintip_work, m_chainman.MinimumChainWork()); | 
| 2509 | 0 | } | 
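The threshold above subtracts 144 blocks' worth of proof from the tip's cumulative work (clamped so the subtraction cannot go negative) and floors the result at the configured minimum chain work. A worked sketch with hypothetical plain-integer work values in place of arith_uint256:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t tip_chain_work{1'000'000};  // hypothetical cumulative work at tip
    const uint64_t per_block_proof{100};       // hypothetical work per recent block
    const uint64_t minimum_chain_work{900'000};

    // Clamp the buffer so the subtraction cannot underflow.
    const uint64_t buffer{std::min<uint64_t>(144 * per_block_proof, tip_chain_work)};
    const uint64_t near_tip{tip_chain_work - buffer}; // 1'000'000 - 14'400
    std::cout << std::max(near_tip, minimum_chain_work) << '\n'; // 985600
}
```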
| 2510 |  |  | 
| 2511 |  | /** | 
| 2512 |  |  * Special handling for unconnecting headers that might be part of a block | 
| 2513 |  |  * announcement. | 
| 2514 |  |  * | 
| 2515 |  |  * We'll send a getheaders message in response to try to connect the chain. | 
| 2516 |  |  */ | 
| 2517 |  | void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, | 
| 2518 |  |         const std::vector<CBlockHeader>& headers) | 
| 2519 | 0 | { | 
| 2520 |  |     // Try to fill in the missing headers. | 
| 2521 | 0 |     const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)}; | 
| 2522 | 0 |     if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) { | 
| 2523 | 0 |         LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n", | 
| 2524 | 0 |             headers[0].GetHash().ToString(), | 
| 2525 | 0 |             headers[0].hashPrevBlock.ToString(), | 
| 2526 | 0 |             best_header->nHeight, | 
| 2527 | 0 |             pfrom.GetId()); | 
| 2528 | 0 |     } | 
| 2529 |  |  | 
| 2530 |  |     // Set hashLastUnknownBlock for this peer, so that if we | 
| 2531 |  |     // eventually get the headers - even from a different peer - | 
| 2532 |  |     // we can use this peer to download. | 
| 2533 | 0 |     WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash())); | 
| 2534 | 0 | } | 
| 2535 |  |  | 
| 2536 |  | bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const | 
| 2537 | 0 | { | 
| 2538 | 0 |     uint256 hashLastBlock; | 
| 2539 | 0 |     for (const CBlockHeader& header : headers) { | 
| 2540 | 0 |         if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { | 
| 2541 | 0 |             return false; | 
| 2542 | 0 |         } | 
| 2543 | 0 |         hashLastBlock = header.GetHash(); | 
| 2544 | 0 |     } | 
| 2545 | 0 |     return true; | 
| 2546 | 0 | } | 
| 2547 |  |  | 
| 2548 |  | bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers) | 
| 2549 | 0 | { | 
| 2550 | 0 |     if (peer.m_headers_sync) { | 
| 2551 | 0 |         auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result); | 
| 2552 |  |         // If it is a valid continuation, we should treat the existing getheaders request as responded to. | 
| 2553 | 0 |         if (result.success) peer.m_last_getheaders_timestamp = {}; | 
| 2554 | 0 |         if (result.request_more) { | 
| 2555 | 0 |             auto locator = peer.m_headers_sync->NextHeadersRequestLocator(); | 
| 2556 |  |             // If we were instructed to ask for a locator, it should not be empty. | 
| 2557 | 0 |             Assume(!locator.vHave.empty()); | 
| 2558 |  |             // We can only be instructed to request more if processing was successful. | 
| 2559 | 0 |             Assume(result.success); | 
| 2560 | 0 |             if (!locator.vHave.empty()) { | 
| 2561 |  |                 // It should be impossible for the getheaders request to fail, | 
| 2562 |  |                 // because we just cleared the last getheaders timestamp. | 
| 2563 | 0 |                 bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer); | 
| 2564 | 0 |                 Assume(sent_getheaders); | 
| 2565 | 0 |                 LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n", | 
| 2566 | 0 |                     locator.vHave.front().ToString(), pfrom.GetId()); | 
| 2567 | 0 |             } | 
| 2568 | 0 |         } | 
| 2569 |  |  | 
| 2570 | 0 |         if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) { | 
| 2571 | 0 |             peer.m_headers_sync.reset(nullptr); | 
| 2572 |  |  | 
| 2573 |  |             // Delete this peer's entry in m_headers_presync_stats. | 
| 2574 |  |             // If this is m_headers_presync_bestpeer, it will be replaced later | 
| 2575 |  |             // by the next peer that triggers the else{} branch below. | 
| 2576 | 0 |             LOCK(m_headers_presync_mutex); | 
| 2577 | 0 |             m_headers_presync_stats.erase(pfrom.GetId()); | 
| 2578 | 0 |         } else { | 
| 2579 |  |             // Build statistics for this peer's sync. | 
| 2580 | 0 |             HeadersPresyncStats stats; | 
| 2581 | 0 |             stats.first = peer.m_headers_sync->GetPresyncWork(); | 
| 2582 | 0 |             if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) { | 
| 2583 | 0 |                 stats.second = {peer.m_headers_sync->GetPresyncHeight(), | 
| 2584 | 0 |                                 peer.m_headers_sync->GetPresyncTime()}; | 
| 2585 | 0 |             } | 
| 2586 |  |  | 
| 2587 |  |             // Update statistics in stats. | 
| 2588 | 0 |             LOCK(m_headers_presync_mutex); | 
| 2589 | 0 |             m_headers_presync_stats[pfrom.GetId()] = stats; | 
| 2590 | 0 |             auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer); | 
| 2591 | 0 |             bool best_updated = false; | 
| 2592 | 0 |             if (best_it == m_headers_presync_stats.end()) { | 
| 2593 |  |                 // If the cached best peer is outdated, iterate over all remaining ones (including | 
| 2594 |  |                 // the newly updated one) to find the best one. | 
| 2595 | 0 |                 NodeId peer_best{-1}; | 
| 2596 | 0 |                 const HeadersPresyncStats* stat_best{nullptr}; | 
| 2597 | 0 |                 for (const auto& [peer, stat] : m_headers_presync_stats) { | 
| 2598 | 0 |                     if (!stat_best || stat > *stat_best) { | 
| 2599 | 0 |                         peer_best = peer; | 
| 2600 | 0 |                         stat_best = &stat; | 
| 2601 | 0 |                     } | 
| 2602 | 0 |                 } | 
| 2603 | 0 |                 m_headers_presync_bestpeer = peer_best; | 
| 2604 | 0 |                 best_updated = (peer_best == pfrom.GetId()); | 
| 2605 | 0 |             } else if (best_it->first == pfrom.GetId() || stats > best_it->second) { | 
| 2606 |  |                 // pfrom was and remains the best peer, or pfrom just became best. | 
| 2607 | 0 |                 m_headers_presync_bestpeer = pfrom.GetId(); | 
| 2608 | 0 |                 best_updated = true; | 
| 2609 | 0 |             } | 
| 2610 | 0 |             if (best_updated && stats.second.has_value()) { | 
| 2611 |  |                 // If the best peer updated, and it is in its first phase, signal. | 
| 2612 | 0 |                 m_headers_presync_should_signal = true; | 
| 2613 | 0 |             } | 
| 2614 | 0 |         } | 
| 2615 |  |  | 
| 2616 | 0 |         if (result.success) { | 
| 2617 |  |             // We only overwrite the headers passed in if processing was | 
| 2618 |  |             // successful. | 
| 2619 | 0 |             headers.swap(result.pow_validated_headers); | 
| 2620 | 0 |         } | 
| 2621 |  |  | 
| 2622 | 0 |         return result.success; | 
| 2623 | 0 |     } | 
| 2624 |  |     // Either we didn't have a sync in progress, or something went wrong | 
| 2625 |  |     // processing these headers, or we are returning headers to the caller to | 
| 2626 |  |     // process. | 
| 2627 | 0 |     return false; | 
| 2628 | 0 | } | 
| 2629 |  |  | 
| 2630 |  | bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers) | 
| 2631 | 0 | { | 
| 2632 |  |     // Calculate the claimed total work on this chain. | 
| 2633 | 0 |     arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers); | 
| 2634 |  |  | 
| 2635 |  |     // Our dynamic anti-DoS threshold (minimum work required on a headers chain | 
| 2636 |  |     // before we'll store it) | 
| 2637 | 0 |     arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold(); | 
| 2638 |  |  | 
| 2639 |  |     // Avoid DoS via low-difficulty-headers by only processing if the headers | 
| 2640 |  |     // are part of a chain with sufficient work. | 
| 2641 | 0 |     if (total_work < minimum_chain_work) { | 
| 2642 |  |         // Only try to sync with this peer if their headers message was full; | 
| 2643 |  |         // otherwise they don't have more headers after this so no point in | 
| 2644 |  |         // trying to sync their too-little-work chain. | 
| 2645 | 0 |         if (headers.size() == m_opts.max_headers_result) { | 
| 2646 |  |             // Note: we could advance to the last header in this set that is | 
| 2647 |  |             // known to us, rather than starting at the first header (which we | 
| 2648 |  |             // may already have); however this is unlikely to matter much since | 
| 2649 |  |             // ProcessHeadersMessage() already handles the case where all | 
| 2650 |  |             // headers in a received message are already known and are | 
| 2651 |  |             // ancestors of m_best_header or the active chain tip, by skipping | 
| 2652 |  |             // this logic in that case. So even if the first header in this set | 
| 2653 |  |             // of headers is known, some header in this set must be new, so | 
| 2654 |  |             // advancing to the first unknown header would be a small effect. | 
| 2655 | 0 |             LOCK(peer.m_headers_sync_mutex); | 
| 2656 | 0 |             peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), | 
| 2657 | 0 |                 chain_start_header, minimum_chain_work)); | 
| 2658 |  |  | 
| 2659 |  |             // Now that a HeadersSyncState object for tracking this synchronization | 
| 2660 |  |             // has been created, process the headers using it as normal. Failures are | 
| 2661 |  |             // handled inside of IsContinuationOfLowWorkHeadersSync. | 
| 2662 | 0 |             (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); | 
| 2663 | 0 |         } else { | 
| 2664 | 0 |             LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId()); | 
| 2665 | 0 |         } | 
| 2666 |  |  | 
| 2667 |  |         // The peer has not yet given us a chain that meets our work threshold, | 
| 2668 |  |         // so we want to prevent further processing of the headers in any case. | 
| 2669 | 0 |         headers = {}; | 
| 2670 | 0 |         return true; | 
| 2671 | 0 |     } | 
| 2672 |  |  | 
| 2673 | 0 |     return false; | 
| 2674 | 0 | } | 
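
The threshold comparison above relies on summing the work implied by each header's nBits without validating the headers themselves. A minimal sketch of such an accumulator, assuming the GetBlockProof() helper from chain.h and mirroring what a CalculateClaimedHeadersWork-style function can do:

// Sketch only: sums the work claimed by each header's nBits. These values are
// attacker-controlled until the chain is fully validated, which is why the
// total is merely compared against the anti-DoS threshold, never trusted.
arith_uint256 ClaimedWorkSketch(const std::vector<CBlockHeader>& headers)
{
    arith_uint256 total_work{0};
    for (const CBlockHeader& header : headers) {
        CBlockIndex dummy{header};      // throwaway index; only nBits is used
        total_work += GetBlockProof(dummy);
    }
    return total_work;
}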
| 2675 |  |  | 
| 2676 |  | bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) | 
| 2677 | 0 | { | 
| 2678 | 0 |     if (header == nullptr) { | 
| 2679 | 0 |         return false; | 
| 2680 | 0 |     } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) { | 
| 2681 | 0 |         return true; | 
| 2682 | 0 |     } else if (m_chainman.ActiveChain().Contains(header)) { | 
| 2683 | 0 |         return true; | 
| 2684 | 0 |     } | 
| 2685 | 0 |     return false; | 
| 2686 | 0 | } | 
| 2687 |  |  | 
| 2688 |  | bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) | 
| 2689 | 0 | { | 
| 2690 | 0 |     const auto current_time = NodeClock::now(); | 
| 2691 |  |  | 
| 2692 |  |     // Only allow a new getheaders message to go out if we don't have a recent | 
| 2693 |  |     // one already in-flight | 
| 2694 | 0 |     if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) { | 
| 2695 | 0 |         MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256()); | 
| 2696 | 0 |         peer.m_last_getheaders_timestamp = current_time; | 
| 2697 | 0 |         return true; | 
| 2698 | 0 |     } | 
| 2699 | 0 |     return false; | 
| 2700 | 0 | } | 
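
The time-gating above is a simple one-slot rate limiter: at most one outstanding getheaders per response window. A self-contained sketch of the same idea (the two-minute window is an assumption standing in for HEADERS_RESPONSE_TIME):

#include <chrono>

struct GetHeadersThrottle {
    std::chrono::steady_clock::time_point m_last_request{}; // epoch => first call always passes
    static constexpr std::chrono::minutes WINDOW{2};        // assumed response window

    // Returns true (and arms the timer) if a new request may be sent now.
    bool TryRequest(std::chrono::steady_clock::time_point now)
    {
        if (now - m_last_request <= WINDOW) return false; // recent request still outstanding
        m_last_request = now;
        return true;
    }
};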
| 2701 |  |  | 
| 2702 |  | /* | 
| 2703 |  |  * Given a new headers tip ending in last_header, potentially request blocks towards that tip. | 
| 2704 |  |  * We require that the given tip have at least as much work as our tip, and for | 
| 2705 |  |  * our current tip to be "close to synced" (see CanDirectFetch()). | 
| 2706 |  |  */ | 
| 2707 |  | void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header) | 
| 2708 | 0 | { | 
| 2709 | 0 |     LOCK(cs_main); | 
| 2710 | 0 |     CNodeState *nodestate = State(pfrom.GetId()); | 
| 2711 |  | 
 | 
| 2712 | 0 |     if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) { | 
| 2713 | 0 |         std::vector<const CBlockIndex*> vToFetch; | 
| 2714 | 0 |         const CBlockIndex* pindexWalk{&last_header}; | 
| 2715 |  |         // Calculate all the blocks we'd need to switch to last_header, up to a limit. | 
| 2716 | 0 |         while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { | 
| 2717 | 0 |             if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && | 
| 2718 | 0 |                     !IsBlockRequested(pindexWalk->GetBlockHash()) && | 
| 2719 | 0 |                     (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) { | 
| 2720 |  |                 // We don't have this block, and it's not yet in flight. | 
| 2721 | 0 |                 vToFetch.push_back(pindexWalk); | 
| 2722 | 0 |             } | 
| 2723 | 0 |             pindexWalk = pindexWalk->pprev; | 
| 2724 | 0 |         } | 
| 2725 |  |         // If pindexWalk still isn't on our main chain, we're looking at a | 
| 2726 |  |         // very large reorg at a time we think we're close to caught up to | 
| 2727 |  |         // the main chain -- this shouldn't really happen.  Bail out on the | 
| 2728 |  |         // direct fetch and rely on parallel download instead. | 
| 2729 | 0 |         if (!m_chainman.ActiveChain().Contains(pindexWalk)) { | 
| 2730 | 0 |             LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", | 
| 2731 | 0 |                      last_header.GetBlockHash().ToString(), | 
| 2732 | 0 |                      last_header.nHeight); | 
| 2733 | 0 |         } else { | 
| 2734 | 0 |             std::vector<CInv> vGetData; | 
| 2735 |  |             // Download as much as possible, from earliest to latest. | 
| 2736 | 0 |             for (const CBlockIndex* pindex : vToFetch | std::views::reverse) { | 
| 2737 | 0 |                 if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { | 
| 2738 |  |                     // Can't download any more from this peer | 
| 2739 | 0 |                     break; | 
| 2740 | 0 |                 } | 
| 2741 | 0 |                 uint32_t nFetchFlags = GetFetchFlags(peer); | 
| 2742 | 0 |                 vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); | 
| 2743 | 0 |                 BlockRequested(pfrom.GetId(), *pindex); | 
| 2744 | 0 |                 LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", | 
| 2745 | 0 |                         pindex->GetBlockHash().ToString(), pfrom.GetId()); | 
| 2746 | 0 |             } | 
| 2747 | 0 |             if (vGetData.size() > 1) { | 
| 2748 | 0 |                 LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", | 
| 2749 | 0 |                          last_header.GetBlockHash().ToString(), | 
| 2750 | 0 |                          last_header.nHeight); | 
| 2751 | 0 |             } | 
| 2752 | 0 |             if (vGetData.size() > 0) { | 
| 2753 | 0 |                 if (!m_opts.ignore_incoming_txs && | 
| 2754 | 0 |                         nodestate->m_provides_cmpctblocks && | 
| 2755 | 0 |                         vGetData.size() == 1 && | 
| 2756 | 0 |                         mapBlocksInFlight.size() == 1 && | 
| 2757 | 0 |                         last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) { | 
| 2758 |  |                     // In any case, we want to download using a compact block, not a regular one | 
| 2759 | 0 |                     vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); | 
| 2760 | 0 |                 } | 
| 2761 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData); | 
| 2762 | 0 |             } | 
| 2763 | 0 |         } | 
| 2764 | 0 |     } | 
| 2765 | 0 | } | 
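
Because the loop above walks pprev pointers, vToFetch is filled tip-first; the std::views::reverse pass then issues requests in ascending height so blocks can connect as they arrive. A toy illustration of the ordering, with hypothetical heights:

#include <ranges>
#include <vector>

void OrderingSketch()
{
    // Heights collected by a pprev walk from height 105 down to a fork point
    // at height 100 (tip-first order).
    const std::vector<int> walked{105, 104, 103, 102, 101};
    for (const int height : walked | std::views::reverse) {
        (void)height; // requests would be issued for 101, 102, ..., 105
    }
}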
| 2766 |  |  | 
| 2767 |  | /** | 
| 2768 |  |  * Given receipt of headers from a peer ending in last_header, along with | 
| 2769 |  |  * whether that header was new and whether the headers message was full, | 
| 2770 |  |  * update the state we keep for the peer. | 
| 2771 |  |  */ | 
| 2772 |  | void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, | 
| 2773 |  |         const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) | 
| 2774 | 0 | { | 
| 2775 | 0 |     LOCK(cs_main); | 
| 2776 | 0 |     CNodeState *nodestate = State(pfrom.GetId()); | 
| 2777 |  | 
 | 
| 2778 | 0 |     UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash()); | 
| 2779 |  |  | 
| 2780 |  |     // From here, pindexBestKnownBlock should be guaranteed to be non-null, | 
| 2781 |  |     // because it is set in UpdateBlockAvailability. Some nullptr checks | 
| 2782 |  |     // are still present, however, as belt-and-suspenders. | 
| 2783 |  | 
 | 
| 2784 | 0 |     if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { | 
| 2785 | 0 |         nodestate->m_last_block_announcement = GetTime(); | 
| 2786 | 0 |     } | 
| 2787 |  |  | 
| 2788 |  |     // If we're in IBD, we want outbound peers that will serve us a useful | 
| 2789 |  |     // chain. Disconnect peers that are on chains with insufficient work. | 
| 2790 | 0 |     if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) { | 
| 2791 |  |         // If the peer has no more headers to give us, then we know we have | 
| 2792 |  |         // their tip. | 
| 2793 | 0 |         if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { | 
| 2794 |  |             // This peer has too little work on their headers chain to help | 
| 2795 |  |             // us sync -- disconnect if it is an outbound disconnection | 
| 2796 |  |             // candidate. | 
| 2797 |  |             // Note: We compare their tip to the minimum chain work (rather than | 
| 2798 |  |             // m_chainman.ActiveChain().Tip()) because we won't start block download | 
| 2799 |  |             // until we have a headers chain that has at least | 
| 2800 |  |             // the minimum chain work, even if a peer has a chain past our tip, | 
| 2801 |  |             // as an anti-DoS measure. | 
| 2802 | 0 |             if (pfrom.IsOutboundOrBlockRelayConn()) { | 
| 2803 | 0 |                 LogInfo("outbound peer headers chain has insufficient work, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 2804 | 0 |                 pfrom.fDisconnect = true; | 
| 2805 | 0 |             } | 
| 2806 | 0 |         } | 
| 2807 | 0 |     } | 
| 2808 |  |  | 
| 2809 |  |     // If this is an outbound full-relay peer, check to see if we should protect | 
| 2810 |  |     // it from the bad/lagging chain logic. | 
| 2811 |  |     // Note that outbound block-relay peers are excluded from this protection, and | 
| 2812 |  |     // thus always subject to eviction under the bad/lagging chain logic. | 
| 2813 |  |     // See ChainSyncTimeoutState. | 
| 2814 | 0 |     if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) { | 
| 2815 | 0 |         if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { | 
| 2816 | 0 |             LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); | 
| 2817 | 0 |             nodestate->m_chain_sync.m_protect = true; | 
| 2818 | 0 |             ++m_outbound_peers_with_protect_from_disconnect; | 
| 2819 | 0 |         } | 
| 2820 | 0 |     } | 
| 2821 | 0 | } | 
| 2822 |  |  | 
| 2823 |  | void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer, | 
| 2824 |  |                                             std::vector<CBlockHeader>&& headers, | 
| 2825 |  |                                             bool via_compact_block) | 
| 2826 | 0 | { | 
| 2827 | 0 |     size_t nCount = headers.size(); | 
| 2828 |  | 
 | 
| 2829 | 0 |     if (nCount == 0) { | 
| 2830 |  |         // Nothing interesting. Stop asking this peer for more headers. | 
| 2831 |  |         // If we were in the middle of headers sync, receiving an empty headers | 
| 2832 |  |         // message suggests that the peer suddenly has nothing to give us | 
| 2833 |  |         // (perhaps it reorged to our chain). Clear download state for this peer. | 
| 2834 | 0 |         LOCK(peer.m_headers_sync_mutex); | 
| 2835 | 0 |         if (peer.m_headers_sync) { | 
| 2836 | 0 |             peer.m_headers_sync.reset(nullptr); | 
| 2837 | 0 |             LOCK(m_headers_presync_mutex); | 
| 2838 | 0 |             m_headers_presync_stats.erase(pfrom.GetId()); | 
| 2839 | 0 |         } | 
| 2840 |  |         // A headers message with no headers cannot be an announcement, so assume | 
| 2841 |  |         // it is a response to our last getheaders request, if there is one. | 
| 2842 | 0 |         peer.m_last_getheaders_timestamp = {}; | 
| 2843 | 0 |         return; | 
| 2844 | 0 |     } | 
| 2845 |  |  | 
| 2846 |  |     // Before we do any processing, make sure these pass basic sanity checks. | 
| 2847 |  |     // We'll rely on headers having valid proof-of-work further down, as an | 
| 2848 |  |     // anti-DoS criterion (note: this check is required before passing any | 
| 2849 |  |     // headers into HeadersSyncState). | 
| 2850 | 0 |     if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) { | 
| 2851 |  |         // Misbehaving() calls are handled within CheckHeadersPoW(), so we can | 
| 2852 |  |         // just return. (Note that even if a header is announced via compact | 
| 2853 |  |         // block, the header itself should be valid, so this type of error can | 
| 2854 |  |         // always be punished.) | 
| 2855 | 0 |         return; | 
| 2856 | 0 |     } | 
| 2857 |  |  | 
| 2858 | 0 |     const CBlockIndex *pindexLast = nullptr; | 
| 2859 |  |  | 
| 2860 |  |     // We'll set already_validated_work to true if these headers are | 
| 2861 |  |     // successfully processed as part of a low-work headers sync in progress | 
| 2862 |  |     // (either in PRESYNC or REDOWNLOAD phase). | 
| 2863 |  |     // If true, this will mean that any headers returned to us (i.e. during | 
| 2864 |  |     // REDOWNLOAD) can be validated without further anti-DoS checks. | 
| 2865 | 0 |     bool already_validated_work = false; | 
| 2866 |  |  | 
| 2867 |  |     // If we're in the middle of headers sync, let it do its magic. | 
| 2868 | 0 |     bool have_headers_sync = false; | 
| 2869 | 0 |     { | 
| 2870 | 0 |         LOCK(peer.m_headers_sync_mutex); | 
| 2871 |  | 
 | 
| 2872 | 0 |         already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); | 
| 2873 |  |  | 
| 2874 |  |         // The headers we passed in may have been: | 
| 2875 |  |         // - untouched, perhaps if no headers-sync was in progress, or some | 
| 2876 |  |         //   failure occurred | 
| 2877 |  |         // - erased, such as if the headers were successfully processed and no | 
| 2878 |  |         //   additional headers processing needs to take place (such as if we | 
| 2879 |  |         //   are still in PRESYNC) | 
| 2880 |  |         // - replaced with headers that are now ready for validation, such as | 
| 2881 |  |         //   during the REDOWNLOAD phase of a low-work headers sync. | 
| 2882 |  |         // So just check whether we still have headers that we need to process, | 
| 2883 |  |         // or not. | 
| 2884 | 0 |         if (headers.empty()) { | 
| 2885 | 0 |             return; | 
| 2886 | 0 |         } | 
| 2887 |  |  | 
| 2888 | 0 |         have_headers_sync = !!peer.m_headers_sync; | 
| 2889 | 0 |     } | 
| 2890 |  |  | 
| 2891 |  |     // Do these headers connect to something in our block index? | 
| 2892 | 0 |     const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))}; | 
| 2893 | 0 |     bool headers_connect_blockindex{chain_start_header != nullptr}; | 
| 2894 |  | 
 | 
| 2895 | 0 |     if (!headers_connect_blockindex) { | 
| 2896 |  |         // This could be a BIP 130 block announcement, so use | 
| 2897 |  |         // special logic for handling headers that don't connect, as this | 
| 2898 |  |         // could be benign. | 
| 2899 | 0 |         HandleUnconnectingHeaders(pfrom, peer, headers); | 
| 2900 | 0 |         return; | 
| 2901 | 0 |     } | 
| 2902 |  |  | 
| 2903 |  |     // If headers connect, assume that this is in response to any outstanding getheaders | 
| 2904 |  |     // request we may have sent, and clear out the time of our last request. Non-connecting | 
| 2905 |  |     // headers cannot be a response to a getheaders request. | 
| 2906 | 0 |     peer.m_last_getheaders_timestamp = {}; | 
| 2907 |  |  | 
| 2908 |  |     // If the headers we received are already in memory and an ancestor of | 
| 2909 |  |     // m_best_header or our tip, skip anti-DoS checks. These headers will not | 
| 2910 |  |     // use any more memory (and we are not leaking information that could be | 
| 2911 |  |     // used to fingerprint us). | 
| 2912 | 0 |     const CBlockIndex *last_received_header{nullptr}; | 
| 2913 | 0 |     { | 
| 2914 | 0 |         LOCK(cs_main); | 
| 2915 | 0 |         last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()); | 
| 2916 | 0 |         if (IsAncestorOfBestHeaderOrTip(last_received_header)) { | 
| 2917 | 0 |             already_validated_work = true; | 
| 2918 | 0 |         } | 
| 2919 | 0 |     } | 
| 2920 |  |  | 
| 2921 |  |     // If our peer has NetPermissionFlags::NoBan privileges, then bypass our | 
| 2922 |  |     // anti-DoS logic (this saves bandwidth when we connect to a trusted peer | 
| 2923 |  |     // on startup). | 
| 2924 | 0 |     if (pfrom.HasPermission(NetPermissionFlags::NoBan)) { | 
| 2925 | 0 |         already_validated_work = true; | 
| 2926 | 0 |     } | 
| 2927 |  |  | 
| 2928 |  |     // At this point, the headers connect to something in our block index. | 
| 2929 |  |     // Do anti-DoS checks to determine if we should process or store for later | 
| 2930 |  |     // processing. | 
| 2931 | 0 |     if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom, | 
| 2932 | 0 |                 chain_start_header, headers)) { | 
| 2933 |  |         // If we successfully started a low-work headers sync, then there | 
| 2934 |  |         // should be no headers to process any further. | 
| 2935 | 0 |         Assume(headers.empty()); | 
| 2936 | 0 |         return; | 
| 2937 | 0 |     } | 
| 2938 |  |  | 
| 2939 |  |     // At this point, we have a set of headers with sufficient work on them | 
| 2940 |  |     // which can be processed. | 
| 2941 |  |  | 
| 2942 |  |     // If we don't have the last header, then this peer will have given us | 
| 2943 |  |     // something new (if these headers are valid). | 
| 2944 | 0 |     bool received_new_header{last_received_header == nullptr}; | 
| 2945 |  |  | 
| 2946 |  |     // Now process all the headers. | 
| 2947 | 0 |     BlockValidationState state; | 
| 2948 | 0 |     const bool processed{m_chainman.ProcessNewBlockHeaders(headers, | 
| 2949 | 0 |                                                            /*min_pow_checked=*/true, | 
| 2950 | 0 |                                                            state, &pindexLast)}; | 
| 2951 | 0 |     if (!processed) { | 
| 2952 | 0 |         if (state.IsInvalid()) { | 
| 2953 | 0 |             MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); | 
| 2954 | 0 |             return; | 
| 2955 | 0 |         } | 
| 2956 | 0 |     } | 
| 2957 | 0 |     assert(pindexLast); | 
| 2958 |  |  | 
| 2959 | 0 |     if (processed && received_new_header) { | 
| 2960 | 0 |         LogBlockHeader(*pindexLast, pfrom, /*via_compact_block=*/false); | 
| 2961 | 0 |     } | 
| 2962 |  |  | 
| 2963 |  |     // Consider fetching more headers if we are not using our headers-sync mechanism. | 
| 2964 | 0 |     if (nCount == m_opts.max_headers_result && !have_headers_sync) { | 
| 2965 |  |         // Headers message had its maximum size; the peer may have more headers. | 
| 2966 | 0 |         if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) { | 
| 2967 | 0 |             LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", | 
| 2968 | 0 |                     pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); | 
| 2969 | 0 |         } | 
| 2970 | 0 |     } | 
| 2971 |  | 
 | 
| 2972 | 0 |     UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result); | 
| 2973 |  |  | 
| 2974 |  |     // Consider immediately downloading blocks. | 
| 2975 | 0 |     HeadersDirectFetchBlocks(pfrom, peer, *pindexLast); | 
| 2976 |  | 
 | 
| 2977 | 0 |     return; | 
| 2978 | 0 | } | 
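
For orientation, the control flow of ProcessHeadersMessage above condenses to the following sequence (summary only; each branch returns early):

// 1. Empty headers message  -> clear any headers-sync state; treat as a reply.
// 2. Invalid PoW            -> peer already punished in CheckHeadersPoW; return.
// 3. Ongoing low-work sync  -> IsContinuationOfLowWorkHeadersSync consumes or
//                              replaces the headers; return if none remain.
// 4. Headers don't connect  -> HandleUnconnectingHeaders; return.
// 5. Insufficient work      -> TryLowWorkHeadersSync may start a PRESYNC; return.
// 6. Otherwise              -> ProcessNewBlockHeaders, then optionally request
//                              more headers and direct-fetch blocks.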
| 2979 |  |  | 
| 2980 |  | std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, | 
| 2981 |  |                                        bool first_time_failure) | 
| 2982 | 0 | { | 
| 2983 | 0 |     AssertLockNotHeld(m_peer_mutex); | 
| 2984 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 2985 | 0 |     AssertLockHeld(m_tx_download_mutex); | 
| 2986 |  | 
 | 
| 2987 | 0 |     PeerRef peer{GetPeerRef(nodeid)}; | 
| 2988 |  | 
 | 
| 2989 | 0 |     LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n", | 
| 2990 | 0 |         ptx->GetHash().ToString(), | 
| 2991 | 0 |         ptx->GetWitnessHash().ToString(), | 
| 2992 | 0 |         nodeid, | 
| 2993 | 0 |         state.ToString()); | 
| 2994 |  | 
 | 
| 2995 | 0 |     const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure); | 
| 2996 |  | 
 | 
| 2997 | 0 |     if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { | 
| 2998 | 0 |         AddToCompactExtraTransactions(ptx); | 
| 2999 | 0 |     } | 
| 3000 | 0 |     for (const Txid& parent_txid : unique_parents) { | 
| 3001 | 0 |         if (peer) AddKnownTx(*peer, parent_txid.ToUint256()); | 
| 3002 | 0 |     } | 
| 3003 |  | 
 | 
| 3004 | 0 |     return package_to_validate; | 
| 3005 | 0 | } | 
| 3006 |  |  | 
| 3007 |  | void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) | 
| 3008 | 0 | { | 
| 3009 | 0 |     AssertLockNotHeld(m_peer_mutex); | 
| 3010 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 3011 | 0 |     AssertLockHeld(m_tx_download_mutex); | 
| 3012 |  | 
 | 
| 3013 | 0 |     m_txdownloadman.MempoolAcceptedTx(tx); | 
| 3014 |  | 
 | 
| 3015 | 0 |     LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", | 
| 3016 | 0 |              nodeid, | 
| 3017 | 0 |              tx->GetHash().ToString(), | 
| 3018 | 0 |              tx->GetWitnessHash().ToString(), | 
| 3019 | 0 |              m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); | 
| 3020 |  | 
 | 
| 3021 | 0 |     RelayTransaction(tx->GetHash(), tx->GetWitnessHash()); | 
| 3022 |  | 
 | 
| 3023 | 0 |     for (const CTransactionRef& removedTx : replaced_transactions) { | 
| 3024 | 0 |         AddToCompactExtraTransactions(removedTx); | 
| 3025 | 0 |     } | 
| 3026 | 0 | } | 
| 3027 |  |  | 
| 3028 |  | void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) | 
| 3029 | 0 | { | 
| 3030 | 0 |     AssertLockNotHeld(m_peer_mutex); | 
| 3031 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 3032 | 0 |     AssertLockHeld(m_tx_download_mutex); | 
| 3033 |  | 
 | 
| 3034 | 0 |     const auto& package = package_to_validate.m_txns; | 
| 3035 | 0 |     const auto& senders = package_to_validate.m_senders; | 
| 3036 |  | 
 | 
| 3037 | 0 |     if (package_result.m_state.IsInvalid()) { | 
| 3038 | 0 |         m_txdownloadman.MempoolRejectedPackage(package); | 
| 3039 | 0 |     } | 
| 3040 |  |     // We currently only expect to process 1-parent-1-child packages. Remove if this changes. | 
| 3041 | 0 |     if (!Assume(package.size() == 2)) return; | 
| 3042 |  |  | 
| 3043 |  |     // Iterate backwards to erase in-package descendants from the orphanage before they become | 
| 3044 |  |     // relevant in AddChildrenToWorkSet. | 
| 3045 | 0 |     auto package_iter = package.rbegin(); | 
| 3046 | 0 |     auto senders_iter = senders.rbegin(); | 
| 3047 | 0 |     while (package_iter != package.rend()) { | 
| 3048 | 0 |         const auto& tx = *package_iter; | 
| 3049 | 0 |         const NodeId nodeid = *senders_iter; | 
| 3050 | 0 |         const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())}; | 
| 3051 |  |  | 
| 3052 |  |         // It is not guaranteed that a result exists for every transaction. | 
| 3053 | 0 |         if (it_result != package_result.m_tx_results.end()) { | 
| 3054 | 0 |             const auto& tx_result = it_result->second; | 
| 3055 | 0 |             switch (tx_result.m_result_type) { | 
| 3056 | 0 |                 case MempoolAcceptResult::ResultType::VALID: | 
| 3057 | 0 |                 { | 
| 3058 | 0 |                     ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions); | 
| 3059 | 0 |                     break; | 
| 3060 | 0 |                 } | 
| 3061 | 0 |                 case MempoolAcceptResult::ResultType::INVALID: | 
| 3062 | 0 |                 case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS: | 
| 3063 | 0 |                 { | 
| 3064 |  |                     // Don't add to vExtraTxnForCompact, as these transactions should have already been | 
| 3065 |  |                     // added there when added to the orphanage or rejected for TX_RECONSIDERABLE. | 
| 3066 |  |                     // This should be updated if package submission is ever used for transactions | 
| 3067 |  |                     // that haven't already been validated before. | 
| 3068 | 0 |                     ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false); | 
| 3069 | 0 |                     break; | 
| 3070 | 0 |                 } | 
| 3071 | 0 |                 case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: | 
| 3072 | 0 |                 { | 
| 3073 |  |                     // AlreadyHaveTx() should be catching transactions that are already in the mempool. | 
| 3074 | 0 |                     Assume(false); | 
| 3075 | 0 |                     break; | 
| 3076 | 0 |                 } | 
| 3077 | 0 |             } | 
| 3078 | 0 |         } | 
| 3079 | 0 |         package_iter++; | 
| 3080 | 0 |         senders_iter++; | 
| 3081 | 0 |     } | 
| 3082 | 0 | } | 
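
The reverse walk above matters for 1-parent-1-child packages: handling the child before the parent erases its orphanage entry before the parent's acceptance could re-queue it via AddChildrenToWorkSet. A sketch of the iteration order, using a hypothetical {parent_tx, child_tx} package (Package is std::vector<CTransactionRef> per policy/packages.h):

void ReverseWalkSketch(const Package& package)
{
    // For a package ordered {parent_tx, child_tx}, *it is child_tx on the
    // first iteration and parent_tx on the second, mirroring the loop above.
    for (auto it = package.rbegin(); it != package.rend(); ++it) {
        const CTransactionRef& tx = *it;
        (void)tx;
    }
}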
| 3083 |  |  | 
| 3084 |  | // NOTE: the orphan processing used to be uninterruptible and quadratic, which could allow a peer to stall the node for | 
| 3085 |  | // hours with specially crafted transactions. See https://bitcoincore.org/en/2024/07/03/disclose-orphan-dos. | 
| 3086 |  | bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) | 
| 3087 | 0 | { | 
| 3088 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 3089 | 0 |     LOCK2(::cs_main, m_tx_download_mutex); | 
| 3090 |  | 
 | 
| 3093 | 0 |     while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) { | 
| 3094 | 0 |         const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); | 
| 3095 | 0 |         const TxValidationState& state = result.m_state; | 
| 3096 | 0 |         const Txid& orphanHash = porphanTx->GetHash(); | 
| 3097 | 0 |         const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash(); | 
| 3098 |  | 
 | 
| 3099 | 0 |         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { | 
| 3100 | 0 |             LogDebug(BCLog::TXPACKAGES, "   accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString()); | 
| 3101 | 0 |             ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions); | 
| 3102 | 0 |             return true; | 
| 3103 | 0 |         } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { | 
| 3104 | 0 |             LogDebug(BCLog::TXPACKAGES, "   invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n", | 
| 3105 | 0 |                 orphanHash.ToString(), | 
| 3106 | 0 |                 orphan_wtxid.ToString(), | 
| 3107 | 0 |                 peer.m_id, | 
| 3108 | 0 |                 state.ToString()); | 
| 3109 |  | 
 | 
| 3110 | 0 |             if (Assume(state.IsInvalid() && | 
| 3111 | 0 |                        state.GetResult() != TxValidationResult::TX_UNKNOWN && | 
| 3112 | 0 |                        state.GetResult() != TxValidationResult::TX_NO_MEMPOOL && | 
| 3113 | 0 |                        state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) { | 
| 3114 | 0 |                 ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false); | 
| 3115 | 0 |             } | 
| 3116 | 0 |             return true; | 
| 3117 | 0 |         } | 
| 3118 | 0 |     } | 
| 3119 |  |  | 
| 3120 | 0 |     return false; | 
| 3121 | 0 | } | 
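
Note the return contract above: true means one orphan was (re)considered (accepted or rejected) and more work may remain; false means the queue is drained. A hypothetical driver illustrating the interruptible design referenced in the NOTE, assuming a process_one callable with the same contract as ProcessOrphanTx:

#include <atomic>
#include <functional>

// Sketch only: reconsider one orphan at a time so a large backlog is spread
// across passes rather than processed in one unbounded loop (see the DoS
// disclosure linked above).
void DrainOrphansCooperatively(const std::function<bool()>& process_one,
                               const std::atomic<bool>& interrupt)
{
    while (!interrupt && process_one()) {
        // yield to other peers/messages between orphans
    }
}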
| 3122 |  |  | 
| 3123 |  | bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, | 
| 3124 |  |                                                 BlockFilterType filter_type, uint32_t start_height, | 
| 3125 |  |                                                 const uint256& stop_hash, uint32_t max_height_diff, | 
| 3126 |  |                                                 const CBlockIndex*& stop_index, | 
| 3127 |  |                                                 BlockFilterIndex*& filter_index) | 
| 3128 | 0 | { | 
| 3129 | 0 |     const bool supported_filter_type = | 
| 3130 | 0 |         (filter_type == BlockFilterType::BASIC && | 
| 3131 | 0 |          (peer.m_our_services & NODE_COMPACT_FILTERS)); | 
| 3132 | 0 |     if (!supported_filter_type) { | 
| 3133 | 0 |         LogDebug(BCLog::NET, "peer requested unsupported block filter type: %d, %s\n", | 
| 3134 | 0 |                  static_cast<uint8_t>(filter_type), node.DisconnectMsg(fLogIPs)); | 
| 3135 | 0 |         node.fDisconnect = true; | 
| 3136 | 0 |         return false; | 
| 3137 | 0 |     } | 
| 3138 |  |  | 
| 3139 | 0 |     { | 
| 3140 | 0 |         LOCK(cs_main); | 
| 3141 | 0 |         stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash); | 
| 3142 |  |  | 
| 3143 |  |         // Check that the stop block exists and the peer would be allowed to fetch it. | 
| 3144 | 0 |         if (!stop_index || !BlockRequestAllowed(stop_index)) { | 
| 3145 | 0 |             LogDebug(BCLog::NET, "peer requested invalid block hash: %s, %s\n", | 
| 3146 | 0 |                      stop_hash.ToString(), node.DisconnectMsg(fLogIPs)); | 
| 3147 | 0 |             node.fDisconnect = true; | 
| 3148 | 0 |             return false; | 
| 3149 | 0 |         } | 
| 3150 | 0 |     } | 
| 3151 |  |  | 
| 3152 | 0 |     uint32_t stop_height = stop_index->nHeight; | 
| 3153 | 0 |     if (start_height > stop_height) { | 
| 3154 | 0 |         LogDebug(BCLog::NET, "peer sent invalid getcfilters/getcfheaders with " | 
| 3155 | 0 |                  "start height %d and stop height %d, %s\n", | 
| 3156 | 0 |                  start_height, stop_height, node.DisconnectMsg(fLogIPs)); | 
| 3157 | 0 |         node.fDisconnect = true; | 
| 3158 | 0 |         return false; | 
| 3159 | 0 |     } | 
| 3160 | 0 |     if (stop_height - start_height >= max_height_diff) { | 
| 3161 | 0 |         LogDebug(BCLog::NET, "peer requested too many cfilters/cfheaders: %d / %d, %s\n", | 
| 3162 | 0 |                  stop_height - start_height + 1, max_height_diff, node.DisconnectMsg(fLogIPs)); | 
| 3163 | 0 |         node.fDisconnect = true; | 
| 3164 | 0 |         return false; | 
| 3165 | 0 |     } | 
| 3166 |  |  | 
| 3167 | 0 |     filter_index = GetBlockFilterIndex(filter_type); | 
| 3168 | 0 |     if (!filter_index) { | 
| 3169 | 0 |         LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); | 
| 3170 | 0 |         return false; | 
| 3171 | 0 |     } | 
| 3172 |  |  | 
| 3173 | 0 |     return true; | 
| 3174 | 0 | } | 
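
The max_height_diff check above bounds a single request to stop_height - start_height + 1 items. A small client-side sketch of the resulting batch count, assuming the 1000-filter batch limit that MAX_GETCFILTERS_SIZE enforces per BIP157:

#include <cstdint>

// Filters exist for every block height 0..tip, so a full filter sync needs
// ceil((tip_height + 1) / batch_size) getcfilters requests.
uint32_t FilterBatchesNeeded(uint32_t tip_height, uint32_t batch_size = 1000)
{
    return (tip_height + 1 + batch_size - 1) / batch_size;
}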
| 3175 |  |  | 
| 3176 |  | void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv) | 
| 3177 | 0 | { | 
| 3178 | 0 |     uint8_t filter_type_ser; | 
| 3179 | 0 |     uint32_t start_height; | 
| 3180 | 0 |     uint256 stop_hash; | 
| 3181 |  | 
 | 
| 3182 | 0 |     vRecv >> filter_type_ser >> start_height >> stop_hash; | 
| 3183 |  | 
 | 
| 3184 | 0 |     const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); | 
| 3185 |  | 
 | 
| 3186 | 0 |     const CBlockIndex* stop_index; | 
| 3187 | 0 |     BlockFilterIndex* filter_index; | 
| 3188 | 0 |     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, | 
| 3189 | 0 |                                    MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { | 
| 3190 | 0 |         return; | 
| 3191 | 0 |     } | 
| 3192 |  |  | 
| 3193 | 0 |     std::vector<BlockFilter> filters; | 
| 3194 | 0 |     if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { | 
| 3195 | 0 |         LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n", | 
| 3196 | 0 |                      BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); | 
| 3197 | 0 |         return; | 
| 3198 | 0 |     } | 
| 3199 |  |  | 
| 3200 | 0 |     for (const auto& filter : filters) { | 
| 3201 | 0 |         MakeAndPushMessage(node, NetMsgType::CFILTER, filter); | 
| 3202 | 0 |     } | 
| 3203 | 0 | } | 
| 3204 |  |  | 
| 3205 |  | void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv) | 
| 3206 | 0 | { | 
| 3207 | 0 |     uint8_t filter_type_ser; | 
| 3208 | 0 |     uint32_t start_height; | 
| 3209 | 0 |     uint256 stop_hash; | 
| 3210 |  | 
 | 
| 3211 | 0 |     vRecv >> filter_type_ser >> start_height >> stop_hash; | 
| 3212 |  | 
 | 
| 3213 | 0 |     const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); | 
| 3214 |  | 
 | 
| 3215 | 0 |     const CBlockIndex* stop_index; | 
| 3216 | 0 |     BlockFilterIndex* filter_index; | 
| 3217 | 0 |     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, | 
| 3218 | 0 |                                    MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { | 
| 3219 | 0 |         return; | 
| 3220 | 0 |     } | 
| 3221 |  |  | 
| 3222 | 0 |     uint256 prev_header; | 
| 3223 | 0 |     if (start_height > 0) { | 
| 3224 | 0 |         const CBlockIndex* const prev_block = | 
| 3225 | 0 |             stop_index->GetAncestor(static_cast<int>(start_height - 1)); | 
| 3226 | 0 |         if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { | 
| 3227 | 0 |             LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", | 
| 3228 | 0 |                          BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); | 
| 3229 | 0 |             return; | 
| 3230 | 0 |         } | 
| 3231 | 0 |     } | 
| 3232 |  |  | 
| 3233 | 0 |     std::vector<uint256> filter_hashes; | 
| 3234 | 0 |     if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { | 
| 3235 | 0 |         LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n", | 
| 3236 | 0 |                      BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); | 
| 3237 | 0 |         return; | 
| 3238 | 0 |     } | 
| 3239 |  |  | 
| 3240 | 0 |     MakeAndPushMessage(node, NetMsgType::CFHEADERS, | 
| 3241 | 0 |               filter_type_ser, | 
| 3242 | 0 |               stop_index->GetBlockHash(), | 
| 3243 | 0 |               prev_header, | 
| 3244 | 0 |               filter_hashes); | 
| 3245 | 0 | } | 
| 3246 |  |  | 
| 3247 |  | void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv) | 
| 3248 | 0 | { | 
| 3249 | 0 |     uint8_t filter_type_ser; | 
| 3250 | 0 |     uint256 stop_hash; | 
| 3251 |  | 
 | 
| 3252 | 0 |     vRecv >> filter_type_ser >> stop_hash; | 
| 3253 |  | 
 | 
| 3254 | 0 |     const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); | 
| 3255 |  | 
 | 
| 3256 | 0 |     const CBlockIndex* stop_index; | 
| 3257 | 0 |     BlockFilterIndex* filter_index; | 
| 3258 | 0 |     if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash, | 
| 3259 | 0 |                                    /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), | 
| 3260 | 0 |                                    stop_index, filter_index)) { | 
| 3261 | 0 |         return; | 
| 3262 | 0 |     } | 
| 3263 |  |  | 
| 3264 | 0 |     std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL); | 
| 3265 |  |  | 
| 3266 |  |     // Populate headers. | 
| 3267 | 0 |     const CBlockIndex* block_index = stop_index; | 
| 3268 | 0 |     for (int i = headers.size() - 1; i >= 0; i--) { | 
| 3269 | 0 |         int height = (i + 1) * CFCHECKPT_INTERVAL; | 
| 3270 | 0 |         block_index = block_index->GetAncestor(height); | 
| 3271 |  | 
 | 
| 3272 | 0 |         if (!filter_index->LookupFilterHeader(block_index, headers[i])) { | 
| 3273 | 0 |             LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", | 
| 3274 | 0 |                          BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); | 
| 3275 | 0 |             return; | 
| 3276 | 0 |         } | 
| 3277 | 0 |     } | 
| 3278 |  |  | 
| 3279 | 0 |     MakeAndPushMessage(node, NetMsgType::CFCHECKPT, | 
| 3280 | 0 |               filter_type_ser, | 
| 3281 | 0 |               stop_index->GetBlockHash(), | 
| 3282 | 0 |               headers); | 
| 3283 | 0 | } | 
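
A worked example of the checkpoint arithmetic above, assuming CFCHECKPT_INTERVAL == 1000: a stop block at height 2500 yields headers.size() == 2, and the loop fetches the filter headers at heights 1000 and 2000; the tip itself is only represented when its height is an exact multiple of the interval.

#include <vector>

// Sketch: checkpoint heights served for a given stop height, assuming an
// interval of 1000 blocks.
std::vector<int> CheckpointHeights(int stop_height, int interval = 1000)
{
    std::vector<int> heights(stop_height / interval);
    for (int i = 0; i < static_cast<int>(heights.size()); ++i) {
        heights[i] = (i + 1) * interval;
    }
    return heights; // e.g. stop_height 2500 -> {1000, 2000}
}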
| 3284 |  |  | 
| 3285 |  | void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) | 
| 3286 | 0 | { | 
| 3287 | 0 |     bool new_block{false}; | 
| 3288 | 0 |     m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); | 
| 3289 | 0 |     if (new_block) { | 
| 3290 | 0 |         node.m_last_block_time = GetTime<std::chrono::seconds>(); | 
| 3291 |  |         // In case this block came from a different peer than we requested | 
| 3292 |  |         // from, we can erase the block request now anyway (as we just stored | 
| 3293 |  |         // this block to disk). | 
| 3294 | 0 |         LOCK(cs_main); | 
| 3295 | 0 |         RemoveBlockRequest(block->GetHash(), std::nullopt); | 
| 3296 | 0 |     } else { | 
| 3297 | 0 |         LOCK(cs_main); | 
| 3298 | 0 |         mapBlockSource.erase(block->GetHash()); | 
| 3299 | 0 |     } | 
| 3300 | 0 | } | 
| 3301 |  |  | 
| 3302 |  | void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) | 
| 3303 | 0 | { | 
| 3304 | 0 |     std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | 
| 3305 | 0 |     bool fBlockRead{false}; | 
| 3306 | 0 |     { | 
| 3307 | 0 |         LOCK(cs_main); | 
| 3308 |  | 
 | 
| 3309 | 0 |         auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash); | 
| 3310 | 0 |         size_t already_in_flight = std::distance(range_flight.first, range_flight.second); | 
| 3311 | 0 |         bool requested_block_from_this_peer{false}; | 
| 3312 |  |  | 
| 3313 |  |         // Multimap ensures ordering of outstanding requests. It's either empty or first in line. | 
| 3314 | 0 |         bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); | 
| 3315 |  | 
 | 
| 3316 | 0 |         while (range_flight.first != range_flight.second) { | 
| 3317 | 0 |             auto [node_id, block_it] = range_flight.first->second; | 
| 3318 | 0 |             if (node_id == pfrom.GetId() && block_it->partialBlock) { | 
| 3319 | 0 |                 requested_block_from_this_peer = true; | 
| 3320 | 0 |                 break; | 
| 3321 | 0 |             } | 
| 3322 | 0 |             range_flight.first++; | 
| 3323 | 0 |         } | 
| 3324 |  | 
 | 
| 3325 | 0 |         if (!requested_block_from_this_peer) { | 
| 3326 | 0 |             LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId()); | 
| 3327 | 0 |             return; | 
| 3328 | 0 |         } | 
| 3329 |  |  | 
| 3330 | 0 |         PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; | 
| 3331 |  | 
 | 
| 3332 | 0 |         if (partialBlock.header.IsNull()) { | 
| 3333 |  |             // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left | 
| 3334 |  |             // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we | 
| 3335 |  |             // should not call LookupBlockIndex below. | 
| 3336 | 0 |             RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); | 
| 3337 | 0 |             Misbehaving(peer, "previous compact block reconstruction attempt failed"); | 
| 3338 | 0 |             LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times\n", pfrom.GetId()); | 
| 3339 | 0 |             return; | 
| 3340 | 0 |         } | 
| 3341 |  |  | 
| 3342 |  |         // We should not have gotten this far in compact block processing unless it's attached to a known header | 
| 3343 | 0 |         const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; | 
| 3344 | 0 |         ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, | 
| 3345 | 0 |                                                    /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); | 
| 3346 | 0 |         if (status == READ_STATUS_INVALID) { | 
| 3347 | 0 |             RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect | 
| 3348 | 0 |             Misbehaving(peer, "invalid compact block/non-matching block transactions"); | 
| 3349 | 0 |             return; | 
| 3350 | 0 |         } else if (status == READ_STATUS_FAILED) { | 
| 3351 | 0 |             if (first_in_flight) { | 
| 3352 |  |                 // Might have collided, fall back to getdata now :( | 
| 3353 |  |                 // We keep the failed partialBlock to disallow processing another compact block announcement from the same | 
| 3354 |  |                 // peer for the same block. We let the full block download below continue under the same m_downloading_since | 
| 3355 |  |                 // timer. | 
| 3356 | 0 |                 std::vector<CInv> invs; | 
| 3357 | 0 |                 invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); | 
| 3358 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); | 
| 3359 | 0 |             } else { | 
| 3360 | 0 |                 RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); | 
| 3361 | 0 |                 LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId()); | 
| 3362 | 0 |                 return; | 
| 3363 | 0 |             } | 
| 3364 | 0 |         } else { | 
| 3365 |  |             // Block is okay for further processing | 
| 3366 | 0 |             RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer | 
| 3367 | 0 |             fBlockRead = true; | 
| 3368 |  |             // mapBlockSource is used for potentially punishing peers and | 
| 3369 |  |             // updating which peers send us compact blocks, so the race | 
| 3370 |  |             // between here and cs_main in ProcessNewBlock is fine. | 
| 3371 |  |             // BIP 152 permits peers to relay compact blocks after validating | 
| 3372 |  |             // the header only; we should not punish peers if the block turns | 
| 3373 |  |             // out to be invalid. | 
| 3374 | 0 |             mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false)); | 
| 3375 | 0 |         } | 
| 3376 | 0 |     } // Don't hold cs_main when we call into ProcessNewBlock | 
| 3377 | 0 |     if (fBlockRead) { | 
| 3378 |  |         // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, | 
| 3379 |  |         // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) | 
| 3380 |  |         // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent | 
| 3381 |  |         // disk-space attacks), but this should be safe due to the | 
| 3382 |  |         // protections in the compact block handler -- see related comment | 
| 3383 |  |         // in compact block optimistic reconstruction handling. | 
| 3384 | 0 |         ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); | 
| 3385 | 0 |     } | 
| 3386 | 0 |     return; | 
| 3387 | 0 | } | 
| 3388 |  |  | 
| 3389 | 0 | void PeerManagerImpl::LogBlockHeader(const CBlockIndex& index, const CNode& peer, bool via_compact_block) { | 
| 3390 |  |     // To prevent log spam, this function should only be called after it was determined that a | 
| 3391 |  |     // header is both new and valid. | 
| 3392 |  |     // | 
| 3393 |  |     // These messages are valuable for detecting potential selfish mining behavior; | 
| 3394 |  |     // if multiple displacing headers are seen near-simultaneously across many | 
| 3395 |  |     // nodes in the network, this might be an indication of selfish mining. | 
| 3396 |  |     // In addition, it can be used to identify peers which send us a header, but | 
| 3397 |  |     // don't follow up with a complete and valid (compact) block. | 
| 3398 |  |     // Having this log by default when not in IBD ensures broad availability of | 
| 3399 |  |     // this data in case investigation is merited. | 
| 3400 | 0 |     const auto msg = strprintf( | 
| 3401 | 0 |         "Saw new %sheader hash=%s height=%d peer=%d%s", | 
| 3402 | 0 |         via_compact_block ? "cmpctblock " : "", | 
| 3403 | 0 |         index.GetBlockHash().ToString(), | 
| 3404 | 0 |         index.nHeight, | 
| 3405 | 0 |         peer.GetId(), | 
| 3406 | 0 |         peer.LogIP(fLogIPs) | 
| 3407 | 0 |     ); | 
| 3408 | 0 |     if (m_chainman.IsInitialBlockDownload()) { | 
| 3409 | 0 |         LogDebug(BCLog::VALIDATION, "%s", msg); | 
| 3410 | 0 |     } else { | 
| 3411 | 0 |         LogInfo("%s", msg); | 
| 3412 | 0 |     } | 
| 3413 | 0 | } | 
| 3414 |  |  | 
| 3415 |  | void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, | 
| 3416 |  |                                      const std::chrono::microseconds time_received, | 
| 3417 |  |                                      const std::atomic<bool>& interruptMsgProc) | 
| 3418 | 0 | { | 
| 3419 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 3420 |  | 
 | 
| 3421 | 0 |     LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); | 
| 3422 |  | 
 | 
| 3423 | 0 |     PeerRef peer = GetPeerRef(pfrom.GetId()); | 
| 3424 | 0 |     if (peer == nullptr) return; | 
| 3425 |  |  | 
| 3426 | 0 |     if (msg_type == NetMsgType::VERSION) { | 
| 3427 | 0 |         if (pfrom.nVersion != 0) { | 
| 3428 | 0 |             LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId()); | 
| 3429 | 0 |             return; | 
| 3430 | 0 |         } | 
| 3431 |  |  | 
| 3432 | 0 |         int64_t nTime; | 
| 3433 | 0 |         CService addrMe; | 
| 3434 | 0 |         uint64_t nNonce = 1; | 
| 3435 | 0 |         ServiceFlags nServices; | 
| 3436 | 0 |         int nVersion; | 
| 3437 | 0 |         std::string cleanSubVer; | 
| 3438 | 0 |         int starting_height = -1; | 
| 3439 | 0 |         bool fRelay = true; | 
| 3440 |  | 
 | 
| 3441 | 0 |         vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime; | 
| 3442 | 0 |         if (nTime < 0) { | 
| 3443 | 0 |             nTime = 0; | 
| 3444 | 0 |         } | 
| 3445 | 0 |         vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer | 
| 3446 | 0 |         vRecv >> CNetAddr::V1(addrMe); | 
| 3447 | 0 |         if (!pfrom.IsInboundConn()) | 
| 3448 | 0 |         { | 
| 3449 |  |             // Overwrites potentially existing services. In contrast to this, | 
| 3450 |  |             // unvalidated services received via gossip relay in ADDR/ADDRV2 | 
| 3451 |  |             // messages are only ever added but cannot replace existing ones. | 
| 3452 | 0 |             m_addrman.SetServices(pfrom.addr, nServices); | 
| 3453 | 0 |         } | 
| 3454 | 0 |         if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) | 
| 3455 | 0 |         { | 
| 3456 | 0 |             LogDebug(BCLog::NET, "peer does not offer the expected services (%08x offered, %08x expected), %s\n", | 
| 3457 | 0 |                      nServices, | 
| 3458 | 0 |                      GetDesirableServiceFlags(nServices), | 
| 3459 | 0 |                      pfrom.DisconnectMsg(fLogIPs)); | 
| 3460 | 0 |             pfrom.fDisconnect = true; | 
| 3461 | 0 |             return; | 
| 3462 | 0 |         } | 
| 3463 |  |  | 
| 3464 | 0 |         if (nVersion < MIN_PEER_PROTO_VERSION) { | 
| 3465 |  |             // disconnect from peers older than this proto version | 
| 3466 | 0 |             LogDebug(BCLog::NET, "peer using obsolete version %i, %s\n", nVersion, pfrom.DisconnectMsg(fLogIPs)); | 
| 3467 | 0 |             pfrom.fDisconnect = true; | 
| 3468 | 0 |             return; | 
| 3469 | 0 |         } | 
| 3470 |  |  | 
| 3471 | 0 |         if (!vRecv.empty()) { | 
| 3472 |  |             // The version message includes information about the sending node which we don't use: | 
| 3473 |  |             //   - 8 bytes (service bits) | 
| 3474 |  |             //   - 16 bytes (ipv6 address) | 
| 3475 |  |             //   - 2 bytes (port) | 
| 3476 | 0 |             vRecv.ignore(26); | 
| 3477 | 0 |             vRecv >> nNonce; | 
| 3478 | 0 |         } | 
| 3479 | 0 |         if (!vRecv.empty()) { | 
| 3480 | 0 |             std::string strSubVer; | 
| 3481 | 0 |             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); | 
| 3482 | 0 |             cleanSubVer = SanitizeString(strSubVer); | 
| 3483 | 0 |         } | 
| 3484 | 0 |         if (!vRecv.empty()) { | 
| 3485 | 0 |             vRecv >> starting_height; | 
| 3486 | 0 |         } | 
| 3487 | 0 |         if (!vRecv.empty()) | 
| 3488 | 0 |             vRecv >> fRelay; | 
| 3489 |  |         // Disconnect if we connected to ourself | 
| 3490 | 0 |         if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) | 
| 3491 | 0 |         { | 
| 3492 | 0 |             LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort()); | 
| 3493 | 0 |             pfrom.fDisconnect = true; | 
| 3494 | 0 |             return; | 
| 3495 | 0 |         } | 
| 3496 |  |  | 
| 3497 | 0 |         if (pfrom.IsInboundConn() && addrMe.IsRoutable()) | 
| 3498 | 0 |         { | 
| 3499 | 0 |             SeenLocal(addrMe); | 
| 3500 | 0 |         } | 
| 3501 |  |  | 
| 3502 |  |         // Inbound peers send us their version message when they connect. | 
| 3503 |  |         // We send our version message in response. | 
| 3504 | 0 |         if (pfrom.IsInboundConn()) { | 
| 3505 | 0 |             PushNodeVersion(pfrom, *peer); | 
| 3506 | 0 |         } | 
| 3507 |  |  | 
| 3508 |  |         // Change version | 
| 3509 | 0 |         const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION); | 
| 3510 | 0 |         pfrom.SetCommonVersion(greatest_common_version); | 
| 3511 | 0 |         pfrom.nVersion = nVersion; | 
| 3512 |  | 
 | 
| 3513 | 0 |         if (greatest_common_version >= WTXID_RELAY_VERSION) { | 
| 3514 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY); | 
| 3515 | 0 |         } | 
| 3516 |  |  | 
| 3517 |  |         // Signal ADDRv2 support (BIP155). | 
| 3518 | 0 |         if (greatest_common_version >= 70016) { | 
| 3519 |  |             // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some | 
| 3520 |  |             // implementations reject messages they don't know. As a courtesy, don't send | 
| 3521 |  |             // it to nodes with a version before 70016, as no software is known to support | 
| 3522 |  |             // BIP155 that doesn't announce at least that protocol version number. | 
| 3523 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2); | 
| 3524 | 0 |         } | 
| 3525 |  | 
 | 
| 3526 | 0 |         pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); | 
| 3527 | 0 |         peer->m_their_services = nServices; | 
| 3528 | 0 |         pfrom.SetAddrLocal(addrMe); | 
| 3529 | 0 |         { | 
| 3530 | 0 |             LOCK(pfrom.m_subver_mutex); | 
| 3531 | 0 |             pfrom.cleanSubVer = cleanSubVer; | 
| 3532 | 0 |         } | 
| 3533 | 0 |         peer->m_starting_height = starting_height; | 
| 3534 |  |  | 
| 3535 |  |         // Only initialize the Peer::TxRelay m_relay_txs data structure if: | 
| 3536 |  |         // - this isn't an outbound block-relay-only connection, and | 
| 3537 |  |         // - this isn't an outbound feeler connection, and | 
| 3538 |  |         // - fRelay=true (the peer wishes to receive transaction announcements) | 
| 3539 |  |         //   or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that | 
| 3540 |  |         //   the peer may turn on transaction relay later. | 
| 3541 | 0 |         if (!pfrom.IsBlockOnlyConn() && | 
| 3542 | 0 |             !pfrom.IsFeelerConn() && | 
| 3543 | 0 |             (fRelay || (peer->m_our_services & NODE_BLOOM))) { | 
| 3544 | 0 |             auto* const tx_relay = peer->SetTxRelay(); | 
| 3545 | 0 |             { | 
| 3546 | 0 |                 LOCK(tx_relay->m_bloom_filter_mutex); | 
| 3547 | 0 |                 tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message | 
| 3548 | 0 |             } | 
| 3549 | 0 |             if (fRelay) pfrom.m_relays_txs = true; | 
| 3550 | 0 |         } | 
| 3551 |  | 
 | 
| 3552 | 0 |         if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) { | 
| 3553 |  |             // Per BIP-330, we announce txreconciliation support if: | 
| 3554 |  |             // - protocol version per the peer's VERSION message supports WTXID_RELAY; | 
| 3555 |  |             // - transaction relay is supported per the peer's VERSION message; | 
| 3556 |  |             // - this is not a block-relay-only connection and not a feeler; | 
| 3557 |  |             // - this is not an addr fetch connection; | 
| 3558 |  |             // - we are not in -blocksonly mode. | 
| 3559 | 0 |             const auto* tx_relay = peer->GetTxRelay(); | 
| 3560 | 0 |             if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) && | 
| 3561 | 0 |                 !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) { | 
| 3562 | 0 |                 const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId()); | 
| 3563 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL, | 
| 3564 | 0 |                                    TXRECONCILIATION_VERSION, recon_salt); | 
| 3565 | 0 |             } | 
| 3566 | 0 |         } | 
| 3567 |  | 
 | 
| 3568 | 0 |         MakeAndPushMessage(pfrom, NetMsgType::VERACK); | 
| 3569 |  |  | 
| 3570 |  |         // Potentially mark this peer as a preferred download peer. | 
| 3571 | 0 |         { | 
| 3572 | 0 |             LOCK(cs_main); | 
| 3573 | 0 |             CNodeState* state = State(pfrom.GetId()); | 
| 3574 | 0 |             state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer); | 
| 3575 | 0 |             m_num_preferred_download_peers += state->fPreferredDownload; | 
| 3576 | 0 |         } | 
| 3577 |  |  | 
| 3578 |  |         // Attempt to initialize address relay for outbound peers and use result | 
| 3579 |  |         // to decide whether to send GETADDR, so that we don't send it to | 
| 3580 |  |         // inbound or outbound block-relay-only peers. | 
| 3581 | 0 |         bool send_getaddr{false}; | 
| 3582 | 0 |         if (!pfrom.IsInboundConn()) { | 
| 3583 | 0 |             send_getaddr = SetupAddressRelay(pfrom, *peer); | 
| 3584 | 0 |         } | 
| 3585 | 0 |         if (send_getaddr) { | 
| 3586 |  |             // Do a one-time address fetch to help populate/update our addrman. | 
| 3587 |  |             // If we're starting up for the first time, our addrman may be pretty | 
| 3588 |  |             // empty, so this mechanism is important to help us connect to the network. | 
| 3589 |  |             // We skip this for block-relay-only peers. We want to avoid | 
| 3590 |  |             // potentially leaking addr information and we do not want to | 
| 3591 |  |             // indicate to the peer that we will participate in addr relay. | 
| 3592 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::GETADDR); | 
| 3593 | 0 |             peer->m_getaddr_sent = true; | 
| 3594 |  |             // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response | 
| 3595 |  |             // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit). | 
| 3596 | 0 |             peer->m_addr_token_bucket += MAX_ADDR_TO_SEND; | 
| 3597 | 0 |         } | 
| 3598 |  |  | 
| 3599 | 0 |         if (!pfrom.IsInboundConn()) { | 
| 3600 |  |             // For non-inbound connections, we update the addrman to record | 
| 3601 |  |             // connection success so that addrman will have an up-to-date | 
| 3602 |  |             // notion of which peers are online and available. | 
| 3603 |  |             // | 
| 3604 |  |             // While we strive to not leak information about block-relay-only | 
| 3605 |  |             // connections via the addrman, not moving an address to the tried | 
| 3606 |  |             // table is also potentially detrimental because new-table entries | 
| 3607 |  |             // are subject to eviction in the event of addrman collisions.  We | 
| 3608 |  |             // mitigate the information-leak by never calling | 
| 3609 |  |             // AddrMan::Connected() on block-relay-only peers; see | 
| 3610 |  |             // FinalizeNode(). | 
| 3611 |  |             // | 
| 3612 |  |             // This moves an address from New to Tried table in Addrman, | 
| 3613 |  |             // resolves tried-table collisions, etc. | 
| 3614 | 0 |             m_addrman.Good(pfrom.addr); | 
| 3615 | 0 |         } | 
| 3616 |  |  | 
| 3617 | 0 |         const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; | 
| 3618 | 0 |         LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n", | 
| 3619 | 0 |                   cleanSubVer, pfrom.nVersion, | 
| 3620 | 0 |                   peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(), | 
| 3621 | 0 |                   pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); | 
| 3622 |  |  | 
| 3623 | 0 |         peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>(); | 
| 3624 | 0 |         if (!pfrom.IsInboundConn()) { | 
| 3625 |  |             // Don't use timedata samples from inbound peers, to make it | 
| 3626 |  |             // harder for others to create false warnings about our clock being out of sync. | 
| 3627 | 0 |             m_outbound_time_offsets.Add(peer->m_time_offset); | 
| 3628 | 0 |             m_outbound_time_offsets.WarnIfOutOfSync(); | 
| 3629 | 0 |         } | 
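
The block above records the peer's clock offset but only feeds samples from outbound peers into the warning machinery, since an attacker can open inbound connections at will and skew any inbound-derived statistic. A minimal standalone sketch of that idea (an illustration, not Bitcoin Core's TimeOffsets class; the sample floor and ten-minute limit are placeholder choices):

```cpp
#include <algorithm>
#include <chrono>
#include <vector>

class OutboundClockOffsets
{
    std::vector<std::chrono::seconds> m_samples; // samples from outbound peers only

public:
    void Add(std::chrono::seconds offset) { m_samples.push_back(offset); }

    // Warn once the median offset drifts past the limit. The limit and the
    // minimum sample count here are placeholders, not the node's actual policy.
    bool MedianOutOfSync(std::chrono::seconds limit = std::chrono::minutes{10})
    {
        if (m_samples.size() < 5) return false; // too few samples to judge
        const auto mid = m_samples.begin() + m_samples.size() / 2;
        std::nth_element(m_samples.begin(), mid, m_samples.end()); // partial sort to the median
        return *mid > limit || *mid < -limit;
    }
};
```
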
| 3630 |  |  | 
| 3631 |  |         // If the peer is old enough to have the old alert system, send it the final alert. | 
| 3632 | 0 |         if (greatest_common_version <= 70012) { | 
| 3633 | 0 |             constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex}; | 
| 3634 | 0 |             MakeAndPushMessage(pfrom, "alert", finalAlert); | 
| 3635 | 0 |         } | 
| 3636 |  |  | 
| 3637 |  |         // Feeler connections exist only to verify whether an address is online. | 
| 3638 | 0 |         if (pfrom.IsFeelerConn()) { | 
| 3639 | 0 |             LogDebug(BCLog::NET, "feeler connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3640 | 0 |             pfrom.fDisconnect = true; | 
| 3641 | 0 |         } | 
| 3642 | 0 |         return; | 
| 3643 | 0 |     } | 
| 3644 |  |  | 
| 3645 | 0 |     if (pfrom.nVersion == 0) { | 
| 3646 |  |         // Must have a version message before anything else | 
| 3647 | 0 |         LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); | 
| 3648 | 0 |         return; | 
| 3649 | 0 |     } | 
| 3650 |  |  | 
| 3651 | 0 |     if (msg_type == NetMsgType::VERACK) { | 
| 3652 | 0 |         if (pfrom.fSuccessfullyConnected) { | 
| 3653 | 0 |             LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId()); | 
| 3654 | 0 |             return; | 
| 3655 | 0 |         } | 
| 3656 |  |  | 
| 3657 |  |         // Log successful connections unconditionally for outbound, but not for inbound as those | 
| 3658 |  |         // can be triggered by an attacker at high rate. | 
| 3659 | 0 |         if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { | 
| 3660 | 0 |             const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; | 
| 3661 | 0 |             LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", | 
| 3662 | 0 |                       pfrom.ConnectionTypeAsString(), | 
| 3663 | 0 |                       TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), | 
| 3664 | 0 |                       pfrom.nVersion.load(), peer->m_starting_height, | 
| 3665 | 0 |                       pfrom.GetId(), pfrom.LogIP(fLogIPs), | 
| 3666 | 0 |                       (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); | 
| 3667 | 0 |         } | 
| 3668 |  |  | 
| 3669 | 0 |         if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { | 
| 3670 |  |             // Tell our peer we are willing to provide version 2 cmpctblocks. | 
| 3671 |  |             // However, we do not request new block announcements using | 
| 3672 |  |             // cmpctblock messages. | 
| 3673 |  |             // We send this to non-NODE_NETWORK peers as well, because | 
| 3674 |  |             // they may wish to request compact blocks from us. | 
| 3675 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); | 
| 3676 | 0 |         } | 
| 3677 |  |  | 
| 3678 | 0 |         if (m_txreconciliation) { | 
| 3679 | 0 |             if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) { | 
| 3680 |  |                 // We could have optimistically pre-registered/registered the peer. In that case, | 
| 3681 |  |                 // we should forget about the reconciliation state here if this wasn't followed | 
| 3682 |  |                 // by WTXIDRELAY (since WTXIDRELAY can't be announced later). | 
| 3683 | 0 |                 m_txreconciliation->ForgetPeer(pfrom.GetId()); | 
| 3684 | 0 |             } | 
| 3685 | 0 |         } | 
| 3686 |  |  | 
| 3687 | 0 |         if (auto tx_relay = peer->GetTxRelay()) { | 
| 3688 |  |             // `TxRelay::m_tx_inventory_to_send` must be empty before the | 
| 3689 |  |             // version handshake is completed as | 
| 3690 |  |             // `TxRelay::m_next_inv_send_time` is first initialised in | 
| 3691 |  |             // `SendMessages` after the verack is received. Any transactions | 
| 3692 |  |             // received during the version handshake would otherwise | 
| 3693 |  |             // immediately be advertised without random delay, potentially | 
| 3694 |  |             // leaking the time of arrival to a spy. | 
| 3695 | 0 |             Assume(WITH_LOCK( | 
| 3696 | 0 |                 tx_relay->m_tx_inventory_mutex, | 
| 3697 | 0 |                 return tx_relay->m_tx_inventory_to_send.empty() && | 
| 3698 | 0 |                        tx_relay->m_next_inv_send_time == 0s)); | 
| 3699 | 0 |         } | 
| 3700 |  |  | 
| 3701 | 0 |         { | 
| 3702 | 0 |             LOCK2(::cs_main, m_tx_download_mutex); | 
| 3703 | 0 |             const CNodeState* state = State(pfrom.GetId()); | 
| 3704 | 0 |             m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo { | 
| 3705 | 0 |                 .m_preferred = state->fPreferredDownload, | 
| 3706 | 0 |                 .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay), | 
| 3707 | 0 |                 .m_wtxid_relay = peer->m_wtxid_relay, | 
| 3708 | 0 |             }); | 
| 3709 | 0 |         } | 
| 3710 |  |  | 
| 3711 | 0 |         pfrom.fSuccessfullyConnected = true; | 
| 3712 | 0 |         return; | 
| 3713 | 0 |     } | 
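
The assertion in the VERACK handler above exists because transaction announcements are deliberately batched and flushed on a randomized schedule; anything queued before that schedule is initialised would leak its arrival time to a spy. A sketch of drawing such a randomized delay (an exponential draw giving Poisson-timed flushes; the mean interval is a placeholder, not the node's constant):

```cpp
#include <chrono>
#include <random>

// Draw the delay until the next inv flush: exponentially distributed waits
// produce Poisson-timed broadcasts, hiding when any one tx actually arrived.
std::chrono::microseconds NextInvSendDelay(std::mt19937_64& rng,
                                           std::chrono::seconds mean_interval)
{
    std::exponential_distribution<double> dist(1.0 / mean_interval.count());
    return std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::duration<double>{dist(rng)});
}
```
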
| 3714 |  |  | 
| 3715 | 0 |     if (msg_type == NetMsgType::SENDHEADERS) { | 
| 3716 | 0 |         peer->m_prefers_headers = true; | 
| 3717 | 0 |         return; | 
| 3718 | 0 |     } | 
| 3719 |  |  | 
| 3720 | 0 |     if (msg_type == NetMsgType::SENDCMPCT) { | 
| 3721 | 0 |         bool sendcmpct_hb{false}; | 
| 3722 | 0 |         uint64_t sendcmpct_version{0}; | 
| 3723 | 0 |         vRecv >> sendcmpct_hb >> sendcmpct_version; | 
| 3724 |  |  | 
| 3725 |  |         // Only support compact block relay with witnesses | 
| 3726 | 0 |         if (sendcmpct_version != CMPCTBLOCKS_VERSION) return; | 
| 3727 |  |  | 
| 3728 | 0 |         LOCK(cs_main); | 
| 3729 | 0 |         CNodeState* nodestate = State(pfrom.GetId()); | 
| 3730 | 0 |         nodestate->m_provides_cmpctblocks = true; | 
| 3731 | 0 |         nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb; | 
| 3732 |  |         // Save whether the peer selects us as a BIP152 high-bandwidth peer | 
| 3733 |  |         // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) | 
| 3734 | 0 |         pfrom.m_bip152_highbandwidth_from = sendcmpct_hb; | 
| 3735 | 0 |         return; | 
| 3736 | 0 |     } | 
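
For reference, the sendcmpct payload read above is just two fields: a boolean selecting high-bandwidth mode and a 64-bit version, serialized little-endian per BIP152. A sketch of encoding it by hand (illustrative only; real code would go through the serialization framework):

```cpp
#include <array>
#include <cstdint>

// Encode a BIP152 sendcmpct payload: a 1-byte boolean, then a little-endian
// uint64 version (version 2 requests witness-carrying compact blocks).
std::array<unsigned char, 9> EncodeSendCmpct(bool high_bandwidth, uint64_t version)
{
    std::array<unsigned char, 9> payload{};
    payload[0] = high_bandwidth ? 1 : 0;
    for (int i = 0; i < 8; ++i) {
        payload[1 + i] = static_cast<unsigned char>(version >> (8 * i));
    }
    return payload;
}
```
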
| 3737 |  |  | 
| 3738 |  |     // BIP339 defines feature negotiation of wtxidrelay, which must happen between | 
| 3739 |  |     // VERSION and VERACK to avoid relay problems from switching after a connection is up. | 
| 3740 | 0 |     if (msg_type == NetMsgType::WTXIDRELAY) { | 
| 3741 | 0 |         if (pfrom.fSuccessfullyConnected) { | 
| 3742 |  |             // Disconnect peers that send a wtxidrelay message after VERACK. | 
| 3743 | 0 |             LogDebug(BCLog::NET, "wtxidrelay received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3744 | 0 |             pfrom.fDisconnect = true; | 
| 3745 | 0 |             return; | 
| 3746 | 0 |         } | 
| 3747 | 0 |         if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) { | 
| 3748 | 0 |             if (!peer->m_wtxid_relay) { | 
| 3749 | 0 |                 peer->m_wtxid_relay = true; | 
| 3750 | 0 |                 m_wtxid_relay_peers++; | 
| 3751 | 0 |             } else { | 
| 3752 | 0 |                 LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); | 
| 3753 | 0 |             } | 
| 3754 | 0 |         } else { | 
| 3755 | 0 |             LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId()); | 
| 3756 | 0 |         } | 
| 3757 | 0 |         return; | 
| 3758 | 0 |     } | 
| 3759 |  |  | 
| 3760 |  |     // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen | 
| 3761 |  |     // between VERSION and VERACK. | 
| 3762 | 0 |     if (msg_type == NetMsgType::SENDADDRV2) { | 
| 3763 | 0 |         if (pfrom.fSuccessfullyConnected) { | 
| 3764 |  |             // Disconnect peers that send a SENDADDRV2 message after VERACK. | 
| 3765 | 0 |             LogDebug(BCLog::NET, "sendaddrv2 received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3766 | 0 |             pfrom.fDisconnect = true; | 
| 3767 | 0 |             return; | 
| 3768 | 0 |         } | 
| 3769 | 0 |         peer->m_wants_addrv2 = true; | 
| 3770 | 0 |         return; | 
| 3771 | 0 |     } | 
| 3772 |  |  | 
| 3773 |  |     // Received from a peer demonstrating readiness to announce transactions via reconciliations. | 
| 3774 |  |     // This feature negotiation must happen between VERSION and VERACK to avoid relay problems | 
| 3775 |  |     // from switching announcement protocols after the connection is up. | 
| 3776 | 0 |     if (msg_type == NetMsgType::SENDTXRCNCL) { | 
| 3777 | 0 |         if (!m_txreconciliation) { | 
| 3778 | 0 |             LogDebug(BCLog::NET, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId()); | 
| 3779 | 0 |             return; | 
| 3780 | 0 |         } | 
| 3781 |  |  | 
| 3782 | 0 |         if (pfrom.fSuccessfullyConnected) { | 
| 3783 | 0 |             LogDebug(BCLog::NET, "sendtxrcncl received after verack, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3784 | 0 |             pfrom.fDisconnect = true; | 
| 3785 | 0 |             return; | 
| 3786 | 0 |         } | 
| 3787 |  |  | 
| 3788 |  |         // Peer must not offer us reconciliations if we specified no tx relay support in VERSION. | 
| 3789 | 0 |         if (RejectIncomingTxs(pfrom)) { | 
| 3790 | 0 |             LogDebug(BCLog::NET, "sendtxrcncl received from a peer to which we indicated no tx relay, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3791 | 0 |             pfrom.fDisconnect = true; | 
| 3792 | 0 |             return; | 
| 3793 | 0 |         } | 
| 3794 |  |  | 
| 3795 |  |         // Peer must not offer us reconciliations if they specified no tx relay support in VERSION. | 
| 3796 |  |         // This flag might also be false in other cases, but the RejectIncomingTxs check above | 
| 3797 |  |         // eliminates them, so that this flag fully represents what we are looking for. | 
| 3798 | 0 |         const auto* tx_relay = peer->GetTxRelay(); | 
| 3799 | 0 |         if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) { | 
| 3800 | 0 |             LogDebug(BCLog::NET, "sendtxrcncl received from a peer that indicated no tx relay to us, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3801 | 0 |             pfrom.fDisconnect = true; | 
| 3802 | 0 |             return; | 
| 3803 | 0 |         } | 
| 3804 |  |  | 
| 3805 | 0 |         uint32_t peer_txreconcl_version; | 
| 3806 | 0 |         uint64_t remote_salt; | 
| 3807 | 0 |         vRecv >> peer_txreconcl_version >> remote_salt; | 
| 3808 |  |  | 
| 3809 | 0 |         const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(), | 
| 3810 | 0 |                                                                                      peer_txreconcl_version, remote_salt); | 
| 3811 | 0 |         switch (result) { | 
| 3812 | 0 |         case ReconciliationRegisterResult::NOT_FOUND: | 
| 3813 | 0 |             LogDebug(BCLog::NET, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId()); | 
| 3814 | 0 |             break; | 
| 3815 | 0 |         case ReconciliationRegisterResult::SUCCESS: | 
| 3816 | 0 |             break; | 
| 3817 | 0 |         case ReconciliationRegisterResult::ALREADY_REGISTERED: | 
| 3818 | 0 |             LogDebug(BCLog::NET, "txreconciliation protocol violation (sendtxrcncl received from already registered peer), %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3819 | 0 |             pfrom.fDisconnect = true; | 
| 3820 | 0 |             return; | 
| 3821 | 0 |         case ReconciliationRegisterResult::PROTOCOL_VIOLATION: | 
| 3822 | 0 |             LogDebug(BCLog::NET, "txreconciliation protocol violation, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3823 | 0 |             pfrom.fDisconnect = true; | 
| 3824 | 0 |             return; | 
| 3825 | 0 |         } | 
| 3826 | 0 |         return; | 
| 3827 | 0 |     } | 
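
WTXIDRELAY, SENDADDRV2 and SENDTXRCNCL above all enforce the same ordering rule: they are only meaningful after VERSION and before VERACK, and arriving later is grounds for disconnection. A compact sketch of that shared gate (a hypothetical helper, not a function in this file):

```cpp
#include <string_view>

enum class HandshakeStage { AwaitingVersion, VersionReceived, Completed };

// Negotiation messages are valid only in the window between VERSION and VERACK.
bool NegotiationAllowed(HandshakeStage stage, std::string_view msg_type)
{
    const bool negotiation = msg_type == "wtxidrelay" ||
                             msg_type == "sendaddrv2" ||
                             msg_type == "sendtxrcncl";
    return negotiation && stage == HandshakeStage::VersionReceived;
}
```
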
| 3828 |  |  | 
| 3829 | 0 |     if (!pfrom.fSuccessfullyConnected) { | 
| 3830 | 0 |         LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); | 
| 3831 | 0 |         return; | 
| 3832 | 0 |     } | 
| 3833 |  |  | 
| 3834 | 0 |     if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) { | 
| 3835 | 0 |         const auto ser_params{ | 
| 3836 | 0 |             msg_type == NetMsgType::ADDRV2 ? | 
| 3837 |  |             // Set V2 param so that the CNetAddr and CAddress | 
| 3838 |  |             // unserialize methods know that an address in v2 format is coming. | 
| 3839 | 0 |             CAddress::V2_NETWORK : | 
| 3840 | 0 |             CAddress::V1_NETWORK, | 
| 3841 | 0 |         }; | 
| 3842 |  |  | 
| 3843 | 0 |         std::vector<CAddress> vAddr; | 
| 3844 |  |  | 
| 3845 | 0 |         vRecv >> ser_params(vAddr); | 
| 3846 |  |  | 
| 3847 | 0 |         if (!SetupAddressRelay(pfrom, *peer)) { | 
| 3848 | 0 |             LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId()); | 
| 3849 | 0 |             return; | 
| 3850 | 0 |         } | 
| 3851 |  |  | 
| 3852 | 0 |         if (vAddr.size() > MAX_ADDR_TO_SEND) | 
| 3853 | 0 |         { | 
| 3854 | 0 |             Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size())); | 
| 3855 | 0 |             return; | 
| 3856 | 0 |         } | 
| 3857 |  |  | 
| 3858 |  |         // Store the new addresses | 
| 3859 | 0 |         std::vector<CAddress> vAddrOk; | 
| 3860 | 0 |         const auto current_a_time{Now<NodeSeconds>()}; | 
| 3861 |  |  | 
| 3862 |  |         // Update/increment addr rate limiting bucket. | 
| 3863 | 0 |         const auto current_time{GetTime<std::chrono::microseconds>()}; | 
| 3864 | 0 |         if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) { | 
| 3865 |  |             // Don't increment bucket if it's already full | 
| 3866 | 0 |             const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us); | 
| 3867 | 0 |             const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND; | 
| 3868 | 0 |             peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET); | 
| 3869 | 0 |         } | 
| 3870 | 0 |         peer->m_addr_token_timestamp = current_time; | 
| 3871 |  |  | 
| 3872 | 0 |         const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); | 
| 3873 | 0 |         uint64_t num_proc = 0; | 
| 3874 | 0 |         uint64_t num_rate_limit = 0; | 
| 3875 | 0 |         std::shuffle(vAddr.begin(), vAddr.end(), m_rng); | 
| 3876 | 0 |         for (CAddress& addr : vAddr) | 
| 3877 | 0 |         { | 
| 3878 | 0 |             if (interruptMsgProc) | 
| 3879 | 0 |                 return; | 
| 3880 |  |  | 
| 3881 |  |             // Apply rate limiting. | 
| 3882 | 0 |             if (peer->m_addr_token_bucket < 1.0) { | 
| 3883 | 0 |                 if (rate_limited) { | 
| 3884 | 0 |                     ++num_rate_limit; | 
| 3885 | 0 |                     continue; | 
| 3886 | 0 |                 } | 
| 3887 | 0 |             } else { | 
| 3888 | 0 |                 peer->m_addr_token_bucket -= 1.0; | 
| 3889 | 0 |             } | 
| 3890 |  |             // We only bother storing full nodes, though this may include | 
| 3891 |  |             // things which we would not make an outbound connection to, in | 
| 3892 |  |             // part because we may make feeler connections to them. | 
| 3893 | 0 |             if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) | 
| 3894 | 0 |                 continue; | 
| 3895 |  |  | 
| 3896 | 0 |             if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) { | 
| 3897 | 0 |                 addr.nTime = current_a_time - 5 * 24h; | 
| 3898 | 0 |             } | 
| 3899 | 0 |             AddAddressKnown(*peer, addr); | 
| 3900 | 0 |             if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { | 
| 3901 |  |                 // Do not process banned/discouraged addresses beyond remembering we received them | 
| 3902 | 0 |                 continue; | 
| 3903 | 0 |             } | 
| 3904 | 0 |             ++num_proc; | 
| 3905 | 0 |             const bool reachable{g_reachable_nets.Contains(addr)}; | 
| 3906 | 0 |             if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { | 
| 3907 |  |                 // Relay to a limited number of other nodes | 
| 3908 | 0 |                 RelayAddress(pfrom.GetId(), addr, reachable); | 
| 3909 | 0 |             } | 
| 3910 |  |             // Do not store addresses outside our network | 
| 3911 | 0 |             if (reachable) { | 
| 3912 | 0 |                 vAddrOk.push_back(addr); | 
| 3913 | 0 |             } | 
| 3914 | 0 |         } | 
| 3915 | 0 |         peer->m_addr_processed += num_proc; | 
| 3916 | 0 |         peer->m_addr_rate_limited += num_rate_limit; | 
| 3917 | 0 |         LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n", | 
| 3918 | 0 |                  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId()); | 
| 3919 |  |  | 
| 3920 | 0 |         m_addrman.Add(vAddrOk, pfrom.addr, 2h); | 
| 3921 | 0 |         if (vAddr.size() < 1000) peer->m_getaddr_sent = false; | 
| 3922 |  |  | 
| 3923 |  |         // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements | 
| 3924 | 0 |         if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) { | 
| 3925 | 0 |             LogDebug(BCLog::NET, "addrfetch connection completed, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 3926 | 0 |             pfrom.fDisconnect = true; | 
| 3927 | 0 |         } | 
| 3928 | 0 |         return; | 
| 3929 | 0 |     } | 
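
The addr handler above is built around a token bucket: the bucket refills in proportion to elapsed time, is capped at a burst size, and each processed address spends one token (with a one-time bonus after our own getaddr). A standalone sketch of the pattern, with placeholder rate and burst values rather than the node's constants:

```cpp
#include <algorithm>
#include <chrono>

class AddrTokenBucket
{
    double m_tokens{1.0}; // small initial allowance
    std::chrono::steady_clock::time_point m_last{std::chrono::steady_clock::now()};
    static constexpr double RATE_PER_SECOND{0.1}; // placeholder refill rate
    static constexpr double BURST{1000.0};        // placeholder bucket cap

public:
    // Refill proportionally to elapsed time (capped at BURST), then try to
    // spend one token; a false return means the address is rate-limited.
    bool Spend()
    {
        const auto now = std::chrono::steady_clock::now();
        const double elapsed = std::chrono::duration<double>(now - m_last).count();
        m_last = now;
        m_tokens = std::min(m_tokens + elapsed * RATE_PER_SECOND, BURST);
        if (m_tokens < 1.0) return false;
        m_tokens -= 1.0;
        return true;
    }

    // One-time allowance for an expected getaddr response; deliberately
    // allowed to exceed the cap, mirroring the bonus granted above.
    void GrantGetAddrBonus(double tokens) { m_tokens += tokens; }
};
```
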
| 3930 |  |  | 
| 3931 | 0 |     if (msg_type == NetMsgType::INV) { | 
| 3932 | 0 |         std::vector<CInv> vInv; | 
| 3933 | 0 |         vRecv >> vInv; | 
| 3934 | 0 |         if (vInv.size() > MAX_INV_SZ) | 
| 3935 | 0 |         { | 
| 3936 | 0 |             Misbehaving(*peer, strprintf("inv message size = %u", vInv.size())); | 
| 3937 | 0 |             return; | 
| 3938 | 0 |         } | 
| 3939 |  |  | 
| 3940 | 0 |         const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; | 
| 3941 |  |  | 
| 3942 | 0 |         LOCK2(cs_main, m_tx_download_mutex); | 
| 3943 |  |  | 
| 3944 | 0 |         const auto current_time{GetTime<std::chrono::microseconds>()}; | 
| 3945 | 0 |         uint256* best_block{nullptr}; | 
| 3946 |  |  | 
| 3947 | 0 |         for (CInv& inv : vInv) { | 
| 3948 | 0 |             if (interruptMsgProc) return; | 
| 3949 |  |  | 
| 3950 |  |             // Ignore INVs that don't match wtxidrelay setting. | 
| 3951 |  |             // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. | 
| 3952 |  |             // This is fine as no INV messages are involved in that process. | 
| 3953 | 0 |             if (peer->m_wtxid_relay) { | 
| 3954 | 0 |                 if (inv.IsMsgTx()) continue; | 
| 3955 | 0 |             } else { | 
| 3956 | 0 |                 if (inv.IsMsgWtx()) continue; | 
| 3957 | 0 |             } | 
| 3958 |  |  | 
| 3959 | 0 |             if (inv.IsMsgBlk()) { | 
| 3960 | 0 |                 const bool fAlreadyHave = AlreadyHaveBlock(inv.hash); | 
| 3961 | 0 |                 LogDebug(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); | 
| 3962 |  |  | 
| 3963 | 0 |                 UpdateBlockAvailability(pfrom.GetId(), inv.hash); | 
| 3964 | 0 |                 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) { | 
| 3965 |  |                     // Headers-first is the primary method of announcement on | 
| 3966 |  |                     // the network. If a node fell back to sending blocks by | 
| 3967 |  |                     // inv, it may be for a re-org, or because we haven't | 
| 3968 |  |                     // completed initial headers sync. The final block hash | 
| 3969 |  |                     // provided should be the highest, so send a getheaders and | 
| 3970 |  |                     // then fetch the blocks we need to catch up. | 
| 3971 | 0 |                     best_block = &inv.hash; | 
| 3972 | 0 |                 } | 
| 3973 | 0 |             } else if (inv.IsGenTxMsg()) { | 
| 3974 | 0 |                 if (reject_tx_invs) { | 
| 3975 | 0 |                     LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, %s\n", inv.hash.ToString(), pfrom.DisconnectMsg(fLogIPs)); | 
| 3976 | 0 |                     pfrom.fDisconnect = true; | 
| 3977 | 0 |                     return; | 
| 3978 | 0 |                 } | 
| 3979 | 0 |                 const GenTxid gtxid = ToGenTxid(inv); | 
| 3980 | 0 |                 AddKnownTx(*peer, inv.hash); | 
| 3981 |  |  | 
| 3982 | 0 |                 if (!m_chainman.IsInitialBlockDownload()) { | 
| 3983 | 0 |                     const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time)}; | 
| 3984 | 0 |                     LogDebug(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); | 
| 3985 | 0 |                 } | 
| 3986 | 0 |             } else { | 
| 3987 | 0 |                 LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); | 
| 3988 | 0 |             } | 
| 3989 | 0 |         } | 
| 3990 |  |  | 
| 3991 | 0 |         if (best_block != nullptr) { | 
| 3992 |  |             // If we haven't started initial headers-sync with this peer, then | 
| 3993 |  |             // consider sending a getheaders now. On initial startup, there's a | 
| 3994 |  |             // reliability vs bandwidth tradeoff, where we are only trying to do | 
| 3995 |  |             // initial headers sync with one peer at a time, with a long | 
| 3996 |  |             // timeout (at which point, if the sync hasn't completed, we will | 
| 3997 |  |             // disconnect the peer and then choose another). In the meantime, | 
| 3998 |  |             // as new blocks are found, we are willing to add one new peer per | 
| 3999 |  |             // block to sync with as well, to sync quicker in the case where | 
| 4000 |  |             // our initial peer is unresponsive (but less bandwidth than we'd | 
| 4001 |  |             // use if we turned on sync with all peers). | 
| 4002 | 0 |             CNodeState& state{*Assert(State(pfrom.GetId()))}; | 
| 4003 | 0 |             if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { | 
| 4004 | 0 |                 if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) { | 
| 4005 | 0 |                     LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n", | 
| 4006 | 0 |                             m_chainman.m_best_header->nHeight, best_block->ToString(), | 
| 4007 | 0 |                             pfrom.GetId()); | 
| 4008 | 0 |                 } | 
| 4009 | 0 |                 if (!state.fSyncStarted) { | 
| 4010 | 0 |                     peer->m_inv_triggered_getheaders_before_sync = true; | 
| 4011 |  |                     // Update the last block hash that triggered a new headers | 
| 4012 |  |                     // sync, so that we don't turn on headers sync with more | 
| 4013 |  |                     // than 1 new peer every new block. | 
| 4014 | 0 |                     m_last_block_inv_triggering_headers_sync = *best_block; | 
| 4015 | 0 |                 } | 
| 4016 | 0 |             } | 
| 4017 | 0 |         } | 
| 4018 |  |  | 
| 4019 | 0 |         return; | 
| 4020 | 0 |     } | 
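
The tail of the inv handler rations how quickly extra peers are pulled into headers sync: a given block hash can recruit at most one additional sync peer, and a peer that has triggered a sync this way once is not used again. A rough standalone sketch of that throttle (a hypothetical distillation of the two fields used above):

```cpp
#include <string>

// At most one extra headers-sync peer per announced block hash, and no peer
// gets to trigger a sync this way twice.
struct HeadersSyncThrottle {
    std::string m_last_trigger_hash;

    bool AllowNewSyncPeer(const std::string& best_block_hash, bool& peer_triggered_before)
    {
        if (peer_triggered_before || best_block_hash == m_last_trigger_hash) return false;
        peer_triggered_before = true;
        m_last_trigger_hash = best_block_hash;
        return true;
    }
};
```
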
| 4021 |  |  | 
| 4022 | 0 |     if (msg_type == NetMsgType::GETDATA) { | 
| 4023 | 0 |         std::vector<CInv> vInv; | 
| 4024 | 0 |         vRecv >> vInv; | 
| 4025 | 0 |         if (vInv.size() > MAX_INV_SZ) | 
| 4026 | 0 |         { | 
| 4027 | 0 |             Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size())); | 
| 4028 | 0 |             return; | 
| 4029 | 0 |         } | 
| 4030 |  |  | 
| 4031 | 0 |         LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); | 
| 4032 |  |  | 
| 4033 | 0 |         if (vInv.size() > 0) { | 
| 4034 | 0 |             LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); | 
| 4035 | 0 |         } | 
| 4036 |  |  | 
| 4037 | 0 |         { | 
| 4038 | 0 |             LOCK(peer->m_getdata_requests_mutex); | 
| 4039 | 0 |             peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); | 
| 4040 | 0 |             ProcessGetData(pfrom, *peer, interruptMsgProc); | 
| 4041 | 0 |         } | 
| 4042 |  |  | 
| 4043 | 0 |         return; | 
| 4044 | 0 |     } | 
| 4045 |  |  | 
| 4046 | 0 |     if (msg_type == NetMsgType::GETBLOCKS) { | 
| 4047 | 0 |         CBlockLocator locator; | 
| 4048 | 0 |         uint256 hashStop; | 
| 4049 | 0 |         vRecv >> locator >> hashStop; | 
| 4050 |  |  | 
| 4051 | 0 |         if (locator.vHave.size() > MAX_LOCATOR_SZ) { | 
| 4052 | 0 |             LogDebug(BCLog::NET, "getblocks locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs)); | 
| 4053 | 0 |             pfrom.fDisconnect = true; | 
| 4054 | 0 |             return; | 
| 4055 | 0 |         } | 
| 4056 |  |  | 
| 4057 |  |         // We might have announced the currently-being-connected tip using a | 
| 4058 |  |         // compact block, which resulted in the peer sending a getblocks | 
| 4059 |  |         // request, which we would otherwise respond to without the new block. | 
| 4060 |  |         // To avoid this situation we simply verify that we are on our best | 
| 4061 |  |         // known chain now. This is super overkill, but we handle it better | 
| 4062 |  |         // for getheaders requests, and there are no known nodes which support | 
| 4063 |  |         // compact blocks but still use getblocks to request blocks. | 
| 4064 | 0 |         { | 
| 4065 | 0 |             std::shared_ptr<const CBlock> a_recent_block; | 
| 4066 | 0 |             { | 
| 4067 | 0 |                 LOCK(m_most_recent_block_mutex); | 
| 4068 | 0 |                 a_recent_block = m_most_recent_block; | 
| 4069 | 0 |             } | 
| 4070 | 0 |             BlockValidationState state; | 
| 4071 | 0 |             if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { | 
| 4072 | 0 |                 LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); | 
| 4073 | 0 |             } | 
| 4074 | 0 |         } | 
| 4075 |  |  | 
| 4076 | 0 |         LOCK(cs_main); | 
| 4077 |  |  | 
| 4078 |  |         // Find the last block the caller has in the main chain | 
| 4079 | 0 |         const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); | 
| 4080 |  |  | 
| 4081 |  |         // Send the rest of the chain | 
| 4082 | 0 |         if (pindex) | 
| 4083 | 0 |             pindex = m_chainman.ActiveChain().Next(pindex); | 
| 4084 | 0 |         int nLimit = 500; | 
| 4085 | 0 |         LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); | 
| 4086 | 0 |         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) | 
| 4087 | 0 |         { | 
| 4088 | 0 |             if (pindex->GetBlockHash() == hashStop) | 
| 4089 | 0 |             { | 
| 4090 | 0 |                 LogDebug(BCLog::NET, "  getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4091 | 0 |                 break; | 
| 4092 | 0 |             } | 
| 4093 |  |             // If pruning, don't inv blocks unless we have them on disk and are likely to still have | 
| 4094 |  |             // them for some reasonable time window (1 hour) that block relay might require. | 
| 4095 | 0 |             const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; | 
| 4096 | 0 |             if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { | 
| 4097 | 0 |                 LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4098 | 0 |                 break; | 
| 4099 | 0 |             } | 
| 4100 | 0 |             WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash())); | 
| 4101 | 0 |             if (--nLimit <= 0) { | 
| 4102 |  |                 // When this block is requested, we'll send an inv that'll | 
| 4103 |  |                 // trigger the peer to getblocks the next batch of inventory. | 
| 4104 | 0 |                 LogDebug(BCLog::NET, "  getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); | 
| 4105 | 0 |                 WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();}); | 
| 4106 | 0 |                 break; | 
| 4107 | 0 |             } | 
| 4108 | 0 |         } | 
| 4109 | 0 |         return; | 
| 4110 | 0 |     } | 
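
The pruning cutoff in the loop above is worth working through. Assuming mainnet values (a 600-second proof-of-work target spacing and MIN_BLOCKS_TO_KEEP = 288, both taken as assumptions here rather than read from this file), 3600 / 600 gives 6 blocks per hour, so a pruning node only advertises blocks within 288 - 6 = 282 blocks of its tip:

```cpp
#include <cstdio>

int main()
{
    const int min_blocks_to_keep = 288; // assumed MIN_BLOCKS_TO_KEEP (mainnet)
    const int pow_target_spacing = 600; // assumed seconds per block (mainnet)
    const int blocks_per_hour = 3600 / pow_target_spacing;                         // = 6
    const int pruned_blocks_likely_to_have = min_blocks_to_keep - blocks_per_hour; // = 282
    std::printf("when pruning, only inv blocks within %d of the tip\n",
                pruned_blocks_likely_to_have);
}
```
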
| 4111 |  |  | 
| 4112 | 0 |     if (msg_type == NetMsgType::GETBLOCKTXN) { | 
| 4113 | 0 |         BlockTransactionsRequest req; | 
| 4114 | 0 |         vRecv >> req; | 
| 4115 |  |  | 
| 4116 | 0 |         std::shared_ptr<const CBlock> recent_block; | 
| 4117 | 0 |         { | 
| 4118 | 0 |             LOCK(m_most_recent_block_mutex); | 
| 4119 | 0 |             if (m_most_recent_block_hash == req.blockhash) | 
| 4120 | 0 |                 recent_block = m_most_recent_block; | 
| 4121 |  |             // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion | 
| 4122 | 0 |         } | 
| 4123 | 0 |         if (recent_block) { | 
| 4124 | 0 |             SendBlockTransactions(pfrom, *peer, *recent_block, req); | 
| 4125 | 0 |             return; | 
| 4126 | 0 |         } | 
| 4127 |  |  | 
| 4128 | 0 |         FlatFilePos block_pos{}; | 
| 4129 | 0 |         { | 
| 4130 | 0 |             LOCK(cs_main); | 
| 4131 |  |  | 
| 4132 | 0 |             const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash); | 
| 4133 | 0 |             if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { | 
| 4134 | 0 |                 LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); | 
| 4135 | 0 |                 return; | 
| 4136 | 0 |             } | 
| 4137 |  |  | 
| 4138 | 0 |             if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) { | 
| 4139 | 0 |                 block_pos = pindex->GetBlockPos(); | 
| 4140 | 0 |             } | 
| 4141 | 0 |         } | 
| 4142 |  |  | 
| 4143 | 0 |         if (!block_pos.IsNull()) { | 
| 4144 | 0 |             CBlock block; | 
| 4145 | 0 |             const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos, req.blockhash)}; | 
| 4146 |  |             // A block within MAX_BLOCKTXN_DEPTH of the tip cannot get | 
| 4147 |  |             // pruned after we release cs_main above, so this read should never fail. | 
| 4148 | 0 |             assert(ret); | 
| 4149 |  |  | 
| 4150 | 0 |             SendBlockTransactions(pfrom, *peer, block, req); | 
| 4151 | 0 |             return; | 
| 4152 | 0 |         } | 
| 4153 |  |  | 
| 4154 |  |         // If an older block is requested (should never happen in practice, | 
| 4155 |  |         // but can happen in tests) send a block response instead of a | 
| 4156 |  |         // blocktxn response. Sending a full block response instead of a | 
| 4157 |  |         // small blocktxn response is preferable in the case where a peer | 
| 4158 |  |         // might maliciously send lots of getblocktxn requests to trigger | 
| 4159 |  |         // expensive disk reads, because it will require the peer to | 
| 4160 |  |         // actually receive all the data read from disk over the network. | 
| 4161 | 0 |         LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); | 
| 4162 | 0 |         CInv inv{MSG_WITNESS_BLOCK, req.blockhash}; | 
| 4163 | 0 |         WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv)); | 
| 4164 |  |         // The message processing loop will go around again (without pausing) and we'll respond then | 
| 4165 | 0 |         return; | 
| 4166 | 0 |     } | 
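
The BlockTransactionsRequest deserialized at the top of this handler carries the missing-transaction indexes differentially encoded on the wire: per BIP152 (stated here from the BIP, not from this listing), the first index is sent as-is and each later one as the gap to its predecessor minus one, which keeps the varints small for mostly-contiguous requests. A sketch of the encoder:

```cpp
#include <cstdint>
#include <vector>

// Differentially encode sorted BIP152 getblocktxn indexes: first index raw,
// each subsequent one as (index - previous - 1).
std::vector<uint64_t> DifferentialEncode(const std::vector<uint64_t>& sorted_indexes)
{
    std::vector<uint64_t> out;
    out.reserve(sorted_indexes.size());
    uint64_t prev = 0;
    bool first = true;
    for (const uint64_t idx : sorted_indexes) {
        out.push_back(first ? idx : idx - prev - 1);
        first = false;
        prev = idx;
    }
    return out;
}
```
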
| 4167 |  |  | 
| 4168 | 0 |     if (msg_type == NetMsgType::GETHEADERS) { | 
| 4169 | 0 |         CBlockLocator locator; | 
| 4170 | 0 |         uint256 hashStop; | 
| 4171 | 0 |         vRecv >> locator >> hashStop; | 
| 4172 |  |  | 
| 4173 | 0 |         if (locator.vHave.size() > MAX_LOCATOR_SZ) { | 
| 4174 | 0 |             LogDebug(BCLog::NET, "getheaders locator size %lld > %d, %s\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.DisconnectMsg(fLogIPs)); | 
| 4175 | 0 |             pfrom.fDisconnect = true; | 
| 4176 | 0 |             return; | 
| 4177 | 0 |         } | 
| 4178 |  |  | 
| 4179 | 0 |         if (m_chainman.m_blockman.LoadingBlocks()) { | 
| 4180 | 0 |             LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); | 
| 4181 | 0 |             return; | 
| 4182 | 0 |         } | 
| 4183 |  |  | 
| 4184 | 0 |         LOCK(cs_main); | 
| 4185 |  |  | 
| 4186 |  |         // Don't serve headers from our active chain until our chainwork is at least | 
| 4187 |  |         // the minimum chain work. This prevents us from starting a low-work headers | 
| 4188 |  |         // sync that will inevitably be aborted by our peer. | 
| 4189 | 0 |         if (m_chainman.ActiveTip() == nullptr || | 
| 4190 | 0 |                 (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) { | 
| 4191 | 0 |             LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); | 
| 4192 |  |             // Just respond with an empty headers message, to tell the peer to | 
| 4193 |  |             // go away but not treat us as unresponsive. | 
| 4194 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>()); | 
| 4195 | 0 |             return; | 
| 4196 | 0 |         } | 
| 4197 |  |  | 
| 4198 | 0 |         CNodeState *nodestate = State(pfrom.GetId()); | 
| 4199 | 0 |         const CBlockIndex* pindex = nullptr; | 
| 4200 | 0 |         if (locator.IsNull()) | 
| 4201 | 0 |         { | 
| 4202 |  |             // If locator is null, return the hashStop block | 
| 4203 | 0 |             pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop); | 
| 4204 | 0 |             if (!pindex) { | 
| 4205 | 0 |                 return; | 
| 4206 | 0 |             } | 
| 4207 |  |  | 
| 4208 | 0 |             if (!BlockRequestAllowed(pindex)) { | 
| 4209 | 0 |                 LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId()); | 
| 4210 | 0 |                 return; | 
| 4211 | 0 |             } | 
| 4212 | 0 |         } | 
| 4213 | 0 |         else | 
| 4214 | 0 |         { | 
| 4215 |  |             // Find the last block the caller has in the main chain | 
| 4216 | 0 |             pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); | 
| 4217 | 0 |             if (pindex) | 
| 4218 | 0 |                 pindex = m_chainman.ActiveChain().Next(pindex); | 
| 4219 | 0 |         } | 
| 4220 |  |  | 
| 4221 |  |         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end | 
| 4222 | 0 |         std::vector<CBlock> vHeaders; | 
| 4223 | 0 |         int nLimit = m_opts.max_headers_result; | 
| 4224 | 0 |         LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); | 
| 4225 | 0 |         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) | 
| 4226 | 0 |         { | 
| 4227 | 0 |             vHeaders.emplace_back(pindex->GetBlockHeader()); | 
| 4228 | 0 |             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) | 
| 4229 | 0 |                 break; | 
| 4230 | 0 |         } | 
| 4231 |  |         // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR | 
| 4232 |  |         // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty | 
| 4233 |  |         // headers message). In both cases it's safe to update | 
| 4234 |  |         // pindexBestHeaderSent to be our tip. | 
| 4235 |  |         // | 
| 4236 |  |         // It is important that we simply reset the BestHeaderSent value here, | 
| 4237 |  |         // and not max(BestHeaderSent, newHeaderSent). We might have announced | 
| 4238 |  |         // the currently-being-connected tip using a compact block, which | 
| 4239 |  |         // resulted in the peer sending a headers request, which we respond to | 
| 4240 |  |         // without the new block. By resetting the BestHeaderSent, we ensure we | 
| 4241 |  |         // will re-announce the new block via headers (or compact blocks again) | 
| 4242 |  |         // in the SendMessages logic. | 
| 4243 | 0 |         nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip(); | 
| 4244 | 0 |         MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); | 
| 4245 | 0 |         return; | 
| 4246 | 0 |     } | 
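
FindForkInGlobalIndex above depends on the shape of the locator the peer sent: hashes dense near the peer's tip and exponentially sparser going back, so a common ancestor can be found with O(log n) entries regardless of fork depth. A sketch of the height schedule such a locator follows (illustrative; it mirrors the usual construction of stepping by 1 for roughly the first 10 entries, then doubling):

```cpp
#include <vector>

// Heights a block locator samples: dense near the tip, exponentially sparse
// further back, always ending at the genesis block (height 0).
std::vector<int> LocatorHeights(int tip_height)
{
    std::vector<int> heights;
    int step = 1;
    for (int h = tip_height; h > 0; h -= step) {
        heights.push_back(h);
        if (heights.size() >= 10) step *= 2; // double the step after the first entries
    }
    heights.push_back(0);
    return heights;
}
```
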
| 4247 |  |  | 
| 4248 | 0 |     if (msg_type == NetMsgType::TX) { | 
| 4249 | 0 |         if (RejectIncomingTxs(pfrom)) { | 
| 4250 | 0 |             LogDebug(BCLog::NET, "transaction sent in violation of protocol, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4251 | 0 |             pfrom.fDisconnect = true; | 
| 4252 | 0 |             return; | 
| 4253 | 0 |         } | 
| 4254 |  |  | 
| 4255 |  |         // Stop processing the transaction early if we are still in IBD since we don't | 
| 4256 |  |         // have enough information to validate it yet. Sending unsolicited transactions | 
| 4257 |  |         // is not considered a protocol violation, so don't punish the peer. | 
| 4258 | 0 |         if (m_chainman.IsInitialBlockDownload()) return; | 
| 4259 |  |  | 
| 4260 | 0 |         CTransactionRef ptx; | 
| 4261 | 0 |         vRecv >> TX_WITH_WITNESS(ptx); | 
| 4262 |  |  | 
| 4263 | 0 |         const Txid& txid = ptx->GetHash(); | 
| 4264 | 0 |         const Wtxid& wtxid = ptx->GetWitnessHash(); | 
| 4265 |  |  | 
| 4266 | 0 |         const uint256& hash = peer->m_wtxid_relay ? wtxid.ToUint256() : txid.ToUint256(); | 
| 4267 | 0 |         AddKnownTx(*peer, hash); | 
| 4268 |  |  | 
| 4269 | 0 |         LOCK2(cs_main, m_tx_download_mutex); | 
| 4270 |  |  | 
| 4271 | 0 |         const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx); | 
| 4272 | 0 |         if (!should_validate) { | 
| 4273 | 0 |             if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { | 
| 4274 |  |                 // Always relay transactions received from peers with forcerelay | 
| 4275 |  |                 // permission, even if they were already in the mempool, allowing | 
| 4276 |  |                 // the node to function as a gateway for nodes hidden behind it. | 
| 4277 | 0 |                 if (!m_mempool.exists(txid)) { | 
| 4278 | 0 |                     LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n", | 
| 4279 | 0 |                               txid.ToString(), wtxid.ToString(), pfrom.GetId()); | 
| 4280 | 0 |                 } else { | 
| 4281 | 0 |                     LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n", | 
| 4282 | 0 |                               txid.ToString(), wtxid.ToString(), pfrom.GetId()); | 
| 4283 | 0 |                     RelayTransaction(txid, wtxid); | 
| 4284 | 0 |                 } | 
| 4285 | 0 |             } | 
| 4286 |  |  | 
| 4287 | 0 |             if (package_to_validate) { | 
| 4288 | 0 |                 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; | 
| 4289 | 0 |                 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), | 
| 4290 | 0 |                          package_result.m_state.IsValid() ? "package accepted" : "package rejected"); | 
| 4291 | 0 |                 ProcessPackageResult(package_to_validate.value(), package_result); | 
| 4292 | 0 |             } | 
| 4293 | 0 |             return; | 
| 4294 | 0 |         } | 
| 4295 |  |  | 
| 4296 |  |         // ReceivedTx should not be telling us to validate the tx and a package. | 
| 4297 | 0 |         Assume(!package_to_validate.has_value()); | 
| 4298 |  |  | 
| 4299 | 0 |         const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx); | 
| 4300 | 0 |         const TxValidationState& state = result.m_state; | 
| 4301 |  |  | 
| 4302 | 0 |         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { | 
| 4303 | 0 |             ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions); | 
| 4304 | 0 |             pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); | 
| 4305 | 0 |         } | 
| 4306 | 0 |         if (state.IsInvalid()) { | 
| 4307 | 0 |             if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) { | 
| 4308 | 0 |                 const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; | 
| 4309 | 0 |                 LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), | 
| 4310 | 0 |                          package_result.m_state.IsValid() ? "package accepted" : "package rejected"); | 
| 4311 | 0 |                 ProcessPackageResult(package_to_validate.value(), package_result); | 
| 4312 | 0 |             } | 
| 4313 | 0 |         } | 
| 4314 |  |  | 
| 4315 | 0 |         return; | 
| 4316 | 0 |     } | 
| 4317 |  |  | 
| 4318 | 0 |     if (msg_type == NetMsgType::CMPCTBLOCK) | 
| 4319 | 0 |     { | 
| 4320 |  |         // Ignore cmpctblock received while importing | 
| 4321 | 0 |         if (m_chainman.m_blockman.LoadingBlocks()) { | 
| 4322 | 0 |             LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); | 
| 4323 | 0 |             return; | 
| 4324 | 0 |         } | 
| 4325 |  |  | 
| 4326 | 0 |         CBlockHeaderAndShortTxIDs cmpctblock; | 
| 4327 | 0 |         vRecv >> cmpctblock; | 
| 4328 |  |  | 
| 4329 | 0 |         bool received_new_header = false; | 
| 4330 | 0 |         const auto blockhash = cmpctblock.header.GetHash(); | 
| 4331 |  |  | 
| 4332 | 0 |         { | 
| 4333 | 0 |         LOCK(cs_main); | 
| 4334 |  |  | 
| 4335 | 0 |         const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); | 
| 4336 | 0 |         if (!prev_block) { | 
| 4337 |  |             // Doesn't connect (or is genesis); instead of DoSing in AcceptBlockHeader, request deeper headers | 
| 4338 | 0 |             if (!m_chainman.IsInitialBlockDownload()) { | 
| 4339 | 0 |                 MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer); | 
| 4340 | 0 |             } | 
| 4341 | 0 |             return; | 
| 4342 | 0 |         } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) { | 
| 4343 |  |             // If we get a low-work header in a compact block, we can ignore it. | 
| 4344 | 0 |             LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); | 
| 4345 | 0 |             return; | 
| 4346 | 0 |         } | 
| 4347 |  |  | 
| 4348 | 0 |         if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) { | 
| 4349 | 0 |             received_new_header = true; | 
| 4350 | 0 |         } | 
| 4351 | 0 |         } | 
| 4352 |  |  | 
| 4353 | 0 |         const CBlockIndex *pindex = nullptr; | 
| 4354 | 0 |         BlockValidationState state; | 
| 4355 | 0 |         if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) { | 
| 4356 | 0 |             if (state.IsInvalid()) { | 
| 4357 | 0 |                 MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); | 
| 4358 | 0 |                 return; | 
| 4359 | 0 |             } | 
| 4360 | 0 |         } | 
| 4361 |  |  | 
| 4362 |  |         // If AcceptBlockHeader returned true, it set pindex | 
| 4363 | 0 |         Assert(pindex); | 
| 4364 | 0 |         if (received_new_header) { | 
| 4365 | 0 |             LogBlockHeader(*pindex, pfrom, /*via_compact_block=*/true); | 
| 4366 | 0 |         } | 
| 4367 |  |  | 
| 4368 | 0 |         bool fProcessBLOCKTXN = false; | 
| 4369 |  |  | 
| 4370 |  |         // If we end up treating this as a plain headers message, call that as well | 
| 4371 |  |         // without cs_main. | 
| 4372 | 0 |         bool fRevertToHeaderProcessing = false; | 
| 4373 |  |  | 
| 4374 |  |         // Keep a CBlock for "optimistic" compactblock reconstructions (see | 
| 4375 |  |         // below) | 
| 4376 | 0 |         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | 
| 4377 | 0 |         bool fBlockReconstructed = false; | 
| 4378 |  |  | 
| 4379 | 0 |         { | 
| 4380 | 0 |         LOCK(cs_main); | 
| 4381 | 0 |         UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); | 
| 4382 |  |  | 
| 4383 | 0 |         CNodeState *nodestate = State(pfrom.GetId()); | 
| 4384 |  |  | 
| 4385 |  |         // If this was a new header with more work than our tip, update the | 
| 4386 |  |         // peer's last block announcement time | 
| 4387 | 0 |         if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { | 
| 4388 | 0 |             nodestate->m_last_block_announcement = GetTime(); | 
| 4389 | 0 |         } | 
| 4390 |  |  | 
| 4391 | 0 |         if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here | 
| 4392 | 0 |             return; | 
| 4393 |  |  | 
| 4394 | 0 |         auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash()); | 
| 4395 | 0 |         size_t already_in_flight = std::distance(range_flight.first, range_flight.second); | 
| 4396 | 0 |         bool requested_block_from_this_peer{false}; | 
| 4397 |  |  | 
| 4398 |  |         // Multimap ensures ordering of outstanding requests. It's either empty or first in line. | 
| 4399 | 0 |         bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); | 
| 4400 |  |  | 
| 4401 | 0 |         while (range_flight.first != range_flight.second) { | 
| 4402 | 0 |             if (range_flight.first->second.first == pfrom.GetId()) { | 
| 4403 | 0 |                 requested_block_from_this_peer = true; | 
| 4404 | 0 |                 break; | 
| 4405 | 0 |             } | 
| 4406 | 0 |             range_flight.first++; | 
| 4407 | 0 |         } | 
| 4408 |  |  | 
| 4409 | 0 |         if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better | 
| 4410 | 0 |                 pindex->nTx != 0) { // We had this block at some point, but pruned it | 
| 4411 | 0 |             if (requested_block_from_this_peer) { | 
| 4412 |  |                 // We requested this block for some reason, but our mempool will probably be useless, | 
| 4413 |  |                 // so we just grab the block via normal getdata. | 
| 4414 | 0 |                 std::vector<CInv> vInv(1); | 
| 4415 | 0 |                 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); | 
| 4416 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); | 
| 4417 | 0 |             } | 
| 4418 | 0 |             return; | 
| 4419 | 0 |         } | 
| 4420 |  |  | 
| 4421 |  |         // If we're not close to tip yet, give up and let parallel block fetch work its magic | 
| 4422 | 0 |         if (!already_in_flight && !CanDirectFetch()) { | 
| 4423 | 0 |             return; | 
| 4424 | 0 |         } | 
| 4425 |  |  | 
| 4426 |  |         // We want to be a bit conservative just to be extra careful about DoS | 
| 4427 |  |         // possibilities in compact block processing... | 
| 4428 | 0 |         if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { | 
| 4429 | 0 |             if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || | 
| 4430 | 0 |                  requested_block_from_this_peer) { | 
| 4431 | 0 |                 std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; | 
| 4432 | 0 |                 if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { | 
| 4433 | 0 |                     if (!(*queuedBlockIt)->partialBlock) | 
| 4434 | 0 |                         (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); | 
| 4435 | 0 |                     else { | 
| 4436 |  |                         // The block was already in flight using compact blocks from the same peer | 
| 4437 | 0 |                         LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n"); | 
| 4438 | 0 |                         return; | 
| 4439 | 0 |                     } | 
| 4440 | 0 |                 } | 
| 4441 |  |  | 
| 4442 | 0 |                 PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; | 
| 4443 | 0 |                 ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); | 
| 4444 | 0 |                 if (status == READ_STATUS_INVALID) { | 
| 4445 | 0 |                     RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect | 
| 4446 | 0 |                     Misbehaving(*peer, "invalid compact block"); | 
| 4447 | 0 |                     return; | 
| 4448 | 0 |                 } else if (status == READ_STATUS_FAILED) { | 
| 4449 | 0 |                     if (first_in_flight) { | 
| 4450 |  |                         // Duplicate txindexes; the block is now in-flight, so just request it | 
| 4451 | 0 |                         std::vector<CInv> vInv(1); | 
| 4452 | 0 |                         vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); | 
| 4453 | 0 |                         MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); | 
| 4454 | 0 |                     } else { | 
| 4455 |  |                         // Give up for this peer and wait for other peer(s) | 
| 4456 | 0 |                         RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); | 
| 4457 | 0 |                     } | 
| 4458 | 0 |                     return; | 
| 4459 | 0 |                 } | 
| 4460 |  |  | 
| 4461 | 0 |                 BlockTransactionsRequest req; | 
| 4462 | 0 |                 for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { | 
| 4463 | 0 |                     if (!partialBlock.IsTxAvailable(i)) | 
| 4464 | 0 |                         req.indexes.push_back(i); | 
| 4465 | 0 |                 } | 
| 4466 | 0 |                 if (req.indexes.empty()) { | 
| 4467 | 0 |                     fProcessBLOCKTXN = true; | 
| 4468 | 0 |                 } else if (first_in_flight) { | 
| 4469 |  |                     // We will try to round-trip any compact blocks we get on failure, | 
| 4470 |  |                     // as long as ours is the first request in flight... | 
| 4471 | 0 |                     req.blockhash = pindex->GetBlockHash(); | 
| 4472 | 0 |                     MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); | 
| 4473 | 0 |                 } else if (pfrom.m_bip152_highbandwidth_to && | 
| 4474 | 0 |                     (!pfrom.IsInboundConn() || | 
| 4475 | 0 |                     IsBlockRequestedFromOutbound(blockhash) || | 
| 4476 | 0 |                     already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) { | 
| 4477 |  |                     // ... or it's an HB relay peer and: | 
| 4478 |  |                     // - the peer is outbound, or | 
| 4479 |  |                     // - we already have an outbound attempt in flight (so we'll take what we can get), or | 
| 4480 |  |                     // - it's not the final parallel download slot (which we may reserve for the first outbound) | 
| 4481 | 0 |                     req.blockhash = pindex->GetBlockHash(); | 
| 4482 | 0 |                     MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); | 
| 4483 | 0 |                 } else { | 
| 4484 |  |                     // Give up for this peer and wait for other peer(s) | 
| 4485 | 0 |                     RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); | 
| 4486 | 0 |                 } | 
| 4487 | 0 |             } else { | 
| 4488 |  |                 // This block is either already in flight from a different | 
| 4489 |  |                 // peer, or this peer already has too many blocks in flight | 
| 4490 |  |                 // for us to request more from it. | 
| 4491 |  |                 // Optimistically try to reconstruct anyway since we might be | 
| 4492 |  |                 // able to without any round trips. | 
| 4493 | 0 |                 PartiallyDownloadedBlock tempBlock(&m_mempool); | 
| 4494 | 0 |                 ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); | 
| 4495 | 0 |                 if (status != READ_STATUS_OK) { | 
| 4496 |  |                     // TODO: don't ignore failures | 
| 4497 | 0 |                     return; | 
| 4498 | 0 |                 } | 
| 4499 | 0 |                 std::vector<CTransactionRef> dummy; | 
| 4500 | 0 |                 const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))}; | 
| 4501 | 0 |                 status = tempBlock.FillBlock(*pblock, dummy, | 
| 4502 | 0 |                                              /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); | 
| 4503 | 0 |                 if (status == READ_STATUS_OK) { | 
| 4504 | 0 |                     fBlockReconstructed = true; | 
| 4505 | 0 |                 } | 
| 4506 | 0 |             } | 
| 4507 | 0 |         } else { | 
| 4508 | 0 |             if (requested_block_from_this_peer) { | 
| 4509 |  |                 // We requested this block, but it's far into the future, so our | 
| 4510 |  |                 // mempool will probably be useless - request the block normally | 
| 4511 | 0 |                 std::vector<CInv> vInv(1); | 
| 4512 | 0 |                 vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); | 
| 4513 | 0 |                 MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); | 
| 4514 | 0 |                 return; | 
| 4515 | 0 |             } else { | 
| 4516 |  |                 // If this was an announce-cmpctblock, we want the same treatment as a header message | 
| 4517 | 0 |                 fRevertToHeaderProcessing = true; | 
| 4518 | 0 |             } | 
| 4519 | 0 |         } | 
| 4520 | 0 |         } // cs_main | 
| 4521 |  |  | 
| 4522 | 0 |         if (fProcessBLOCKTXN) { | 
| 4523 | 0 |             BlockTransactions txn; | 
| 4524 | 0 |             txn.blockhash = blockhash; | 
| 4525 | 0 |             return ProcessCompactBlockTxns(pfrom, *peer, txn); | 
| 4526 | 0 |         } | 
| 4527 |  |  | 
| 4528 | 0 |         if (fRevertToHeaderProcessing) { | 
| 4529 |  |             // Headers received from HB compact block peers are permitted to be | 
| 4530 |  |             // relayed before full validation (see BIP 152), so we don't want to disconnect | 
| 4531 |  |             // the peer if the header turns out to be for an invalid block. | 
| 4532 |  |             // Note that if a peer tries to build on an invalid chain, that | 
| 4533 |  |             // will be detected and the peer will be disconnected/discouraged. | 
| 4534 | 0 |             return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true); | 
| 4535 | 0 |         } | 
| 4536 |  |  | 
| 4537 | 0 |         if (fBlockReconstructed) { | 
| 4538 |  |             // If we got here, we were able to optimistically reconstruct a | 
| 4539 |  |             // block that is in flight from some other peer. | 
| 4540 | 0 |             { | 
| 4541 | 0 |                 LOCK(cs_main); | 
| 4542 | 0 |                 mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); | 
| 4543 | 0 |             } | 
| 4544 |  |             // Setting force_processing to true means that we bypass some of | 
| 4545 |  |             // our anti-DoS protections in AcceptBlock, which filters | 
| 4546 |  |             // unrequested blocks that might be trying to waste our resources | 
| 4547 |  |             // (eg disk space). Because we only try to reconstruct blocks when | 
| 4548 |  |             // we're close to caught up (via the CanDirectFetch() requirement | 
| 4549 |  |             // above, combined with the behavior of not requesting blocks until | 
| 4550 |  |             // we have a chain with at least the minimum chain work), and we ignore | 
| 4551 |  |             // compact blocks with less work than our tip, it is safe to treat | 
| 4552 |  |             // reconstructed compact blocks as having been requested. | 
| 4553 | 0 |             ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); | 
| 4554 | 0 |             LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() | 
| 4555 | 0 |             if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { | 
| 4556 |  |                 // Clear download state for this block, which is in | 
| 4557 |  |                 // process from some other peer.  We do this after calling | 
| 4558 |  |                 // ProcessNewBlock so that a malleated cmpctblock announcement | 
| 4559 |  |                 // can't be used to interfere with block relay. | 
| 4560 | 0 |                 RemoveBlockRequest(pblock->GetHash(), std::nullopt); | 
| 4561 | 0 |             } | 
| 4562 | 0 |         } | 
| 4563 | 0 |         return; | 
| 4564 | 0 |     } | 
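The handler above reconstructs as much of the announced block as it can from the mempool and only round-trips for the transactions that are still missing. A minimal sketch of that request-building step (standalone and illustrative only: the plain std::vector<bool> availability map and the function name stand in for the PartiallyDownloadedBlock API):

    #include <cstddef>
    #include <vector>

    // Collect the slots that could not be filled from the local mempool; these
    // are the indexes a follow-up getblocktxn request would ask the peer for.
    std::vector<size_t> MissingIndexes(const std::vector<bool>& tx_available)
    {
        std::vector<size_t> indexes;
        for (size_t i = 0; i < tx_available.size(); ++i) {
            if (!tx_available[i]) indexes.push_back(i);
        }
        return indexes; // empty => the block reconstructs with no extra round trip
    }

An empty result corresponds to the fProcessBLOCKTXN path above, where the block can be completed immediately without another message exchange.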
| 4565 |  |  | 
| 4566 | 0 |     if (msg_type == NetMsgType::BLOCKTXN) | 
| 4567 | 0 |     { | 
| 4568 |  |         // Ignore blocktxn received while importing | 
| 4569 | 0 |         if (m_chainman.m_blockman.LoadingBlocks()) { | 
| 4570 | 0 |             LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); | 
| 4571 | 0 |             return; | 
| 4572 | 0 |         } | 
| 4573 |  |  | 
| 4574 | 0 |         BlockTransactions resp; | 
| 4575 | 0 |         vRecv >> resp; | 
| 4576 |  |  | 
| 4577 | 0 |         return ProcessCompactBlockTxns(pfrom, *peer, resp); | 
| 4578 | 0 |     } | 
| 4579 |  |  | 
| 4580 | 0 |     if (msg_type == NetMsgType::HEADERS) | 
| 4581 | 0 |     { | 
| 4582 |  |         // Ignore headers received while importing | 
| 4583 | 0 |         if (m_chainman.m_blockman.LoadingBlocks()) { | 
| 4584 | 0 |             LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); | 
| 4585 | 0 |             return; | 
| 4586 | 0 |         } | 
| 4587 |  |  | 
| 4588 | 0 |         std::vector<CBlockHeader> headers; | 
| 4589 |  |  | 
| 4590 |  |         // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. | 
| 4591 | 0 |         unsigned int nCount = ReadCompactSize(vRecv); | 
| 4592 | 0 |         if (nCount > m_opts.max_headers_result) { | 
| 4593 | 0 |             Misbehaving(*peer, strprintf("headers message size = %u", nCount)); | 
| 4594 | 0 |             return; | 
| 4595 | 0 |         } | 
| 4596 | 0 |         headers.resize(nCount); | 
| 4597 | 0 |         for (unsigned int n = 0; n < nCount; n++) { | 
| 4598 | 0 |             vRecv >> headers[n]; | 
| 4599 | 0 |             ReadCompactSize(vRecv); // ignore tx count; assume it is 0. | 
| 4600 | 0 |         } | 
| 4601 |  |  | 
| 4602 | 0 |         ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false); | 
| 4603 |  |  | 
| 4604 |  |         // Check if the headers presync progress needs to be reported to validation. | 
| 4605 |  |         // This needs to be done without holding the m_headers_presync_mutex lock. | 
| 4606 | 0 |         if (m_headers_presync_should_signal.exchange(false)) { | 
| 4607 | 0 |             HeadersPresyncStats stats; | 
| 4608 | 0 |             { | 
| 4609 | 0 |                 LOCK(m_headers_presync_mutex); | 
| 4610 | 0 |                 auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer); | 
| 4611 | 0 |                 if (it != m_headers_presync_stats.end()) stats = it->second; | 
| 4612 | 0 |             } | 
| 4613 | 0 |             if (stats.second) { | 
| 4614 | 0 |                 m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second); | 
| 4615 | 0 |             } | 
| 4616 | 0 |         } | 
| 4617 |  |  | 
| 4618 | 0 |         return; | 
| 4619 | 0 |     } | 
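The manual deserialization above follows a defensive pattern: read the announced count, check it against a hard cap, and only then allocate. A minimal sketch of the same idea, assuming a hypothetical Stream type that exposes ReadCompactSize() and operator>> (the real code uses the free-function ReadCompactSize on the receive stream):

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    // Bounded read: validate the announced element count before resizing, so a
    // peer cannot force a huge allocation with a single small message.
    template <typename Stream, typename Header>
    std::vector<Header> ReadBoundedHeaders(Stream& s, uint64_t max_count)
    {
        const uint64_t count{s.ReadCompactSize()};
        if (count > max_count) throw std::runtime_error("headers message too large");
        std::vector<Header> headers(count);
        for (auto& h : headers) {
            s >> h;                    // the header itself
            (void)s.ReadCompactSize(); // trailing tx count; assumed to be 0
        }
        return headers;
    }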
| 4620 |  |  | 
| 4621 | 0 |     if (msg_type == NetMsgType::BLOCK) | 
| 4622 | 0 |     { | 
| 4623 |  |         // Ignore block received while importing | 
| 4624 | 0 |         if (m_chainman.m_blockman.LoadingBlocks()) { | 
| 4625 | 0 |             LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); | 
| 4626 | 0 |             return; | 
| 4627 | 0 |         } | 
| 4628 |  |  | 
| 4629 | 0 |         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); | 
| 4630 | 0 |         vRecv >> TX_WITH_WITNESS(*pblock); | 
| 4631 |  |  | 
| 4632 | 0 |         LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); | 
| 4633 |  |  | 
| 4634 | 0 |         const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))}; | 
| 4635 |  |  | 
| 4636 |  |         // Check for possible mutation if it connects to something we know so we can check for DEPLOYMENT_SEGWIT being active | 
| 4637 | 0 |         if (prev_block && IsBlockMutated(/*block=*/*pblock, | 
| 4638 | 0 |                            /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) { | 
| 4639 | 0 |             LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id); | 
| 4640 | 0 |             Misbehaving(*peer, "mutated block"); | 
| 4641 | 0 |             WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id)); | 
| 4642 | 0 |             return; | 
| 4643 | 0 |         } | 
| 4644 |  |  | 
| 4645 | 0 |         bool forceProcessing = false; | 
| 4646 | 0 |         const uint256 hash(pblock->GetHash()); | 
| 4647 | 0 |         bool min_pow_checked = false; | 
| 4648 | 0 |         { | 
| 4649 | 0 |             LOCK(cs_main); | 
| 4650 |  |             // Always process the block if we requested it, since we may | 
| 4651 |  |             // need it even when it's not a candidate for a new best tip. | 
| 4652 | 0 |             forceProcessing = IsBlockRequested(hash); | 
| 4653 | 0 |             RemoveBlockRequest(hash, pfrom.GetId()); | 
| 4654 |  |             // mapBlockSource is only used for punishing peers and setting | 
| 4655 |  |             // which peers send us compact blocks, so the race between here and | 
| 4656 |  |             // cs_main in ProcessNewBlock is fine. | 
| 4657 | 0 |             mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); | 
| 4658 |  |  | 
| 4659 |  |             // Check claimed work on this block against our anti-dos thresholds. | 
| 4660 | 0 |             if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) { | 
| 4661 | 0 |                 min_pow_checked = true; | 
| 4662 | 0 |             } | 
| 4663 | 0 |         } | 
| 4664 | 0 |         ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked); | 
| 4665 | 0 |         return; | 
| 4666 | 0 |     } | 
| 4667 |  |  | 
| 4668 | 0 |     if (msg_type == NetMsgType::GETADDR) { | 
| 4669 |  |         // This asymmetric behavior for inbound and outbound connections was introduced | 
| 4670 |  |         // to prevent a fingerprinting attack: an attacker can send specific fake addresses | 
| 4671 |  |         // to users' AddrMan and later request them by sending getaddr messages. | 
| 4672 |  |         // Making nodes which are behind NAT and can only make outgoing connections ignore | 
| 4673 |  |         // the getaddr message mitigates the attack. | 
| 4674 | 0 |         if (!pfrom.IsInboundConn()) { | 
| 4675 | 0 |             LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId()); | 
| 4676 | 0 |             return; | 
| 4677 | 0 |         } | 
| 4678 |  |  | 
| 4679 |  |         // Since this must be an inbound connection, SetupAddressRelay will | 
| 4680 |  |         // never fail. | 
| 4681 | 0 |         Assume(SetupAddressRelay(pfrom, *peer)); | 
| 4682 |  |  | 
| 4683 |  |         // Only send one GetAddr response per connection to reduce resource waste | 
| 4684 |  |         // and discourage addr stamping of INV announcements. | 
| 4685 | 0 |         if (peer->m_getaddr_recvd) { | 
| 4686 | 0 |             LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); | 
| 4687 | 0 |             return; | 
| 4688 | 0 |         } | 
| 4689 | 0 |         peer->m_getaddr_recvd = true; | 
| 4690 |  |  | 
| 4691 | 0 |         peer->m_addrs_to_send.clear(); | 
| 4692 | 0 |         std::vector<CAddress> vAddr; | 
| 4693 | 0 |         if (pfrom.HasPermission(NetPermissionFlags::Addr)) { | 
| 4694 | 0 |             vAddr = m_connman.GetAddressesUnsafe(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt); | 
| 4695 | 0 |         } else { | 
| 4696 | 0 |             vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); | 
| 4697 | 0 |         } | 
| 4698 | 0 |         for (const CAddress &addr : vAddr) { | 
| 4699 | 0 |             PushAddress(*peer, addr); | 
| 4700 | 0 |         } | 
| 4701 | 0 |         return; | 
| 4702 | 0 |     } | 
| 4703 |  |  | 
| 4704 | 0 |     if (msg_type == NetMsgType::MEMPOOL) { | 
| 4705 |  |         // Only process received mempool messages if we advertise NODE_BLOOM | 
| 4706 |  |         // or if the peer has mempool permissions. | 
| 4707 | 0 |         if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) | 
| 4708 | 0 |         { | 
| 4709 | 0 |             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) | 
| 4710 | 0 |             { | 
| 4711 | 0 |                 LogDebug(BCLog::NET, "mempool request with bloom filters disabled, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4712 | 0 |                 pfrom.fDisconnect = true; | 
| 4713 | 0 |             } | 
| 4714 | 0 |             return; | 
| 4715 | 0 |         } | 
| 4716 |  |  | 
| 4717 | 0 |         if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) | 
| 4718 | 0 |         { | 
| 4719 | 0 |             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) | 
| 4720 | 0 |             { | 
| 4721 | 0 |                 LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4722 | 0 |                 pfrom.fDisconnect = true; | 
| 4723 | 0 |             } | 
| 4724 | 0 |             return; | 
| 4725 | 0 |         } | 
| 4726 |  |  | 
| 4727 | 0 |         if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 4728 | 0 |             LOCK(tx_relay->m_tx_inventory_mutex); | 
| 4729 | 0 |             tx_relay->m_send_mempool = true; | 
| 4730 | 0 |         } | 
| 4731 | 0 |         return; | 
| 4732 | 0 |     } | 
| 4733 |  |  | 
| 4734 | 0 |     if (msg_type == NetMsgType::PING) { | 
| 4735 | 0 |         if (pfrom.GetCommonVersion() > BIP0031_VERSION) { | 
| 4736 | 0 |             uint64_t nonce = 0; | 
| 4737 | 0 |             vRecv >> nonce; | 
| 4738 |  |             // Echo the message back with the nonce. This allows for two useful features: | 
| 4739 |  |             // | 
| 4740 |  |             // 1) A remote node can quickly check if the connection is operational | 
| 4741 |  |             // 2) Remote nodes can measure the latency of the network thread. If this node | 
| 4742 |  |             //    is overloaded it won't respond to pings quickly and the remote node can | 
| 4743 |  |             //    avoid sending us more work, like chain download requests. | 
| 4744 |  |             // | 
| 4745 |  |             // The nonce stops the remote getting confused between different pings: without | 
| 4746 |  |             // it, if the remote node sends a ping once per second and this node takes 5 | 
| 4747 |  |             // seconds to respond to each, the 5th ping the remote sends would appear to | 
| 4748 |  |             // return very quickly. | 
| 4749 | 0 |             MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce); | 
| 4750 | 0 |         } | 
| 4751 | 0 |         return; | 
| 4752 | 0 |     } | 
| 4753 |  |  | 
| 4754 | 0 |     if (msg_type == NetMsgType::PONG) { | 
| 4755 | 0 |         const auto ping_end = time_received; | 
| 4756 | 0 |         uint64_t nonce = 0; | 
| 4757 | 0 |         size_t nAvail = vRecv.in_avail(); | 
| 4758 | 0 |         bool bPingFinished = false; | 
| 4759 | 0 |         std::string sProblem; | 
| 4760 |  |  | 
| 4761 | 0 |         if (nAvail >= sizeof(nonce)) { | 
| 4762 | 0 |             vRecv >> nonce; | 
| 4763 |  |  | 
| 4764 |  |             // Only process a pong message if there is an outstanding ping (an old ping without a nonce should never pong) | 
| 4765 | 0 |             if (peer->m_ping_nonce_sent != 0) { | 
| 4766 | 0 |                 if (nonce == peer->m_ping_nonce_sent) { | 
| 4767 |  |                     // Matching pong received, this ping is no longer outstanding | 
| 4768 | 0 |                     bPingFinished = true; | 
| 4769 | 0 |                     const auto ping_time = ping_end - peer->m_ping_start.load(); | 
| 4770 | 0 |                     if (ping_time.count() >= 0) { | 
| 4771 |  |                         // Let connman know about this successful ping-pong | 
| 4772 | 0 |                         pfrom.PongReceived(ping_time); | 
| 4773 | 0 |                     } else { | 
| 4774 |  |                         // This should never happen | 
| 4775 | 0 |                         sProblem = "Timing mishap"; | 
| 4776 | 0 |                     } | 
| 4777 | 0 |                 } else { | 
| 4778 |  |                     // Nonce mismatches are normal when pings are overlapping | 
| 4779 | 0 |                     sProblem = "Nonce mismatch"; | 
| 4780 | 0 |                     if (nonce == 0) { | 
| 4781 |  |                         // This is most likely a bug in another implementation somewhere; cancel this ping | 
| 4782 | 0 |                         bPingFinished = true; | 
| 4783 | 0 |                         sProblem = "Nonce zero"; | 
| 4784 | 0 |                     } | 
| 4785 | 0 |                 } | 
| 4786 | 0 |             } else { | 
| 4787 | 0 |                 sProblem = "Unsolicited pong without ping"; | 
| 4788 | 0 |             } | 
| 4789 | 0 |         } else { | 
| 4790 |  |             // This is most likely a bug in another implementation somewhere; cancel this ping | 
| 4791 | 0 |             bPingFinished = true; | 
| 4792 | 0 |             sProblem = "Short payload"; | 
| 4793 | 0 |         } | 
| 4794 |  |  | 
| 4795 | 0 |         if (!(sProblem.empty())) { | 
| 4796 | 0 |             LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", | 
| 4797 | 0 |                 pfrom.GetId(), | 
| 4798 | 0 |                 sProblem, | 
| 4799 | 0 |                 peer->m_ping_nonce_sent, | 
| 4800 | 0 |                 nonce, | 
| 4801 | 0 |                 nAvail); | 
| 4802 | 0 |         } | 
| 4803 | 0 |         if (bPingFinished) { | 
| 4804 | 0 |             peer->m_ping_nonce_sent = 0; | 
| 4805 | 0 |         } | 
| 4806 | 0 |         return; | 
| 4807 | 0 |     } | 
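The matching rule above boils down to: a pong only completes the ping whose nonce it echoes. A compressed sketch of the happy path (all types hypothetical; the nonce-zero and short-payload cancellation cases handled above are omitted):

    #include <chrono>
    #include <cstdint>
    #include <optional>

    struct PingState {
        uint64_t nonce_sent{0};             // 0 => no ping outstanding
        std::chrono::microseconds start{0}; // when the ping went out
    };

    // Returns the measured round-trip time, or nullopt for an unsolicited or
    // overlapping pong that should not complete the outstanding ping.
    std::optional<std::chrono::microseconds> OnPong(PingState& st, uint64_t nonce,
                                                    std::chrono::microseconds now)
    {
        if (st.nonce_sent == 0 || nonce != st.nonce_sent) return std::nullopt;
        st.nonce_sent = 0; // ping is no longer outstanding
        return now - st.start;
    }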
| 4808 |  |  | 
| 4809 | 0 |     if (msg_type == NetMsgType::FILTERLOAD) { | 
| 4810 | 0 |         if (!(peer->m_our_services & NODE_BLOOM)) { | 
| 4811 | 0 |             LogDebug(BCLog::NET, "filterload received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4812 | 0 |             pfrom.fDisconnect = true; | 
| 4813 | 0 |             return; | 
| 4814 | 0 |         } | 
| 4815 | 0 |         CBloomFilter filter; | 
| 4816 | 0 |         vRecv >> filter; | 
| 4817 |  |  | 
| 4818 | 0 |         if (!filter.IsWithinSizeConstraints()) | 
| 4819 | 0 |         { | 
| 4820 |  |             // There is no excuse for sending a too-large filter | 
| 4821 | 0 |             Misbehaving(*peer, "too-large bloom filter"); | 
| 4822 | 0 |         } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 4823 | 0 |             { | 
| 4824 | 0 |                 LOCK(tx_relay->m_bloom_filter_mutex); | 
| 4825 | 0 |                 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter)); | 
| 4826 | 0 |                 tx_relay->m_relay_txs = true; | 
| 4827 | 0 |             } | 
| 4828 | 0 |             pfrom.m_bloom_filter_loaded = true; | 
| 4829 | 0 |             pfrom.m_relays_txs = true; | 
| 4830 | 0 |         } | 
| 4831 | 0 |         return; | 
| 4832 | 0 |     } | 
| 4833 |  |  | 
| 4834 | 0 |     if (msg_type == NetMsgType::FILTERADD) { | 
| 4835 | 0 |         if (!(peer->m_our_services & NODE_BLOOM)) { | 
| 4836 | 0 |             LogDebug(BCLog::NET, "filteradd received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4837 | 0 |             pfrom.fDisconnect = true; | 
| 4838 | 0 |             return; | 
| 4839 | 0 |         } | 
| 4840 | 0 |         std::vector<unsigned char> vData; | 
| 4841 | 0 |         vRecv >> vData; | 
| 4842 |  |  | 
| 4843 |  |         // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object, | 
| 4844 |  |         // and thus, the maximum size any matched object can have) in a filteradd message | 
| 4845 | 0 |         bool bad = false; | 
| 4846 | 0 |         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { | 
| 4847 | 0 |             bad = true; | 
| 4848 | 0 |         } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 4849 | 0 |             LOCK(tx_relay->m_bloom_filter_mutex); | 
| 4850 | 0 |             if (tx_relay->m_bloom_filter) { | 
| 4851 | 0 |                 tx_relay->m_bloom_filter->insert(vData); | 
| 4852 | 0 |             } else { | 
| 4853 | 0 |                 bad = true; | 
| 4854 | 0 |             } | 
| 4855 | 0 |         } | 
| 4856 | 0 |         if (bad) { | 
| 4857 | 0 |             Misbehaving(*peer, "bad filteradd message"); | 
| 4858 | 0 |         } | 
| 4859 | 0 |         return; | 
| 4860 | 0 |     } | 
| 4861 |  |  | 
| 4862 | 0 |     if (msg_type == NetMsgType::FILTERCLEAR) { | 
| 4863 | 0 |         if (!(peer->m_our_services & NODE_BLOOM)) { | 
| 4864 | 0 |             LogDebug(BCLog::NET, "filterclear received despite not offering bloom services, %s\n", pfrom.DisconnectMsg(fLogIPs)); | 
| 4865 | 0 |             pfrom.fDisconnect = true; | 
| 4866 | 0 |             return; | 
| 4867 | 0 |         } | 
| 4868 | 0 |         auto tx_relay = peer->GetTxRelay(); | 
| 4869 | 0 |         if (!tx_relay) return; | 
| 4870 |  |  | 
| 4871 | 0 |         { | 
| 4872 | 0 |             LOCK(tx_relay->m_bloom_filter_mutex); | 
| 4873 | 0 |             tx_relay->m_bloom_filter = nullptr; | 
| 4874 | 0 |             tx_relay->m_relay_txs = true; | 
| 4875 | 0 |         } | 
| 4876 | 0 |         pfrom.m_bloom_filter_loaded = false; | 
| 4877 | 0 |         pfrom.m_relays_txs = true; | 
| 4878 | 0 |         return; | 
| 4879 | 0 |     } | 
| 4880 |  |  | 
| 4881 | 0 |     if (msg_type == NetMsgType::FEEFILTER) { | 
| 4882 | 0 |         CAmount newFeeFilter = 0; | 
| 4883 | 0 |         vRecv >> newFeeFilter; | 
| 4884 | 0 |         if (MoneyRange(newFeeFilter)) { | 
| 4885 | 0 |             if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 4886 | 0 |                 tx_relay->m_fee_filter_received = newFeeFilter; | 
| 4887 | 0 |             } | 
| 4888 | 0 |             LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); | 
| 4889 | 0 |         } | 
| 4890 | 0 |         return; | 
| 4891 | 0 |     } | 
| 4892 |  |  | 
| 4893 | 0 |     if (msg_type == NetMsgType::GETCFILTERS) { | 
| 4894 | 0 |         ProcessGetCFilters(pfrom, *peer, vRecv); | 
| 4895 | 0 |         return; | 
| 4896 | 0 |     } | 
| 4897 |  |  | 
| 4898 | 0 |     if (msg_type == NetMsgType::GETCFHEADERS) { | 
| 4899 | 0 |         ProcessGetCFHeaders(pfrom, *peer, vRecv); | 
| 4900 | 0 |         return; | 
| 4901 | 0 |     } | 
| 4902 |  |  | 
| 4903 | 0 |     if (msg_type == NetMsgType::GETCFCHECKPT) { | 
| 4904 | 0 |         ProcessGetCFCheckPt(pfrom, *peer, vRecv); | 
| 4905 | 0 |         return; | 
| 4906 | 0 |     } | 
| 4907 |  |  | 
| 4908 | 0 |     if (msg_type == NetMsgType::NOTFOUND) { | 
| 4909 | 0 |         std::vector<CInv> vInv; | 
| 4910 | 0 |         vRecv >> vInv; | 
| 4911 | 0 |         std::vector<GenTxid> tx_invs; | 
| 4912 | 0 |         if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { | 
| 4913 | 0 |             for (CInv &inv : vInv) { | 
| 4914 | 0 |                 if (inv.IsGenTxMsg()) { | 
| 4915 | 0 |                     tx_invs.emplace_back(ToGenTxid(inv)); | 
| 4916 | 0 |                 } | 
| 4917 | 0 |             } | 
| 4918 | 0 |         } | 
| 4919 | 0 |         LOCK(m_tx_download_mutex); | 
| 4920 | 0 |         m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs); | 
| 4921 | 0 |         return; | 
| 4922 | 0 |     } | 
| 4923 |  |  | 
| 4924 |  |     // Ignore unknown commands for extensibility | 
| 4925 | 0 |     LogDebug(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); | 
| 4926 | 0 |     return; | 
| 4927 | 0 | } | 
| 4928 |  |  | 
| 4929 |  | bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) | 
| 4930 | 0 | { | 
| 4931 | 0 |     { | 
| 4932 | 0 |         LOCK(peer.m_misbehavior_mutex); | 
| 4933 |  |  | 
| 4934 |  |         // There's nothing to do if the m_should_discourage flag isn't set | 
| 4935 | 0 |         if (!peer.m_should_discourage) return false; | 
| 4936 |  |  | 
| 4937 | 0 |         peer.m_should_discourage = false; | 
| 4938 | 0 |     } // peer.m_misbehavior_mutex | 
| 4939 |  |  | 
| 4940 | 0 |     if (pnode.HasPermission(NetPermissionFlags::NoBan)) { | 
| 4941 |  |         // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission | 
| 4942 | 0 |         LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); | 
| 4943 | 0 |         return false; | 
| 4944 | 0 |     } | 
| 4945 |  |  | 
| 4946 | 0 |     if (pnode.IsManualConn()) { | 
| 4947 |  |         // We never disconnect or discourage manual peers for bad behavior | 
| 4948 | 0 |         LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); | 
| 4949 | 0 |         return false; | 
| 4950 | 0 |     } | 
| 4951 |  |  | 
| 4952 | 0 |     if (pnode.addr.IsLocal()) { | 
| 4953 |  |         // We disconnect local peers for bad behavior but don't discourage (since that would discourage | 
| 4954 |  |         // all peers on the same local address) | 
| 4955 | 0 |         LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n", | 
| 4956 | 0 |                  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id); | 
| 4957 | 0 |         pnode.fDisconnect = true; | 
| 4958 | 0 |         return true; | 
| 4959 | 0 |     } | 
| 4960 |  |  | 
| 4961 |  |     // Normal case: Disconnect the peer and discourage all nodes sharing the address | 
| 4962 | 0 |     LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); | 
| 4963 | 0 |     if (m_banman) m_banman->Discourage(pnode.addr); | 
| 4964 | 0 |     m_connman.DisconnectNode(pnode.addr); | 
| 4965 | 0 |     return true; | 
| 4966 | 0 | } | 
| 4967 |  |  | 
| 4968 |  | bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc) | 
| 4969 | 0 | { | 
| 4970 | 0 |     AssertLockNotHeld(m_tx_download_mutex); | 
| 4971 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 4972 |  |  | 
| 4973 | 0 |     PeerRef peer = GetPeerRef(pfrom->GetId()); | 
| 4974 | 0 |     if (peer == nullptr) return false; | 
| 4975 |  |  | 
| 4976 |  |     // For outbound connections, ensure that the initial VERSION message | 
| 4977 |  |     // has been sent first before processing any incoming messages | 
| 4978 | 0 |     if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false; | 
| 4979 |  |  | 
| 4980 | 0 |     { | 
| 4981 | 0 |         LOCK(peer->m_getdata_requests_mutex); | 
| 4982 | 0 |         if (!peer->m_getdata_requests.empty()) { | 
| 4983 | 0 |             ProcessGetData(*pfrom, *peer, interruptMsgProc); | 
| 4984 | 0 |         } | 
| 4985 | 0 |     } | 
| 4986 |  |  | 
| 4987 | 0 |     const bool processed_orphan = ProcessOrphanTx(*peer); | 
| 4988 |  |  | 
| 4989 | 0 |     if (pfrom->fDisconnect) | 
| 4990 | 0 |         return false; | 
| 4991 |  |  | 
| 4992 | 0 |     if (processed_orphan) return true; | 
| 4993 |  |  | 
| 4994 |  |     // This maintains the order of responses | 
| 4995 |  |     // and prevents m_getdata_requests from growing unbounded | 
| 4996 | 0 |     { | 
| 4997 | 0 |         LOCK(peer->m_getdata_requests_mutex); | 
| 4998 | 0 |         if (!peer->m_getdata_requests.empty()) return true; | 
| 4999 | 0 |     } | 
| 5000 |  |  | 
| 5001 |  |     // Don't bother if send buffer is too full to respond anyway | 
| 5002 | 0 |     if (pfrom->fPauseSend) return false; | 
| 5003 |  |  | 
| 5004 | 0 |     auto poll_result{pfrom->PollMessage()}; | 
| 5005 | 0 |     if (!poll_result) { | 
| 5006 |  |         // No message to process | 
| 5007 | 0 |         return false; | 
| 5008 | 0 |     } | 
| 5009 |  |  | 
| 5010 | 0 |     CNetMessage& msg{poll_result->first}; | 
| 5011 | 0 |     bool fMoreWork = poll_result->second; | 
| 5012 |  |  | 
| 5013 | 0 |     TRACEPOINT(net, inbound_message, | 
| 5014 | 0 |         pfrom->GetId(), | 
| 5015 | 0 |         pfrom->m_addr_name.c_str(), | 
| 5016 | 0 |         pfrom->ConnectionTypeAsString().c_str(), | 
| 5017 | 0 |         msg.m_type.c_str(), | 
| 5018 | 0 |         msg.m_recv.size(), | 
| 5019 | 0 |         msg.m_recv.data() | 
| 5020 | 0 |     ); | 
| 5021 |  |  | 
| 5022 | 0 |     if (m_opts.capture_messages) { | 
| 5023 | 0 |         CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); | 
| 5024 | 0 |     } | 
| 5025 |  |  | 
| 5026 | 0 |     try { | 
| 5027 | 0 |         ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc); | 
| 5028 | 0 |         if (interruptMsgProc) return false; | 
| 5029 | 0 |         { | 
| 5030 | 0 |             LOCK(peer->m_getdata_requests_mutex); | 
| 5031 | 0 |             if (!peer->m_getdata_requests.empty()) fMoreWork = true; | 
| 5032 | 0 |         } | 
| 5033 |  |         // Does this peer have an orphan ready to reconsider? | 
| 5034 |  |         // (Note: we may have provided a parent for an orphan provided | 
| 5035 |  |         //  by another peer that was already processed; in that case, | 
| 5036 |  |         //  the extra work may not be noticed, possibly resulting in an | 
| 5037 |  |         //  unnecessary 100ms delay) | 
| 5038 | 0 |         LOCK(m_tx_download_mutex); | 
| 5039 | 0 |         if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true; | 
| 5040 | 0 |     } catch (const std::exception& e) { | 
| 5041 | 0 |         LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); | 
| 5042 | 0 |     } catch (...) { | 
| 5043 | 0 |         LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size); | 
| 5044 | 0 |     } | 
| 5045 |  |  | 
| 5046 | 0 |     return fMoreWork; | 
| 5047 | 0 | } | 
| 5048 |  |  | 
| 5049 |  | void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) | 
| 5050 | 0 | { | 
| 5051 | 0 |     AssertLockHeld(cs_main); | 
| 5052 |  |  | 
| 5053 | 0 |     CNodeState &state = *State(pto.GetId()); | 
| 5054 |  |  | 
| 5055 | 0 |     if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { | 
| 5056 |  |         // This is an outbound peer subject to disconnection if they don't | 
| 5057 |  |         // announce a block with as much work as the current tip within | 
| 5058 |  |         // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if | 
| 5059 |  |         // their chain has more work than ours, we should sync to it, | 
| 5060 |  |         // unless it's invalid, in which case we should find that out and | 
| 5061 |  |         // disconnect from them elsewhere). | 
| 5062 | 0 |         if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) { | 
| 5063 |  |             // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set | 
| 5064 | 0 |             if (state.m_chain_sync.m_timeout != 0s) { | 
| 5065 | 0 |                 state.m_chain_sync.m_timeout = 0s; | 
| 5066 | 0 |                 state.m_chain_sync.m_work_header = nullptr; | 
| 5067 | 0 |                 state.m_chain_sync.m_sent_getheaders = false; | 
| 5068 | 0 |             } | 
| 5069 | 0 |         } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { | 
| 5070 |  |             // At this point we know that the outbound peer has either never sent us a block/header or they have, but its tip is behind ours | 
| 5071 |  |             // AND | 
| 5072 |  |             // we are noticing this for the first time (m_timeout is 0) | 
| 5073 |  |             // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout | 
| 5074 |  |             // for them, they caught up to our tip at the time of setting the timer but not to our current one (we've also advanced). | 
| 5075 |  |             // Either way, set a new timeout based on our current tip. | 
| 5076 | 0 |             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; | 
| 5077 | 0 |             state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip(); | 
| 5078 | 0 |             state.m_chain_sync.m_sent_getheaders = false; | 
| 5079 | 0 |         } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) { | 
| 5080 |  |             // No evidence yet that our peer has synced to a chain with work equal to that | 
| 5081 |  |             // of our tip, when we first detected it was behind. Send a single getheaders | 
| 5082 |  |             // message to give the peer a chance to update us. | 
| 5083 | 0 |             if (state.m_chain_sync.m_sent_getheaders) { | 
| 5084 |  |                 // They've run out of time to catch up! | 
| 5085 | 0 |                 LogInfo("Outbound peer has old chain, best known block = %s, %s\n", state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", pto.DisconnectMsg(fLogIPs)); | 
| 5086 | 0 |                 pto.fDisconnect = true; | 
| 5087 | 0 |             } else { | 
| 5088 | 0 |                 assert(state.m_chain_sync.m_work_header); | 
| 5089 |  |                 // Here, we assume that the getheaders message goes out, | 
| 5090 |  |                 // because it'll either go out or be skipped because of a | 
| 5091 |  |                 // getheaders in-flight already, in which case the peer should | 
| 5092 |  |                 // still respond to us with a sufficiently high work chain tip. | 
| 5093 | 0 |                 MaybeSendGetHeaders(pto, | 
| 5094 | 0 |                         GetLocator(state.m_chain_sync.m_work_header->pprev), | 
| 5095 | 0 |                         peer); | 
| 5096 | 0 |                 LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); | 
| 5097 | 0 |                 state.m_chain_sync.m_sent_getheaders = true; | 
| 5098 |  |                 // Bump the timeout to allow a response, which could clear the timeout | 
| 5099 |  |                 // (if the response shows the peer has synced), reset the timeout (if | 
| 5100 |  |                 // the peer syncs to the required work but not to our tip), or result | 
| 5101 |  |                 // in disconnect (if we advance to the timeout and pindexBestKnownBlock | 
| 5102 |  |                 // has not sufficiently progressed) | 
| 5103 | 0 |                 state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; | 
| 5104 | 0 |             } | 
| 5105 | 0 |         } | 
| 5106 | 0 |     } | 
| 5107 | 0 | } | 
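ConsiderEviction is easiest to read as a small timer state machine: reset while the peer keeps up, arm a timeout when it falls behind, send one getheaders when the timeout fires, and disconnect if that last chance also expires. A compressed sketch under those assumptions (it collapses the re-arm-against-a-newer-tip case above, and every name here is illustrative, not the CNodeState API):

    #include <chrono>

    struct ChainSyncTimer {
        std::chrono::seconds timeout{0}; // 0s => no timer armed
        bool sent_getheaders{false};
    };

    enum class Action { None, ArmTimer, SendGetheaders, Disconnect };

    // peer_caught_up: the peer's best known work has reached our benchmark.
    Action Advance(ChainSyncTimer& cs, bool peer_caught_up, std::chrono::seconds now,
                   std::chrono::seconds sync_timeout, std::chrono::seconds response_time)
    {
        if (peer_caught_up) { cs = {}; return Action::None; } // reset the timer
        if (cs.timeout == std::chrono::seconds{0}) {          // first notice: arm it
            cs.timeout = now + sync_timeout;
            return Action::ArmTimer;
        }
        if (now <= cs.timeout) return Action::None;           // still waiting
        if (!cs.sent_getheaders) {                            // one last getheaders
            cs.sent_getheaders = true;
            cs.timeout = now + response_time;
            return Action::SendGetheaders;
        }
        return Action::Disconnect;                            // out of chances
    }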
| 5108 |  |  | 
| 5109 |  | void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) | 
| 5110 | 0 | { | 
| 5111 |  |     // If we have any extra block-relay-only peers, disconnect the youngest unless | 
| 5112 |  |     // it's given us a block -- in which case, compare with the second-youngest, and | 
| 5113 |  |     // out of those two, disconnect the peer who least recently gave us a block. | 
| 5114 |  |     // The youngest block-relay-only peer would be the extra peer we connected | 
| 5115 |  |     // to temporarily in order to sync our tip; see net.cpp. | 
| 5116 |  |     // Note that we use a higher node id as a proxy for a more recent connection. | 
| 5117 | 0 |     if (m_connman.GetExtraBlockRelayCount() > 0) { | 
| 5118 | 0 |         std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0}; | 
| 5119 |  |  | 
| 5120 | 0 |         m_connman.ForEachNode([&](CNode* pnode) { | 
| 5121 | 0 |             if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return; | 
| 5122 | 0 |             if (pnode->GetId() > youngest_peer.first) { | 
| 5123 | 0 |                 next_youngest_peer = youngest_peer; | 
| 5124 | 0 |                 youngest_peer.first = pnode->GetId(); | 
| 5125 | 0 |                 youngest_peer.second = pnode->m_last_block_time; | 
| 5126 | 0 |             } | 
| 5127 | 0 |         }); | 
| 5128 | 0 |         NodeId to_disconnect = youngest_peer.first; | 
| 5129 | 0 |         if (youngest_peer.second > next_youngest_peer.second) { | 
| 5130 |  |             // Our newest block-relay-only peer gave us a block more recently; | 
| 5131 |  |             // disconnect our second youngest. | 
| 5132 | 0 |             to_disconnect = next_youngest_peer.first; | 
| 5133 | 0 |         } | 
| 5134 | 0 |         m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 5135 | 0 |             AssertLockHeld(::cs_main); | 
| 5136 |  |             // Make sure we're not getting a block right now, and that | 
| 5137 |  |             // we've been connected long enough for this eviction to happen | 
| 5138 |  |             // at all. | 
| 5139 |  |             // Note that we only request blocks from a peer if we learn of a | 
| 5140 |  |             // valid headers chain with at least as much work as our tip. | 
| 5141 | 0 |             CNodeState *node_state = State(pnode->GetId()); | 
| 5142 | 0 |             if (node_state == nullptr || | 
| 5143 | 0 |                 (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) { | 
| 5144 | 0 |                 pnode->fDisconnect = true; | 
| 5145 | 0 |                 LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", | 
| 5146 | 0 |                          pnode->GetId(), count_seconds(pnode->m_last_block_time)); | 
| 5147 | 0 |                 return true; | 
| 5148 | 0 |             } else { | 
| 5149 | 0 |                 LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", | 
| 5150 | 0 |                          pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size()); | 
| 5151 | 0 |             } | 
| 5152 | 0 |             return false; | 
| 5153 | 0 |         }); | 
| 5154 | 0 |     } | 
| 5155 |  |  | 
| 5156 |  |     // Check whether we have too many outbound-full-relay peers | 
| 5157 | 0 |     if (m_connman.GetExtraFullOutboundCount() > 0) { | 
| 5158 |  |         // If we have more outbound-full-relay peers than we target, disconnect one. | 
| 5159 |  |         // Pick the outbound-full-relay peer that least recently announced | 
| 5160 |  |         // a new block to us, with ties broken by choosing the more recent | 
| 5161 |  |         // connection (higher node id). | 
| 5162 |  |         // Protect peers from eviction if we don't have another connection | 
| 5163 |  |         // to their network, counting both outbound-full-relay and manual peers. | 
| 5164 | 0 |         NodeId worst_peer = -1; | 
| 5165 | 0 |         int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max(); | 
| 5166 |  |  | 
| 5167 | 0 |         m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) { | 
| 5168 | 0 |             AssertLockHeld(::cs_main); | 
| 5169 |  |  | 
| 5170 |  |             // Only consider outbound-full-relay peers that are not already | 
| 5171 |  |             // marked for disconnection | 
| 5172 | 0 |             if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return; | 
| 5173 | 0 |             CNodeState *state = State(pnode->GetId()); | 
| 5174 | 0 |             if (state == nullptr) return; // shouldn't be possible, but just in case | 
| 5175 |  |             // Don't evict our protected peers | 
| 5176 | 0 |             if (state->m_chain_sync.m_protect) return; | 
| 5177 |  |             // If this is the only connection on a particular network that is | 
| 5178 |  |             // OUTBOUND_FULL_RELAY or MANUAL, protect it. | 
| 5179 | 0 |             if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return; | 
| 5180 | 0 |             if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { | 
| 5181 | 0 |                 worst_peer = pnode->GetId(); | 
| 5182 | 0 |                 oldest_block_announcement = state->m_last_block_announcement; | 
| 5183 | 0 |             } | 
| 5184 | 0 |         }); | 
| 5185 | 0 |         if (worst_peer != -1) { | 
| 5186 | 0 |             bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { | 
| 5187 | 0 |                 AssertLockHeld(::cs_main); | 
| 5188 |  |  | 
| 5189 |  |                 // Only disconnect a peer that has been connected to us for | 
| 5190 |  |                 // some reasonable fraction of our check-frequency, to give | 
| 5191 |  |                 // it time for new information to have arrived. | 
| 5192 |  |                 // Also don't disconnect any peer we're trying to download a | 
| 5193 |  |                 // block from. | 
| 5194 | 0 |                 CNodeState &state = *State(pnode->GetId()); | 
| 5195 | 0 |                 if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) { | 
| 5196 | 0 |                     LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); | 
| 5197 | 0 |                     pnode->fDisconnect = true; | 
| 5198 | 0 |                     return true; | 
| 5199 | 0 |                 } else { | 
| 5200 | 0 |                     LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", | 
| 5201 | 0 |                              pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size()); | 
| 5202 | 0 |                     return false; | 
| 5203 | 0 |                 } | 
| 5204 | 0 |             }); | 
| 5205 | 0 |             if (disconnected) { | 
| 5206 |  |                 // If we disconnected an extra peer, that means we successfully | 
| 5207 |  |                 // connected to at least one peer after the last time we | 
| 5208 |  |                 // detected a stale tip. Don't try any more extra peers until | 
| 5209 |  |                 // we next detect a stale tip, to limit the load we put on the | 
| 5210 |  |                 // network from these extra connections. | 
| 5211 | 0 |                 m_connman.SetTryNewOutboundPeer(false); | 
| 5212 | 0 |             } | 
| 5213 | 0 |         } | 
| 5214 | 0 |     } | 
| 5215 | 0 | } | 
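Since node ids are assigned in connection order, "youngest" above means "highest id". A minimal sketch of the two-newest selection over a flat list of ids (the real loop additionally tracks each peer's last-block time for the tie-break described above):

    #include <cstdint>
    #include <utility>
    #include <vector>

    using NodeId = int64_t;

    // Return {newest, second-newest} connection by node id; -1 means "none".
    std::pair<NodeId, NodeId> TwoNewest(const std::vector<NodeId>& ids)
    {
        NodeId youngest{-1}, next_youngest{-1};
        for (NodeId id : ids) {
            if (id > youngest) {
                next_youngest = youngest; // previous newest slides down a slot
                youngest = id;
            } else if (id > next_youngest) {
                next_youngest = id;
            }
        }
        return {youngest, next_youngest};
    }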
| 5216 |  |  | 
| 5217 |  | void PeerManagerImpl::CheckForStaleTipAndEvictPeers() | 
| 5218 | 0 | { | 
| 5219 | 0 |     LOCK(cs_main); | 
| 5220 |  |  | 
| 5221 | 0 |     auto now{GetTime<std::chrono::seconds>()}; | 
| 5222 |  |  | 
| 5223 | 0 |     EvictExtraOutboundPeers(now); | 
| 5224 |  |  | 
| 5225 | 0 |     if (now > m_stale_tip_check_time) { | 
| 5226 |  |         // Check whether our tip is stale, and if so, allow using an extra | 
| 5227 |  |         // outbound peer | 
| 5228 | 0 |         if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { | 
| 5229 | 0 |             LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", | 
| 5230 | 0 |                       count_seconds(now - m_last_tip_update.load())); | 
| 5231 | 0 |             m_connman.SetTryNewOutboundPeer(true); | 
| 5232 | 0 |         } else if (m_connman.GetTryNewOutboundPeer()) { | 
| 5233 | 0 |             m_connman.SetTryNewOutboundPeer(false); | 
| 5234 | 0 |         } | 
| 5235 | 0 |         m_stale_tip_check_time = now + STALE_CHECK_INTERVAL; | 
| 5236 | 0 |     } | 
| 5237 |  |  | 
| 5238 | 0 |     if (!m_initial_sync_finished && CanDirectFetch()) { | 
| 5239 | 0 |         m_connman.StartExtraBlockRelayPeers(); | 
| 5240 | 0 |         m_initial_sync_finished = true; | 
| 5241 | 0 |     } | 
| 5242 | 0 | } | 
| 5243 |  |  | 
| 5244 |  | void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now) | 
| 5245 | 0 | { | 
| 5246 | 0 |     if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) && | 
| 5247 | 0 |         peer.m_ping_nonce_sent && | 
| 5248 | 0 |         now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) | 
| 5249 | 0 |     { | 
| 5250 |  |         // The ping timeout is using mocktime. To disable the check during | 
| 5251 |  |         // testing, increase -peertimeout. | 
| 5252 | 0 |         LogDebug(BCLog::NET, "ping timeout: %fs, %s", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), node_to.DisconnectMsg(fLogIPs)); | 
| 5253 | 0 |         node_to.fDisconnect = true; | 
| 5254 | 0 |         return; | 
| 5255 | 0 |     } | 
| 5256 |  |  | 
| 5257 | 0 |     bool pingSend = false; | 
| 5258 |  |  | 
| 5259 | 0 |     if (peer.m_ping_queued) { | 
| 5260 |  |         // RPC ping request by user | 
| 5261 | 0 |         pingSend = true; | 
| 5262 | 0 |     } | 
| 5263 |  |  | 
| 5264 | 0 |     if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) { | 
| 5265 |  |         // Ping automatically sent as a latency probe & keepalive. | 
| 5266 | 0 |         pingSend = true; | 
| 5267 | 0 |     } | 
| 5268 |  |  | 
| 5269 | 0 |     if (pingSend) { | 
| 5270 | 0 |         uint64_t nonce; | 
| 5271 | 0 |         do { | 
| 5272 | 0 |             nonce = FastRandomContext().rand64(); | 
| 5273 | 0 |         } while (nonce == 0); | 
| 5274 | 0 |         peer.m_ping_queued = false; | 
| 5275 | 0 |         peer.m_ping_start = now; | 
| 5276 | 0 |         if (node_to.GetCommonVersion() > BIP0031_VERSION) { | 
| 5277 | 0 |             peer.m_ping_nonce_sent = nonce; | 
| 5278 | 0 |             MakeAndPushMessage(node_to, NetMsgType::PING, nonce); | 
| 5279 | 0 |         } else { | 
| 5280 |  |             // Peer is too old to support the ping command with a nonce; a pong will never arrive. | 
| 5281 | 0 |             peer.m_ping_nonce_sent = 0; | 
| 5282 | 0 |             MakeAndPushMessage(node_to, NetMsgType::PING); | 
| 5283 | 0 |         } | 
| 5284 | 0 |     } | 
| 5285 | 0 | } | 
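The nonce loop above exists because 0 is reserved as the "no ping outstanding" sentinel. A standalone sketch of the same draw, with std::mt19937_64 standing in for FastRandomContext:

    #include <cstdint>
    #include <random>

    // Draw a random 64-bit nonce, redrawing until it is nonzero so that 0 can
    // keep its sentinel meaning of "no ping outstanding".
    uint64_t DrawPingNonce()
    {
        std::mt19937_64 rng{std::random_device{}()};
        uint64_t nonce;
        do {
            nonce = rng();
        } while (nonce == 0);
        return nonce;
    }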
| 5286 |  |  | 
| 5287 |  | void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) | 
| 5288 | 0 | { | 
| 5289 |  |     // Nothing to do for non-address-relay peers | 
| 5290 | 0 |     if (!peer.m_addr_relay_enabled) return; | 
| 5291 |  |  | 
| 5292 | 0 |     LOCK(peer.m_addr_send_times_mutex); | 
| 5293 |  |     // Periodically advertise our local address to the peer. | 
| 5294 | 0 |     if (fListen && !m_chainman.IsInitialBlockDownload() && | 
| 5295 | 0 |         peer.m_next_local_addr_send < current_time) { | 
| 5296 |  |         // If we've sent before, clear the bloom filter for the peer, so that our | 
| 5297 |  |         // self-announcement will actually go out. | 
| 5298 |  |         // This might be unnecessary if the bloom filter has already rolled | 
| 5299 |  |         // over since our last self-announcement, but doing so costs only a | 
| 5300 |  |         // little extra bandwidth (self-announcements happen about once a day | 
| 5301 |  |         // on average). | 
| 5302 | 0 |         if (peer.m_next_local_addr_send != 0us) { | 
| 5303 | 0 |             peer.m_addr_known->reset(); | 
| 5304 | 0 |         } | 
| 5305 | 0 |         if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) { | 
| 5306 | 0 |             CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; | 
| 5307 | 0 |             PushAddress(peer, local_addr); | 
| 5308 | 0 |         } | 
| 5309 | 0 |         peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); | 
| 5310 | 0 |     } | 
| 5311 |  |  | 
| 5312 |  |     // We sent an `addr` message to this peer recently. Nothing more to do. | 
| 5313 | 0 |     if (current_time <= peer.m_next_addr_send) return; | 
| 5314 |  |  | 
| 5315 | 0 |     peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL); | 
| 5316 |  |  | 
| 5317 | 0 |     if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { | 
| 5318 |  |         // Should be impossible since we always check size before adding to | 
| 5319 |  |         // m_addrs_to_send. Recover by trimming the vector. | 
| 5320 | 0 |         peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND); | 
| 5321 | 0 |     } | 
| 5322 |  |  | 
| 5323 |  |     // Remove addr records that the peer already knows about, and add new | 
| 5324 |  |     // addrs to the m_addr_known filter on the same pass. | 
| 5325 | 0 |     auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { | 
| 5326 | 0 |         bool ret = peer.m_addr_known->contains(addr.GetKey()); | 
| 5327 | 0 |         if (!ret) peer.m_addr_known->insert(addr.GetKey()); | 
| 5328 | 0 |         return ret; | 
| 5329 | 0 |     }; | 
| 5330 | 0 |     peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known), | 
| 5331 | 0 |                            peer.m_addrs_to_send.end()); | 
| 5332 |  |  | 
| 5333 |  |     // No addr messages to send | 
| 5334 | 0 |     if (peer.m_addrs_to_send.empty()) return; | 
| 5335 |  |  | 
| 5336 | 0 |     if (peer.m_wants_addrv2) { | 
| 5337 | 0 |         MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send)); | 
| 5338 | 0 |     } else { | 
| 5339 | 0 |         MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send)); | 
| 5340 | 0 |     } | 
| 5341 | 0 |     peer.m_addrs_to_send.clear(); | 
| 5342 |  |  | 
| 5343 |  |     // we only send the big addr message once | 
| 5344 | 0 |     if (peer.m_addrs_to_send.capacity() > 40) { | 
| 5345 | 0 |         peer.m_addrs_to_send.shrink_to_fit(); | 
| 5346 | 0 |     } | 
| 5347 | 0 | } | 
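The dedup pass above uses a single erase-remove sweep whose predicate both tests and records each address, so survivors are marked as known in the same pass that keeps them. A minimal sketch of the pattern, with std::unordered_set standing in for the rolling bloom filter and string keys standing in for address keys:

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Drop entries the peer already knows; everything that survives is
    // recorded as known by the same predicate that tested it.
    void FilterKnown(std::vector<std::string>& to_send,
                     std::unordered_set<std::string>& known)
    {
        auto already_known = [&known](const std::string& key) {
            // insert() reports whether the key was new; !inserted => known.
            return !known.insert(key).second;
        };
        to_send.erase(std::remove_if(to_send.begin(), to_send.end(), already_known),
                      to_send.end());
    }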
| 5348 |  |  | 
| 5349 |  | void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) | 
| 5350 | 0 | { | 
| 5351 |  |     // Delay sending SENDHEADERS (BIP 130) until we're done with an | 
| 5352 |  |     // initial-headers-sync with this peer. Receiving headers announcements for | 
| 5353 |  |     // new blocks while trying to sync their headers chain is problematic, | 
| 5354 |  |     // because of the headers-sync state tracking we do. | 
| 5355 | 0 |     if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) { | 
| 5356 | 0 |         LOCK(cs_main); | 
| 5357 | 0 |         CNodeState &state = *State(node.GetId()); | 
| 5358 | 0 |         if (state.pindexBestKnownBlock != nullptr && | 
| 5359 | 0 |                 state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) { | 
| 5360 |  |             // Tell our peer we prefer to receive headers rather than inv's | 
| 5361 |  |             // We send this to non-NODE NETWORK peers as well, because even | 
| 5362 |  |             // non-NODE NETWORK peers can announce blocks (such as pruning | 
| 5363 |  |             // nodes) | 
| 5364 | 0 |             MakeAndPushMessage(node, NetMsgType::SENDHEADERS); | 
| 5365 | 0 |             peer.m_sent_sendheaders = true; | 
| 5366 | 0 |         } | 
| 5367 | 0 |     } | 
| 5368 | 0 | } | 
| 5369 |  |  | 
| 5370 |  | void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) | 
| 5371 | 0 | { | 
| 5372 | 0 |     if (m_opts.ignore_incoming_txs) return; | 
| 5373 | 0 |     if (pto.GetCommonVersion() < FEEFILTER_VERSION) return; | 
| 5374 |  |     // peers with the forcerelay permission should not filter txs to us | 
| 5375 | 0 |     if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return; | 
| 5376 |  |     // Don't send feefilter messages to outbound block-relay-only peers since they should never announce | 
| 5377 |  |     // transactions to us, regardless of feefilter state. | 
| 5378 | 0 |     if (pto.IsBlockOnlyConn()) return; | 
| 5379 |  |  | 
| 5380 | 0 |     CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); | 
| 5381 |  |  | 
| 5382 | 0 |     if (m_chainman.IsInitialBlockDownload()) { | 
| 5383 |  |         // Received tx-inv messages are discarded when the active | 
| 5384 |  |         // chainstate is in IBD, so tell the peer to not send them. | 
| 5385 | 0 |         currentFilter = MAX_MONEY; | 
| 5386 | 0 |     } else { | 
| 5387 | 0 |         static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)}; | 
| 5388 | 0 |         if (peer.m_fee_filter_sent == MAX_FILTER) { | 
| 5389 |  |             // Send the current filter if we sent MAX_FILTER previously | 
| 5390 |  |             // and made it out of IBD. | 
| 5391 | 0 |             peer.m_next_send_feefilter = 0us; | 
| 5392 | 0 |         } | 
| 5393 | 0 |     } | 
| 5394 | 0 |     if (current_time > peer.m_next_send_feefilter) { | 
| 5395 | 0 |         CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); | 
| 5396 |  |         // We always have a fee filter of at least the min relay fee | 
| 5397 | 0 |         filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); | 
| 5398 | 0 |         if (filterToSend != peer.m_fee_filter_sent) { | 
| 5399 | 0 |             MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); | 
| 5400 | 0 |             peer.m_fee_filter_sent = filterToSend; | 
| 5401 | 0 |         } | 
| 5402 | 0 |         peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL); | 
| 5403 | 0 |     } | 
| 5404 |  |     // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY | 
| 5405 |  |     // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. | 
| 5406 | 0 |     else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && | 
| 5407 | 0 |                 (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) { | 
| 5408 | 0 |         peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); | 
| 5409 | 0 |     } | 
| 5410 | 0 | } | 
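The early-rebroadcast branch at the end of `MaybeSendFeefilter` fires only when the filter has moved outside a 3/4–4/3 band around the last value sent. A minimal sketch of that predicate, using plain `int64_t` in place of `CAmount`:

```cpp
#include <cstdint>
#include <iostream>

// True if the new filter differs from the last one sent by more than a
// factor of 4/3 in either direction, mirroring the condition above.
bool FeeFilterChangedSubstantially(int64_t current, int64_t last_sent)
{
    return current < 3 * last_sent / 4 || current > 4 * last_sent / 3;
}

int main()
{
    std::cout << std::boolalpha
              << FeeFilterChangedSubstantially(1000, 1000) << ' '   // false: unchanged
              << FeeFilterChangedSubstantially(700, 1000) << ' '    // true: dropped > 25%
              << FeeFilterChangedSubstantially(1400, 1000) << '\n'; // true: rose > 33%
}
```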
| 5411 |  |  | 
| 5412 |  | namespace { | 
| 5413 |  | class CompareInvMempoolOrder | 
| 5414 |  | { | 
| 5415 |  |     const CTxMemPool* m_mempool; | 
| 5416 |  | public: | 
| 5417 | 0 |     explicit CompareInvMempoolOrder(CTxMemPool* mempool) : m_mempool{mempool} {} | 
| 5418 |  |  | 
| 5419 |  |     bool operator()(std::set<Wtxid>::iterator a, std::set<Wtxid>::iterator b) | 
| 5420 | 0 |     { | 
| 5421 |  |         /* As std::make_heap produces a max-heap, we want the entries with the | 
| 5422 |  |          * fewest ancestors/highest fee to sort later. */ | 
| 5423 | 0 |         return m_mempool->CompareDepthAndScore(*b, *a); | 
| 5424 | 0 |     } | 
| 5425 |  | }; | 
| 5426 |  | } // namespace | 
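A small standalone illustration of the comparator inversion documented in `CompareInvMempoolOrder`: `std::make_heap` builds a max-heap, so by comparing with swapped arguments the entry that should be announced first becomes the heap top and pops first, and entries that are never popped never need full sorting. `SendBefore` is a hypothetical stand-in for `CompareDepthAndScore`:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// Hypothetical mempool ordering: true if a should be announced before b
// (here, a higher score means announce earlier).
bool SendBefore(int a, int b) { return a > b; }

int main()
{
    std::vector<int> scores{3, 9, 1, 7, 5};
    // std::make_heap produces a max-heap, so the arguments are swapped to
    // make the announce-first entry compare as "largest" and sort later.
    auto comp = [](int a, int b) { return SendBefore(b, a); };
    std::make_heap(scores.begin(), scores.end(), comp);

    // Pop only the top two; the rest of the vector stays unsorted.
    for (int i = 0; i < 2; ++i) {
        std::pop_heap(scores.begin(), scores.end(), comp);
        std::cout << scores.back() << ' '; // prints "9 7"
        scores.pop_back();
    }
    std::cout << '\n';
}
```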
| 5427 |  |  | 
| 5428 |  | bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const | 
| 5429 | 0 | { | 
| 5430 |  |     // block-relay-only peers may never send txs to us | 
| 5431 | 0 |     if (peer.IsBlockOnlyConn()) return true; | 
| 5432 | 0 |     if (peer.IsFeelerConn()) return true; | 
| 5433 |  |     // In -blocksonly mode, peers need the 'relay' permission to send txs to us | 
| 5434 | 0 |     if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; | 
| 5435 | 0 |     return false; | 
| 5436 | 0 | } | 
| 5437 |  |  | 
| 5438 |  | bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) | 
| 5439 | 0 | { | 
| 5440 |  |     // We don't participate in addr relay with outbound block-relay-only | 
| 5441 |  |     // connections to prevent providing adversaries with the additional | 
| 5442 |  |     // information of addr traffic to infer the link. | 
| 5443 | 0 |     if (node.IsBlockOnlyConn()) return false; | 
| 5444 |  |  | 
| 5445 | 0 |     if (!peer.m_addr_relay_enabled.exchange(true)) { | 
| 5446 |  |         // During version message processing (non-block-relay-only outbound peers) | 
| 5447 |  |         // or on first addr-related message we have received (inbound peers), initialize | 
| 5448 |  |         // m_addr_known. | 
| 5449 | 0 |         peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001); | 
| 5450 | 0 |     } | 
| 5451 |  |  | 
| 5452 | 0 |     return true; | 
| 5453 | 0 | } | 
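The `exchange(true)` call in `SetupAddressRelay` doubles as a once-only guard: only the caller that observes the previous value `false` runs the `m_addr_known` initialization. A sketch of the idiom, with `Filter` standing in for `CRollingBloomFilter(5000, 0.001)`:

```cpp
#include <atomic>
#include <cstddef>
#include <iostream>
#include <memory>

// Stand-in for CRollingBloomFilter(5000, 0.001).
struct Filter { std::size_t n_elements; double fp_rate; };

struct PeerSketch {
    std::atomic<bool> addr_relay_enabled{false};
    std::unique_ptr<Filter> addr_known;
};

bool EnableAddrRelayOnce(PeerSketch& peer)
{
    if (!peer.addr_relay_enabled.exchange(true)) {
        // First caller wins: exchange() returned the previous value (false),
        // so the one-time initialization runs exactly once.
        peer.addr_known = std::make_unique<Filter>(Filter{5000, 0.001});
        return true;
    }
    return false; // already enabled by an earlier call
}

int main()
{
    PeerSketch peer;
    std::cout << std::boolalpha << EnableAddrRelayOnce(peer) << ' ' // true
              << EnableAddrRelayOnce(peer) << '\n';                 // false
}
```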
| 5454 |  |  | 
| 5455 |  | bool PeerManagerImpl::SendMessages(CNode* pto) | 
| 5456 | 0 | { | 
| 5457 | 0 |     AssertLockNotHeld(m_tx_download_mutex); | 
| 5458 | 0 |     AssertLockHeld(g_msgproc_mutex); | 
| 5459 |  |  | 
| 5460 | 0 |     PeerRef peer = GetPeerRef(pto->GetId()); | 
| 5461 | 0 |     if (!peer) return false; | 
| 5462 | 0 |     const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); | 
| 5463 |  |  | 
| 5464 |  |     // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll | 
| 5465 |  |     // disconnect misbehaving peers even before the version handshake is complete. | 
| 5466 | 0 |     if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; | 
| 5467 |  |  | 
| 5468 |  |     // Initiate version handshake for outbound connections | 
| 5469 | 0 |     if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) { | 
| 5470 | 0 |         PushNodeVersion(*pto, *peer); | 
| 5471 | 0 |         peer->m_outbound_version_message_sent = true; | 
| 5472 | 0 |     } | 
| 5473 |  |  | 
| 5474 |  |     // Don't send anything until the version handshake is complete | 
| 5475 | 0 |     if (!pto->fSuccessfullyConnected || pto->fDisconnect) | 
| 5476 | 0 |         return true; | 
| 5477 |  |  | 
| 5478 | 0 |     const auto current_time{GetTime<std::chrono::microseconds>()}; | 
| 5479 |  |  | 
| 5480 | 0 |     if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) { | 
| 5481 | 0 |         LogDebug(BCLog::NET, "addrfetch connection timeout, %s\n", pto->DisconnectMsg(fLogIPs)); | 
| 5482 | 0 |         pto->fDisconnect = true; | 
| 5483 | 0 |         return true; | 
| 5484 | 0 |     } | 
| 5485 |  |  | 
| 5486 | 0 |     MaybeSendPing(*pto, *peer, current_time); | 
| 5487 |  |  | 
| 5488 |  |     // MaybeSendPing may have marked peer for disconnection | 
| 5489 | 0 |     if (pto->fDisconnect) return true; | 
| 5490 |  |  | 
| 5491 | 0 |     MaybeSendAddr(*pto, *peer, current_time); | 
| 5492 |  |  | 
| 5493 | 0 |     MaybeSendSendHeaders(*pto, *peer); | 
| 5494 |  |  | 
| 5495 | 0 |     { | 
| 5496 | 0 |         LOCK(cs_main); | 
| 5497 |  |  | 
| 5498 | 0 |         CNodeState &state = *State(pto->GetId()); | 
| 5499 |  |  | 
| 5500 |  |         // Start block sync | 
| 5501 | 0 |         if (m_chainman.m_best_header == nullptr) { | 
| 5502 | 0 |             m_chainman.m_best_header = m_chainman.ActiveChain().Tip(); | 
| 5503 | 0 |         } | 
| 5504 |  |  | 
| 5505 |  |         // Determine whether we might try initial headers sync or parallel | 
| 5506 |  |         // block download from this peer -- this mostly affects behavior while | 
| 5507 |  |         // in IBD (once out of IBD, we sync from all peers). | 
| 5508 | 0 |         bool sync_blocks_and_headers_from_peer = false; | 
| 5509 | 0 |         if (state.fPreferredDownload) { | 
| 5510 | 0 |             sync_blocks_and_headers_from_peer = true; | 
| 5511 | 0 |         } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) { | 
| 5512 |  |             // Typically this is an inbound peer. If we don't have any outbound | 
| 5513 |  |             // peers, or if we aren't downloading any blocks from such peers, | 
| 5514 |  |             // then allow block downloads from this peer, too. | 
| 5515 |  |             // We prefer downloading blocks from outbound peers to avoid | 
| 5516 |  |             // putting undue load on (say) some home user who is just making | 
| 5517 |  |             // outbound connections to the network, but if our only source of | 
| 5518 |  |             // the latest blocks is from an inbound peer, we have to be sure to | 
| 5519 |  |             // eventually download it (and not just wait indefinitely for an | 
| 5520 |  |             // outbound peer to have it). | 
| 5521 | 0 |             if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) { | 
| 5522 | 0 |                 sync_blocks_and_headers_from_peer = true; | 
| 5523 | 0 |             } | 
| 5524 | 0 |         } | 
| 5525 |  |  | 
| 5526 | 0 |         if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) { | 
| 5527 |  |             // Only actively request headers from a single peer, unless we're close to today. | 
| 5528 | 0 |             if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) { | 
| 5529 | 0 |                 const CBlockIndex* pindexStart = m_chainman.m_best_header; | 
| 5530 |  |                 /* If possible, start at the block preceding the currently | 
| 5531 |  |                    best known header.  This ensures that we always get a | 
| 5532 |  |                    non-empty list of headers back as long as the peer | 
| 5533 |  |                    is up-to-date.  With a non-empty response, we can initialise | 
| 5534 |  |                    the peer's known best block.  This wouldn't be possible | 
| 5535 |  |                    if we requested starting at m_chainman.m_best_header and | 
| 5536 |  |                    got back an empty response.  */ | 
| 5537 | 0 |                 if (pindexStart->pprev) | 
| 5538 | 0 |                     pindexStart = pindexStart->pprev; | 
| 5539 | 0 |                 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) { | 
| 5540 | 0 |                     LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); | 
| 5541 |  |  | 
| 5542 | 0 |                     state.fSyncStarted = true; | 
| 5543 | 0 |                     peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + | 
| 5544 | 0 |                         ( | 
| 5545 |  |                          // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling | 
| 5546 |  |                          // to maintain precision | 
| 5547 | 0 |                          std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * | 
| 5548 | 0 |                          Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing | 
| 5549 | 0 |                         ); | 
| 5550 | 0 |                     nSyncStarted++; | 
| 5551 | 0 |                 } | 
| 5552 | 0 |             } | 
| 5553 | 0 |         } | 
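A worked example of the headers-sync timeout computed above: the base timeout plus a per-expected-header allowance, where the expected header count is how far `m_best_header` lags behind the current time, divided by the target spacing. The 15-minute base and 1 ms-per-header values are assumed to match `HEADERS_DOWNLOAD_TIMEOUT_BASE` and `HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER` declared near the top of this file:

```cpp
#include <chrono>
#include <iostream>

int main()
{
    using namespace std::chrono;
    const auto base = 15min;     // HEADERS_DOWNLOAD_TIMEOUT_BASE (assumed)
    const auto per_header = 1ms; // HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER (assumed)
    const auto spacing = 600s;   // nPowTargetSpacing on mainnet

    // Suppose our best header is about one year behind the current time.
    const auto behind = hours{24 * 365};
    const auto expected_headers =
        duration_cast<seconds>(behind).count() / spacing.count();
    const auto timeout = duration_cast<seconds>(base + per_header * expected_headers);
    std::cout << "expected headers: " << expected_headers   // 52560
              << ", timeout: " << timeout.count() << "s\n"; // ~952s
}
```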
| 5554 |  |  | 
| 5555 |  |         // | 
| 5556 |  |         // Try sending block announcements via headers | 
| 5557 |  |         // | 
| 5558 | 0 |         { | 
| 5559 |  |             // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our | 
| 5560 |  |             // list of block hashes we're relaying, and our peer wants | 
| 5561 |  |             // headers announcements, then find the first header | 
| 5562 |  |             // that is not yet known to our peer but would connect, and send. | 
| 5563 |  |             // If no header would connect, or if we have too many | 
| 5564 |  |             // blocks, or if the peer doesn't want headers, just | 
| 5565 |  |             // add all to the inv queue. | 
| 5566 | 0 |             LOCK(peer->m_block_inv_mutex); | 
| 5567 | 0 |             std::vector<CBlock> vHeaders; | 
| 5568 | 0 |             bool fRevertToInv = ((!peer->m_prefers_headers && | 
| 5569 | 0 |                                  (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) || | 
| 5570 | 0 |                                  peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE); | 
| 5571 | 0 |             const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery | 
| 5572 | 0 |             ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date | 
| 5573 |  |  | 
| 5574 | 0 |             if (!fRevertToInv) { | 
| 5575 | 0 |                 bool fFoundStartingHeader = false; | 
| 5576 |  |                 // Try to find first header that our peer doesn't have, and | 
| 5577 |  |                 // then send all headers past that one.  If we come across any | 
| 5578 |  |                 // headers that aren't on m_chainman.ActiveChain(), give up. | 
| 5579 | 0 |                 for (const uint256& hash : peer->m_blocks_for_headers_relay) { | 
| 5580 | 0 |                     const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); | 
| 5581 | 0 |                     assert(pindex); | 
| 5582 | 0 |                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { | 
| 5583 |  |                         // Bail out if we reorged away from this block | 
| 5584 | 0 |                         fRevertToInv = true; | 
| 5585 | 0 |                         break; | 
| 5586 | 0 |                     } | 
| 5587 | 0 |                     if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { | 
| 5588 |  |                         // This means that the list of blocks to announce doesn't | 
| 5589 |  |                         // connect to each other. | 
| 5590 |  |                         // This shouldn't really be possible to hit during | 
| 5591 |  |                         // regular operation (because reorgs should take us to | 
| 5592 |  |                         // a chain that has some block not on the prior chain, | 
| 5593 |  |                         // which should be caught by the prior check), but one | 
| 5594 |  |                         // way this could happen is by using invalidateblock / | 
| 5595 |  |                         // reconsiderblock repeatedly on the tip, causing it to | 
| 5596 |  |                         // be added multiple times to m_blocks_for_headers_relay. | 
| 5597 |  |                         // Robustly deal with this rare situation by reverting | 
| 5598 |  |                         // to an inv. | 
| 5599 | 0 |                         fRevertToInv = true; | 
| 5600 | 0 |                         break; | 
| 5601 | 0 |                     } | 
| 5602 | 0 |                     pBestIndex = pindex; | 
| 5603 | 0 |                     if (fFoundStartingHeader) { | 
| 5604 |  |                         // add this to the headers message | 
| 5605 | 0 |                         vHeaders.emplace_back(pindex->GetBlockHeader()); | 
| 5606 | 0 |                     } else if (PeerHasHeader(&state, pindex)) { | 
| 5607 | 0 |                         continue; // keep looking for the first new block | 
| 5608 | 0 |                     } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { | 
| 5609 |  |                         // Peer doesn't have this header but they do have the prior one. | 
| 5610 |  |                         // Start sending headers. | 
| 5611 | 0 |                         fFoundStartingHeader = true; | 
| 5612 | 0 |                         vHeaders.emplace_back(pindex->GetBlockHeader()); | 
| 5613 | 0 |                     } else { | 
| 5614 |  |                         // Peer doesn't have this header or the prior one -- nothing will | 
| 5615 |  |                         // connect, so bail out. | 
| 5616 | 0 |                         fRevertToInv = true; | 
| 5617 | 0 |                         break; | 
| 5618 | 0 |                     } | 
| 5619 | 0 |                 } | 
| 5620 | 0 |             } | 
| 5621 | 0 |             if (!fRevertToInv && !vHeaders.empty()) { | 
| 5622 | 0 |                 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) { | 
| 5623 |  |                     // We only send up to 1 block as header-and-ids, as sending more | 
| 5624 |  |                     // probably means we're doing an initial-ish-sync or they're slow | 
| 5625 | 0 |                     LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, | 
| 5626 | 0 |                             vHeaders.front().GetHash().ToString(), pto->GetId()); | 
| 5627 |  |  | 
| 5628 | 0 |                     std::optional<CSerializedNetMsg> cached_cmpctblock_msg; | 
| 5629 | 0 |                     { | 
| 5630 | 0 |                         LOCK(m_most_recent_block_mutex); | 
| 5631 | 0 |                         if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { | 
| 5632 | 0 |                             cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block); | 
| 5633 | 0 |                         } | 
| 5634 | 0 |                     } | 
| 5635 | 0 |                     if (cached_cmpctblock_msg.has_value()) { | 
| 5636 | 0 |                         PushMessage(*pto, std::move(cached_cmpctblock_msg.value())); | 
| 5637 | 0 |                     } else { | 
| 5638 | 0 |                         CBlock block; | 
| 5639 | 0 |                         const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)}; | 
| 5640 | 0 |                         assert(ret); | 
| 5641 | 0 |                         CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; | 
| 5642 | 0 |                         MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); | 
| 5643 | 0 |                     } | 
| 5644 | 0 |                     state.pindexBestHeaderSent = pBestIndex; | 
| 5645 | 0 |                 } else if (peer->m_prefers_headers) { | 
| 5646 | 0 |                     if (vHeaders.size() > 1) { | 
| 5647 | 0 |                         LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, | 
| 5648 | 0 |                                 vHeaders.size(), | 
| 5649 | 0 |                                 vHeaders.front().GetHash().ToString(), | 
| 5650 | 0 |                                 vHeaders.back().GetHash().ToString(), pto->GetId()); | 
| 5651 | 0 |                     } else { | 
| 5652 | 0 |                         LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, | 
| 5653 | 0 |                                 vHeaders.front().GetHash().ToString(), pto->GetId()); | 
| 5654 | 0 |                     } | 
| 5655 | 0 |                     MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); | 
| 5656 | 0 |                     state.pindexBestHeaderSent = pBestIndex; | 
| 5657 | 0 |                 } else | 
| 5658 | 0 |                     fRevertToInv = true; | 
| 5659 | 0 |             } | 
| 5660 | 0 |             if (fRevertToInv) { | 
| 5661 |  |                 // If falling back to using an inv, just try to inv the tip. | 
| 5662 |  |                 // The last entry in m_blocks_for_headers_relay was our tip at some point | 
| 5663 |  |                 // in the past. | 
| 5664 | 0 |                 if (!peer->m_blocks_for_headers_relay.empty()) { | 
| 5665 | 0 |                     const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back(); | 
| 5666 | 0 |                     const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce); | 
| 5667 | 0 |                     assert(pindex); | 
| 5668 |  |  | 
| 5669 |  |                     // Warn if we're announcing a block that is not on the main chain. | 
| 5670 |  |                     // This should be very rare and could be optimized out. | 
| 5671 |  |                     // Just log for now. | 
| 5672 | 0 |                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { | 
| 5673 | 0 |                         LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", | 
| 5674 | 0 |                             hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString()); | 
| 5675 | 0 |                     } | 
| 5676 |  |  | 
| 5677 |  |                     // If the peer's chain has this block, don't inv it back. | 
| 5678 | 0 |                     if (!PeerHasHeader(&state, pindex)) { | 
| 5679 | 0 |                         peer->m_blocks_for_inv_relay.push_back(hashToAnnounce); | 
| 5680 | 0 |                         LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, | 
| 5681 | 0 |                             pto->GetId(), hashToAnnounce.ToString()); | 
| 5682 | 0 |                     } | 
| 5683 | 0 |                 } | 
| 5684 | 0 |             } | 
| 5685 | 0 |             peer->m_blocks_for_headers_relay.clear(); | 
| 5686 | 0 |         } | 
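A condensed sketch of the announcement fallback decided in the block above: one fresh block to a high-bandwidth compact-block peer goes out as CMPCTBLOCK, a connectable run of blocks to a SENDHEADERS peer goes out as HEADERS, and everything else reverts to INV. The names and the exact ordering of checks are simplifications of the in-tree logic:

```cpp
#include <cstddef>
#include <iostream>

enum class Announce { CompactBlock, Headers, Inv };

// Hypothetical condensed decision; the real code also reverts to inv when
// the queued blocks do not connect to the peer's known chain.
Announce ChooseAnnouncement(bool prefers_headers, bool requested_hb_cmpctblocks,
                            std::size_t n_blocks, std::size_t max_blocks)
{
    if (n_blocks == 0 || n_blocks > max_blocks) return Announce::Inv;
    if (n_blocks == 1 && requested_hb_cmpctblocks) return Announce::CompactBlock;
    if (prefers_headers) return Announce::Headers;
    return Announce::Inv;
}

int main()
{
    // One new tip, and the peer asked for high-bandwidth compact blocks:
    std::cout << (ChooseAnnouncement(false, true, 1, 8) == Announce::CompactBlock) << '\n'; // 1
    // Several new blocks, and the peer sent SENDHEADERS:
    std::cout << (ChooseAnnouncement(true, false, 3, 8) == Announce::Headers) << '\n';      // 1
}
```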
| 5687 |  |  | 
| 5688 |  |         // | 
| 5689 |  |         // Message: inventory | 
| 5690 |  |         // | 
| 5691 | 0 |         std::vector<CInv> vInv; | 
| 5692 | 0 |         { | 
| 5693 | 0 |             LOCK(peer->m_block_inv_mutex); | 
| 5694 | 0 |             vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET)); | 
| 5695 |  |  | 
| 5696 |  |             // Add blocks | 
| 5697 | 0 |             for (const uint256& hash : peer->m_blocks_for_inv_relay) { | 
| 5698 | 0 |                 vInv.emplace_back(MSG_BLOCK, hash); | 
| 5699 | 0 |                 if (vInv.size() == MAX_INV_SZ) { | 
| 5700 | 0 |                     MakeAndPushMessage(*pto, NetMsgType::INV, vInv); | 
| 5701 | 0 |                     vInv.clear(); | 
| 5702 | 0 |                 } | 
| 5703 | 0 |             } | 
| 5704 | 0 |             peer->m_blocks_for_inv_relay.clear(); | 
| 5705 | 0 |         } | 
| 5706 |  |  | 
| 5707 | 0 |         if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { | 
| 5708 | 0 |                 LOCK(tx_relay->m_tx_inventory_mutex); | 
| 5709 |  |                 // Check whether periodic sends should happen | 
| 5710 | 0 |                 bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan); | 
| 5711 | 0 |                 if (tx_relay->m_next_inv_send_time < current_time) { | 
| 5712 | 0 |                     fSendTrickle = true; | 
| 5713 | 0 |                     if (pto->IsInboundConn()) { | 
| 5714 | 0 |                         tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); | 
| 5715 | 0 |                     } else { | 
| 5716 | 0 |                         tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); | 
| 5717 | 0 |                     } | 
| 5718 | 0 |                 } | 
| 5719 |  |  | 
| 5720 |  |                 // Time to send but the peer has requested we not relay transactions. | 
| 5721 | 0 |                 if (fSendTrickle) { | 
| 5722 | 0 |                     LOCK(tx_relay->m_bloom_filter_mutex); | 
| 5723 | 0 |                     if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear(); | 
| 5724 | 0 |                 } | 
| 5725 |  |  | 
| 5726 |  |                 // Respond to BIP35 mempool requests | 
| 5727 | 0 |                 if (fSendTrickle && tx_relay->m_send_mempool) { | 
| 5728 | 0 |                     auto vtxinfo = m_mempool.infoAll(); | 
| 5729 | 0 |                     tx_relay->m_send_mempool = false; | 
| 5730 | 0 |                     const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; | 
| 5731 |  |  | 
| 5732 | 0 |                     LOCK(tx_relay->m_bloom_filter_mutex); | 
| 5733 |  |  | 
| 5734 | 0 |                     for (const auto& txinfo : vtxinfo) { | 
| 5735 | 0 |                         const Txid& txid{txinfo.tx->GetHash()}; | 
| 5736 | 0 |                         const Wtxid& wtxid{txinfo.tx->GetWitnessHash()}; | 
| 5737 | 0 |                         const auto inv = peer->m_wtxid_relay ? | 
| 5738 | 0 |                                              CInv{MSG_WTX, wtxid.ToUint256()} : | 
| 5739 | 0 |                                              CInv{MSG_TX, txid.ToUint256()}; | 
| 5740 | 0 |                         tx_relay->m_tx_inventory_to_send.erase(wtxid); | 
| 5741 |  |  | 
| 5742 |  |                         // Don't send transactions that peers will not put into their mempool | 
| 5743 | 0 |                         if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { | 
| 5744 | 0 |                             continue; | 
| 5745 | 0 |                         } | 
| 5746 | 0 |                         if (tx_relay->m_bloom_filter) { | 
| 5747 | 0 |                             if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; | 
| 5748 | 0 |                         } | 
| 5749 | 0 |                         tx_relay->m_tx_inventory_known_filter.insert(inv.hash); | 
| 5750 | 0 |                         vInv.push_back(inv); | 
| 5751 | 0 |                         if (vInv.size() == MAX_INV_SZ) { | 
| 5752 | 0 |                             MakeAndPushMessage(*pto, NetMsgType::INV, vInv); | 
| 5753 | 0 |                             vInv.clear(); | 
| 5754 | 0 |                         } | 
| 5755 | 0 |                     } | 
| 5756 | 0 |                 } | 
| 5757 |  |  | 
| 5758 |  |                 // Determine transactions to relay | 
| 5759 | 0 |                 if (fSendTrickle) { | 
| 5760 |  |                     // Produce a vector with all candidates for sending | 
| 5761 | 0 |                     std::vector<std::set<Wtxid>::iterator> vInvTx; | 
| 5762 | 0 |                     vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size()); | 
| 5763 | 0 |                     for (std::set<Wtxid>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) { | 
| 5764 | 0 |                         vInvTx.push_back(it); | 
| 5765 | 0 |                     } | 
| 5766 | 0 |                     const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; | 
| 5767 |  |                     // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. | 
| 5768 |  |                     // A heap is used so that not all items need sorting if only a few are being sent. | 
| 5769 | 0 |                     CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); | 
| 5770 | 0 |                     std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); | 
| 5771 |  |                     // No reason to drain out at many times the network's capacity, | 
| 5772 |  |                     // especially since we have many peers and some will draw much shorter delays. | 
| 5773 | 0 |                     unsigned int nRelayedTransactions = 0; | 
| 5774 | 0 |                     LOCK(tx_relay->m_bloom_filter_mutex); | 
| 5775 | 0 |                     size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5}; | 
| 5776 | 0 |                     broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max); | 
| 5777 | 0 |                     while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) { | 
| 5778 |  |                         // Fetch the top element from the heap | 
| 5779 | 0 |                         std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); | 
| 5780 | 0 |                         std::set<Wtxid>::iterator it = vInvTx.back(); | 
| 5781 | 0 |                         vInvTx.pop_back(); | 
| 5782 | 0 |                         auto wtxid = *it; | 
| 5783 |  |                         // Remove it from the to-be-sent set | 
| 5784 | 0 |                         tx_relay->m_tx_inventory_to_send.erase(it); | 
| 5785 |  |                         // Not in the mempool anymore? don't bother sending it. | 
| 5786 | 0 |                         auto txinfo = m_mempool.info(wtxid); | 
| 5787 | 0 |                         if (!txinfo.tx) { | 
| 5788 | 0 |                             continue; | 
| 5789 | 0 |                         } | 
| 5790 |  |                         // `TxRelay::m_tx_inventory_known_filter` contains either txids or wtxids | 
| 5791 |  |                         // depending on whether our peer supports wtxid-relay. Therefore, first | 
| 5792 |  |                         // construct the inv and then use its hash for the filter check. | 
| 5793 | 0 |                         const auto inv = peer->m_wtxid_relay ? | 
| 5794 | 0 |                                              CInv{MSG_WTX, wtxid.ToUint256()} : | 
| 5795 | 0 |                                              CInv{MSG_TX, txinfo.tx->GetHash().ToUint256()}; | 
| 5796 |  |                         // Check if not in the filter already | 
| 5797 | 0 |                         if (tx_relay->m_tx_inventory_known_filter.contains(inv.hash)) { | 
| 5798 | 0 |                             continue; | 
| 5799 | 0 |                         } | 
| 5800 |  |                         // Peer told us not to send transactions below that feerate? Don't bother sending it. | 
| 5801 | 0 |                         if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { | 
| 5802 | 0 |                             continue; | 
| 5803 | 0 |                         } | 
| 5804 | 0 |                         if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; | 
| 5805 |  |                         // Send | 
| 5806 | 0 |                         vInv.push_back(inv); | 
| 5807 | 0 |                         nRelayedTransactions++; | 
| 5808 | 0 |                         if (vInv.size() == MAX_INV_SZ) { | 
| 5809 | 0 |                             MakeAndPushMessage(*pto, NetMsgType::INV, vInv); | 
| 5810 | 0 |                             vInv.clear(); | 
| 5811 | 0 |                         } | 
| 5812 | 0 |                         tx_relay->m_tx_inventory_known_filter.insert(inv.hash); | 
| 5813 | 0 |                     } | 
| 5814 |  |  | 
| 5815 |  |                     // Ensure we'll respond to GETDATA requests for anything we've just announced | 
| 5816 | 0 |                     LOCK(m_mempool.cs); | 
| 5817 | 0 |                     tx_relay->m_last_inv_sequence = m_mempool.GetSequence(); | 
| 5818 | 0 |                 } | 
| 5819 | 0 |         } | 
| 5820 | 0 |         if (!vInv.empty()) | 
| 5821 | 0 |             MakeAndPushMessage(*pto, NetMsgType::INV, vInv); | 
| 5822 |  |  | 
| 5823 |  |         // Detect whether we're stalling | 
| 5824 | 0 |         auto stalling_timeout = m_block_stalling_timeout.load(); | 
| 5825 | 0 |         if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) { | 
| 5826 |  |             // Stalling only triggers when the block download window cannot move. During normal steady state, | 
| 5827 |  |             // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection | 
| 5828 |  |             // should only happen during initial block download. | 
| 5829 | 0 |             LogInfo("Peer is stalling block download, %s\n", pto->DisconnectMsg(fLogIPs)); | 
| 5830 | 0 |             pto->fDisconnect = true; | 
| 5831 |  |             // Increase timeout for the next peer so that we don't disconnect multiple peers if our own | 
| 5832 |  |             // bandwidth is insufficient. | 
| 5833 | 0 |             const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX); | 
| 5834 | 0 |             if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { | 
| 5835 | 0 |                 LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout)); | 
| 5836 | 0 |             } | 
| 5837 | 0 |             return true; | 
| 5838 | 0 |         } | 
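When a stall triggers a disconnect, the code above doubles the shared stalling timeout (up to a cap) through `compare_exchange_strong`, so concurrent handlers cannot push it past the cap or double it twice for a single observation. A sketch of the idiom, with 2 s / 64 s assumed for the default and maximum timeout constants:

```cpp
#include <algorithm>
#include <atomic>
#include <chrono>
#include <iostream>

// Shared, adaptive timeout; 2s/64s are assumed stand-ins for
// BLOCK_STALLING_TIMEOUT_DEFAULT and BLOCK_STALLING_TIMEOUT_MAX.
std::atomic<std::chrono::seconds> g_stalling_timeout{std::chrono::seconds{2}};

void OnStallDisconnect()
{
    auto timeout = g_stalling_timeout.load();
    const auto new_timeout = std::min(2 * timeout, std::chrono::seconds{64});
    // compare_exchange_strong only raises the value if no other thread beat
    // us to it, so the timeout grows at most once per observed stall.
    if (timeout != new_timeout &&
        g_stalling_timeout.compare_exchange_strong(timeout, new_timeout)) {
        std::cout << "stalling timeout raised to " << new_timeout.count() << "s\n";
    }
}

int main()
{
    for (int i = 0; i < 6; ++i) OnStallDisconnect(); // 4s, 8s, 16s, 32s, 64s, then capped
}
```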
| 5839 |  |         // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N) | 
| 5840 |  |         // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. | 
| 5841 |  |         // We compensate for other peers to prevent killing off peers due to our own downstream link | 
| 5842 |  |         // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes | 
| 5843 |  |         // to unreasonably increase our timeout. | 
| 5844 | 0 |         if (state.vBlocksInFlight.size() > 0) { | 
| 5845 | 0 |             QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); | 
| 5846 | 0 |             int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; | 
| 5847 | 0 |             if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { | 
| 5848 | 0 |                 LogInfo("Timeout downloading block %s, %s\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->DisconnectMsg(fLogIPs)); | 
| 5849 | 0 |                 pto->fDisconnect = true; | 
| 5850 | 0 |                 return true; | 
| 5851 | 0 |             } | 
| 5852 | 0 |         } | 
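A worked example of the in-flight block timeout checked above, following the documented formula `block_interval * (1 + 0.5 * N)` with N the number of other peers we are downloading validated blocks from; the values 1 and 0.5 are assumed for `BLOCK_DOWNLOAD_TIMEOUT_BASE` and `BLOCK_DOWNLOAD_TIMEOUT_PER_PEER`:

```cpp
#include <chrono>
#include <iostream>

int main()
{
    const auto block_interval = std::chrono::seconds{600}; // 10-minute target spacing
    const double timeout_base = 1.0;      // BLOCK_DOWNLOAD_TIMEOUT_BASE (assumed)
    const double timeout_per_peer = 0.5;  // BLOCK_DOWNLOAD_TIMEOUT_PER_PEER (assumed)

    // The more peers we are downloading validated blocks from, the longer a
    // single block may stay in flight before the peer is timed out.
    for (int other_peers : {0, 1, 3}) {
        const auto timeout =
            block_interval * (timeout_base + timeout_per_peer * other_peers);
        std::cout << "other peers: " << other_peers << " -> timeout "
                  << std::chrono::duration_cast<std::chrono::seconds>(timeout).count()
                  << "s\n"; // 600s, 900s, 1500s
    }
}
```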
| 5853 |  |         // Check for headers sync timeouts | 
| 5854 | 0 |         if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) { | 
| 5855 |  |             // Detect whether this is a stalling initial-headers-sync peer | 
| 5856 | 0 |             if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) { | 
| 5857 | 0 |                 if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { | 
| 5858 |  |                     // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, | 
| 5859 |  |                     // and we have others we could be using instead. | 
| 5860 |  |                     // Note: If all our peers are inbound, then we won't | 
| 5861 |  |                     // disconnect our sync peer for stalling; we have bigger | 
| 5862 |  |                     // problems if we can't get any outbound peers. | 
| 5863 | 0 |                     if (!pto->HasPermission(NetPermissionFlags::NoBan)) { | 
| 5864 | 0 |                         LogInfo("Timeout downloading headers, %s\n", pto->DisconnectMsg(fLogIPs)); | 
| 5865 | 0 |                         pto->fDisconnect = true; | 
| 5866 | 0 |                         return true; | 
| 5867 | 0 |                     } else { | 
| 5868 | 0 |                         LogInfo("Timeout downloading headers from noban peer, not %s\n", pto->DisconnectMsg(fLogIPs)); | 
| 5869 |  |                         // Reset the headers sync state so that we have a | 
| 5870 |  |                         // chance to try downloading from a different peer. | 
| 5871 |  |                         // Note: this will also result in at least one more | 
| 5872 |  |                         // getheaders message to be sent to | 
| 5873 |  |                         // this peer (eventually). | 
| 5874 | 0 |                         state.fSyncStarted = false; | 
| 5875 | 0 |                         nSyncStarted--; | 
| 5876 | 0 |                         peer->m_headers_sync_timeout = 0us; | 
| 5877 | 0 |                     } | 
| 5878 | 0 |                 } | 
| 5879 | 0 |             } else { | 
| 5880 |  |                 // After we've caught up once, reset the timeout so we can't trigger | 
| 5881 |  |                 // disconnect later. | 
| 5882 | 0 |                 peer->m_headers_sync_timeout = std::chrono::microseconds::max(); | 
| 5883 | 0 |             } | 
| 5884 | 0 |         } | 
| 5885 |  |  | 
| 5886 |  |         // Check that outbound peers have reasonable chains | 
| 5887 |  |         // GetTime() is used by this anti-DoS logic so we can test this using mocktime | 
| 5888 | 0 |         ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>()); | 
| 5889 |  |  | 
| 5890 |  |         // | 
| 5891 |  |         // Message: getdata (blocks) | 
| 5892 |  |         // | 
| 5893 | 0 |         std::vector<CInv> vGetData; | 
| 5894 | 0 |         if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { | 
| 5895 | 0 |             std::vector<const CBlockIndex*> vToDownload; | 
| 5896 | 0 |             NodeId staller = -1; | 
| 5897 | 0 |             auto get_inflight_budget = [&state]() { | 
| 5898 | 0 |                 return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size())); | 
| 5899 | 0 |             }; | 
| 5900 |  |  | 
| 5901 |  |             // If a snapshot chainstate is in use, we want to find its next blocks | 
| 5902 |  |             // before the background chainstate to prioritize getting to network tip. | 
| 5903 | 0 |             FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); | 
| 5904 | 0 |             if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { | 
| 5905 |  |                 // If the background tip is not an ancestor of the snapshot block, | 
| 5906 |  |                 // we need to start requesting blocks from their last common ancestor. | 
| 5907 | 0 |                 const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock()); | 
| 5908 | 0 |                 TryDownloadingHistoricalBlocks( | 
| 5909 | 0 |                     *peer, | 
| 5910 | 0 |                     get_inflight_budget(), | 
| 5911 | 0 |                     vToDownload, from_tip, | 
| 5912 | 0 |                     Assert(m_chainman.GetSnapshotBaseBlock())); | 
| 5913 | 0 |             } | 
| 5914 | 0 |             for (const CBlockIndex *pindex : vToDownload) { | 
| 5915 | 0 |                 uint32_t nFetchFlags = GetFetchFlags(*peer); | 
| 5916 | 0 |                 vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); | 
| 5917 | 0 |                 BlockRequested(pto->GetId(), *pindex); | 
| 5918 | 0 |                 LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), | 
| 5919 | 0 |                     pindex->nHeight, pto->GetId()); | 
| 5920 | 0 |             } | 
| 5921 | 0 |             if (state.vBlocksInFlight.empty() && staller != -1) { | 
| 5922 | 0 |                 if (State(staller)->m_stalling_since == 0us) { | 
| 5923 | 0 |                     State(staller)->m_stalling_since = current_time; | 
| 5924 | 0 |                     LogDebug(BCLog::NET, "Stall started peer=%d\n", staller); | 
| 5925 | 0 |                 } | 
| 5926 | 0 |             } | 
| 5927 | 0 |         } | 
| 5928 |  |  | 
| 5929 |  |         // | 
| 5930 |  |         // Message: getdata (transactions) | 
| 5931 |  |         // | 
| 5932 | 0 |         { | 
| 5933 | 0 |             LOCK(m_tx_download_mutex); | 
| 5934 | 0 |             for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) { | 
| 5935 | 0 |                 vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.ToUint256()); | 
| 5936 | 0 |                 if (vGetData.size() >= MAX_GETDATA_SZ) { | 
| 5937 | 0 |                     MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); | 
| 5938 | 0 |                     vGetData.clear(); | 
| 5939 | 0 |                 } | 
| 5940 | 0 |             } | 
| 5941 | 0 |         } | 
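Both the INV and GETDATA paths above use the same flush-on-full batching pattern: append entries and send a message whenever the batch reaches the protocol cap, then flush the remainder afterwards. A minimal sketch, with a cap of 1000 playing the role of `MAX_GETDATA_SZ` (the INV path uses its own, larger `MAX_INV_SZ`):

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

constexpr std::size_t MAX_BATCH{1000}; // plays the role of MAX_GETDATA_SZ

void Flush(std::vector<int>& batch)
{
    if (batch.empty()) return;
    std::cout << "sending message with " << batch.size() << " entries\n";
    batch.clear();
}

int main()
{
    std::vector<int> batch;
    for (int item = 0; item < 2500; ++item) {
        batch.push_back(item);
        if (batch.size() >= MAX_BATCH) Flush(batch); // flush when full
    }
    Flush(batch); // flush the remainder: prints 1000, 1000, 500
}
```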
| 5942 |  |  | 
| 5943 | 0 |         if (!vGetData.empty()) | 
| 5944 | 0 |             MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); | 
| 5945 | 0 |     } // release cs_main | 
| 5946 | 0 |     MaybeSendFeefilter(*pto, *peer, current_time); | 
| 5947 | 0 |     return true; | 
| 5948 | 0 | } |