/root/bitcoin/src/net_processing.cpp
Line | Count | Source |
1 | | // Copyright (c) 2009-2010 Satoshi Nakamoto |
2 | | // Copyright (c) 2009-2022 The Bitcoin Core developers |
3 | | // Distributed under the MIT software license, see the accompanying |
4 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
5 | | |
6 | | #include <net_processing.h> |
7 | | |
8 | | #include <addrman.h> |
9 | | #include <banman.h> |
10 | | #include <blockencodings.h> |
11 | | #include <blockfilter.h> |
12 | | #include <chainparams.h> |
13 | | #include <consensus/amount.h> |
14 | | #include <consensus/validation.h> |
15 | | #include <deploymentstatus.h> |
16 | | #include <hash.h> |
17 | | #include <headerssync.h> |
18 | | #include <index/blockfilterindex.h> |
19 | | #include <kernel/chain.h> |
20 | | #include <kernel/mempool_entry.h> |
21 | | #include <logging.h> |
22 | | #include <merkleblock.h> |
23 | | #include <netbase.h> |
24 | | #include <netmessagemaker.h> |
25 | | #include <node/blockstorage.h> |
26 | | #include <node/timeoffsets.h> |
27 | | #include <node/txdownloadman.h> |
28 | | #include <node/txreconciliation.h> |
29 | | #include <node/warnings.h> |
30 | | #include <policy/fees.h> |
31 | | #include <policy/policy.h> |
32 | | #include <policy/settings.h> |
33 | | #include <primitives/block.h> |
34 | | #include <primitives/transaction.h> |
35 | | #include <random.h> |
36 | | #include <scheduler.h> |
37 | | #include <streams.h> |
38 | | #include <sync.h> |
39 | | #include <tinyformat.h> |
40 | | #include <txmempool.h> |
41 | | #include <txorphanage.h> |
42 | | #include <txrequest.h> |
43 | | #include <util/check.h> |
44 | | #include <util/strencodings.h> |
45 | | #include <util/time.h> |
46 | | #include <util/trace.h> |
47 | | #include <validation.h> |
48 | | |
49 | | #include <algorithm> |
50 | | #include <atomic> |
51 | | #include <future> |
52 | | #include <memory> |
53 | | #include <optional> |
54 | | #include <ranges> |
55 | | #include <typeinfo> |
56 | | #include <utility> |
57 | | |
58 | | using namespace util::hex_literals; |
59 | | |
60 | | /** Headers download timeout. |
61 | | * Timeout = base + per_header * (expected number of headers) */ |
62 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min; |
63 | | static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms; |
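
A minimal sketch of how these two constants combine into a per-peer deadline. The helper name and the expected_headers parameter are illustrative; in the real code the expected header count is estimated from the time gap between our best header and the present, divided by the proof-of-work target spacing.

#include <chrono>

using namespace std::chrono_literals;

// Hypothetical helper, assuming the two constants defined above.
std::chrono::microseconds HeadersDownloadDeadline(std::chrono::microseconds now,
                                                  int64_t expected_headers)
{
    // Timeout = base + per_header * (expected number of headers)
    return now + HEADERS_DOWNLOAD_TIMEOUT_BASE
               + expected_headers * HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER;
}
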
64 | | /** How long to wait for a peer to respond to a getheaders request */ |
65 | | static constexpr auto HEADERS_RESPONSE_TIME{2min}; |
66 | | /** Protect at least this many outbound peers from disconnection due to slow/ |
67 | | * behind headers chain. |
68 | | */ |
69 | | static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; |
70 | | /** Timeout for (unprotected) outbound peers to sync to our chainwork */ |
71 | | static constexpr auto CHAIN_SYNC_TIMEOUT{20min}; |
72 | | /** How frequently to check for stale tips */ |
73 | | static constexpr auto STALE_CHECK_INTERVAL{10min}; |
74 | | /** How frequently to check for extra outbound peers and disconnect */ |
75 | | static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s}; |
76 | | /** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */ |
77 | | static constexpr auto MINIMUM_CONNECT_TIME{30s}; |
78 | | /** SHA256("main address relay")[0:8] */ |
79 | | static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; |
80 | | /// Age after which a stale block will no longer be served if requested as |
81 | | /// protection against fingerprinting. Set to one month, denominated in seconds. |
82 | | static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; |
83 | | /// Age after which a block is considered historical for purposes of rate |
84 | | /// limiting block relay. Set to one week, denominated in seconds. |
85 | | static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; |
86 | | /** Time between pings automatically sent out for latency probing and keepalive */ |
87 | | static constexpr auto PING_INTERVAL{2min}; |
88 | | /** The maximum number of entries in a locator */ |
89 | | static const unsigned int MAX_LOCATOR_SZ = 101; |
90 | | /** The maximum number of entries in an 'inv' protocol message */ |
91 | | static const unsigned int MAX_INV_SZ = 50000; |
92 | | /** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */ |
93 | | static const unsigned int MAX_GETDATA_SZ = 1000; |
94 | | /** Number of blocks that can be requested at any given time from a single peer. */ |
95 | | static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16; |
96 | | /** Default time during which a peer must stall block download progress before being disconnected. |
97 | | * The actual timeout is increased temporarily if peers are disconnected for hitting the timeout */
98 | | static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s}; |
99 | | /** Maximum timeout for stalling block download. */ |
100 | | static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s}; |
101 | | /** Maximum depth of blocks we're willing to serve as compact blocks to peers |
102 | | * when requested. For older blocks, a regular BLOCK response will be sent. */ |
103 | | static const int MAX_CMPCTBLOCK_DEPTH = 5; |
104 | | /** Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests for. */ |
105 | | static const int MAX_BLOCKTXN_DEPTH = 10; |
106 | | static_assert(MAX_BLOCKTXN_DEPTH <= MIN_BLOCKS_TO_KEEP, "MAX_BLOCKTXN_DEPTH too high"); |
107 | | /** Size of the "block download window": how far ahead of our current height do we fetch? |
108 | | * Larger windows tolerate larger download speed differences between peers, but increase the potential
109 | | * degree of disordering of blocks on disk (which makes reindexing and pruning harder). We'll probably
110 | | * want to make this a per-peer adaptive value at some point. */ |
111 | | static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024; |
112 | | /** Block download timeout base, expressed in multiples of the block interval (i.e. 10 min) */ |
113 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1; |
114 | | /** Additional block download timeout per parallel downloading peer (i.e. 5 min) */ |
115 | | static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5; |
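
Since both multipliers scale the consensus block interval, a rough sketch of the resulting deadline (assuming mainnet's 10-minute spacing; the in-tree check compares this budget against how long the first queued block has been downloading):

#include <chrono>

// Illustrative only: one downloading peer yields a 10 min timeout
// (1.0 * interval); each additional parallel peer adds 5 min (0.5 * interval).
std::chrono::seconds BlockDownloadTimeout(int parallel_peers)
{
    const auto block_interval = std::chrono::minutes{10}; // mainnet target spacing
    const double multiplier = BLOCK_DOWNLOAD_TIMEOUT_BASE
                            + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * parallel_peers;
    return std::chrono::duration_cast<std::chrono::seconds>(block_interval * multiplier);
}
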
116 | | /** Maximum number of headers to announce when relaying blocks with headers message.*/ |
117 | | static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; |
118 | | /** Minimum blocks required to signal NODE_NETWORK_LIMITED */ |
119 | | static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; |
120 | | /** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */ |
121 | | static const unsigned int NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS = 144; |
122 | | /** Average delay between local address broadcasts */ |
123 | | static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h}; |
124 | | /** Average delay between peer address broadcasts */ |
125 | | static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s}; |
126 | | /** Delay between rotating the peers we relay a particular address to */ |
127 | | static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h}; |
128 | | /** Average delay between trickled inventory transmissions for inbound peers. |
129 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
130 | | static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s}; |
131 | | /** Average delay between trickled inventory transmissions for outbound peers. |
132 | | * Use a smaller delay as there is less privacy concern for them. |
133 | | * Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */ |
134 | | static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s}; |
135 | | /** Maximum rate of inventory items to send per second. |
136 | | * Limits the impact of low-fee transaction floods. */ |
137 | | static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7; |
138 | | /** Target number of tx inventory items to send per transmission. */ |
139 | | static constexpr unsigned int INVENTORY_BROADCAST_TARGET = INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL); |
140 | | /** Maximum number of inventory items to send per transmission. */ |
141 | | static constexpr unsigned int INVENTORY_BROADCAST_MAX = 1000; |
142 | | static_assert(INVENTORY_BROADCAST_MAX >= INVENTORY_BROADCAST_TARGET, "INVENTORY_BROADCAST_MAX too low"); |
143 | | static_assert(INVENTORY_BROADCAST_MAX <= node::MAX_PEER_TX_ANNOUNCEMENTS, "INVENTORY_BROADCAST_MAX too high"); |
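
As a worked restatement of the arithmetic above (a sketch, relying on count_seconds being constexpr): 7 items per second times the 5 s inbound trickle interval gives a target of 35 items per transmission, which the two static_asserts above pin between the hard cap and the per-peer announcement limit.

static_assert(INVENTORY_BROADCAST_PER_SECOND * count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL) == 35,
              "7 tx/s * 5 s trickle interval = 35 items per transmission");
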
144 | | /** Average delay between feefilter broadcasts in seconds. */ |
145 | | static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min}; |
146 | | /** Maximum feefilter broadcast delay after significant change. */ |
147 | | static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min}; |
148 | | /** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */ |
149 | | static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; |
150 | | /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ |
151 | | static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; |
152 | | /** The maximum percentage of addresses from our addrman to return in response to a getaddr message. */
153 | | static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; |
154 | | /** The maximum number of address records permitted in an ADDR message. */ |
155 | | static constexpr size_t MAX_ADDR_TO_SEND{1000}; |
156 | | /** The maximum rate of address records we're willing to process on average. Can be bypassed using |
157 | | * the NetPermissionFlags::Addr permission. */ |
158 | | static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1}; |
159 | | /** The soft limit of the address processing token bucket (the regular MAX_ADDR_RATE_PER_SECOND |
160 | | * based increments won't go above this, but the MAX_ADDR_TO_SEND increment following GETADDR |
161 | | * is exempt from this limit). */ |
162 | | static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND}; |
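
A self-contained sketch of the token-bucket behavior these constants describe, with simplified names; the real accounting lives in the ADDR handling in ProcessMessage(), and the MAX_ADDR_TO_SEND credit granted after a GETADDR bypasses the soft cap, as noted above.

#include <algorithm>
#include <chrono>

struct AddrTokenBucket {
    double tokens{1.0};                        // start at 1 to permit self-announcement
    std::chrono::microseconds last_refill{0};

    // Refill at MAX_ADDR_RATE_PER_SECOND up to the soft cap, then charge one
    // token per address; a false return means the address is dropped as
    // rate-limited.
    bool Spend(std::chrono::microseconds now)
    {
        const double elapsed = std::chrono::duration<double>(now - last_refill).count();
        tokens = std::min(tokens + MAX_ADDR_RATE_PER_SECOND * elapsed,
                          double(MAX_ADDR_PROCESSING_TOKEN_BUCKET));
        last_refill = now;
        if (tokens < 1.0) return false;
        tokens -= 1.0;
        return true;
    }
};
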
163 | | /** The compactblocks version we support. See BIP 152. */ |
164 | | static constexpr uint64_t CMPCTBLOCKS_VERSION{2}; |
165 | | |
166 | | // Internal stuff |
167 | | namespace { |
168 | | /** Blocks that are in flight, and that are in the queue to be downloaded. */ |
169 | | struct QueuedBlock { |
170 | | /** BlockIndex. We must have this since we only request blocks when we've already validated the header. */ |
171 | | const CBlockIndex* pindex; |
172 | | /** Optional, used for CMPCTBLOCK downloads */ |
173 | | std::unique_ptr<PartiallyDownloadedBlock> partialBlock; |
174 | | }; |
175 | | |
176 | | /** |
177 | | * Data structure for an individual peer. This struct is not protected by |
178 | | * cs_main since it does not contain validation-critical data. |
179 | | * |
180 | | * Memory is owned by shared pointers and this object is destructed when |
181 | | * the refcount drops to zero. |
182 | | * |
183 | | * Mutexes inside this struct must not be held when locking m_peer_mutex. |
184 | | * |
185 | | * TODO: move most members from CNodeState to this structure. |
186 | | * TODO: move remaining application-layer data members from CNode to this structure. |
187 | | */ |
188 | | struct Peer { |
189 | | /** Same id as the CNode object for this peer */ |
190 | | const NodeId m_id{0}; |
191 | | |
192 | | /** Services we offered to this peer. |
193 | | * |
194 | | * This is supplied by CConnman during peer initialization. It's const |
195 | | * because there is no protocol defined for renegotiating services |
196 | | * initially offered to a peer. The set of local services we offer should |
197 | | * not change after initialization. |
198 | | * |
199 | | * An interesting example of this is NODE_NETWORK and initial block |
200 | | * download: a node which starts up from scratch doesn't have any blocks |
201 | | * to serve, but still advertises NODE_NETWORK because it will eventually |
202 | | * fulfill this role after IBD completes. P2P code is written in such a |
203 | | * way that it can gracefully handle peers who don't make good on their |
204 | | * service advertisements. */ |
205 | | const ServiceFlags m_our_services; |
206 | | /** Services this peer offered to us. */ |
207 | | std::atomic<ServiceFlags> m_their_services{NODE_NONE}; |
208 | | |
209 | | //! Whether this peer is an inbound connection |
210 | | const bool m_is_inbound; |
211 | | |
212 | | /** Protects misbehavior data members */ |
213 | | Mutex m_misbehavior_mutex; |
214 | | /** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */ |
215 | | bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; |
216 | | |
217 | | /** Protects block inventory data members */ |
218 | | Mutex m_block_inv_mutex; |
219 | | /** List of blocks that we'll announce via an `inv` message. |
220 | | * There is no final sorting before sending, as they are always sent |
221 | | * immediately and in the order requested. */ |
222 | | std::vector<uint256> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex); |
223 | | /** Unfiltered list of blocks that we'd like to announce via a `headers` |
224 | | * message. If we can't announce via a `headers` message, we'll fall back to |
225 | | * announcing via `inv`. */ |
226 | | std::vector<uint256> m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex); |
227 | | /** The final block hash that we sent in an `inv` message to this peer. |
228 | | * When the peer requests this block, we send an `inv` message to trigger |
229 | | * the peer to request the next sequence of block hashes. |
230 | | * Most peers use headers-first syncing, which doesn't use this mechanism (a sketch of the trigger follows this struct). */
231 | | uint256 m_continuation_block GUARDED_BY(m_block_inv_mutex) {}; |
232 | | |
233 | | /** Set to true once initial VERSION message was sent (only relevant for outbound peers). */ |
234 | | bool m_outbound_version_message_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
235 | | |
236 | | /** This peer's reported block height when we connected */ |
237 | | std::atomic<int> m_starting_height{-1}; |
238 | | |
239 | | /** The pong reply we're expecting, or 0 if no pong expected. */ |
240 | | std::atomic<uint64_t> m_ping_nonce_sent{0}; |
241 | | /** When the last ping was sent, or 0 if no ping was ever sent */ |
242 | | std::atomic<std::chrono::microseconds> m_ping_start{0us}; |
243 | | /** Whether a ping has been requested by the user */ |
244 | | std::atomic<bool> m_ping_queued{false}; |
245 | | |
246 | | /** Whether this peer relays txs via wtxid */ |
247 | | std::atomic<bool> m_wtxid_relay{false}; |
248 | | /** The feerate in the most recent BIP133 `feefilter` message sent to the peer. |
249 | | * It is *not* a p2p protocol violation for the peer to send us |
250 | | * transactions with a lower fee rate than this. See BIP133. */ |
251 | | CAmount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
252 | | /** Timestamp after which we will send the next BIP133 `feefilter` message |
253 | | * to the peer. */ |
254 | | std::chrono::microseconds m_next_send_feefilter GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0}; |
255 | | |
256 | | struct TxRelay { |
257 | | mutable RecursiveMutex m_bloom_filter_mutex; |
258 | | /** Whether we relay transactions to this peer. */ |
259 | | bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false}; |
260 | | /** A bloom filter for which transactions to announce to the peer. See BIP37. */ |
261 | | std::unique_ptr<CBloomFilter> m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex) GUARDED_BY(m_bloom_filter_mutex){nullptr}; |
262 | | |
263 | | mutable RecursiveMutex m_tx_inventory_mutex; |
264 | | /** A filter of all the (w)txids that the peer has announced to |
265 | | * us or we have announced to the peer. We use this to avoid announcing |
266 | | * the same (w)txid to a peer that already has the transaction. */ |
267 | | CRollingBloomFilter m_tx_inventory_known_filter GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001}; |
268 | | /** Set of transaction ids we still have to announce (txid for |
269 | | * non-wtxid-relay peers, wtxid for wtxid-relay peers). We use the |
270 | | * mempool to sort transactions in dependency order before relay, so |
271 | | * this does not have to be sorted. */ |
272 | | std::set<uint256> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex); |
273 | | /** Whether the peer has requested us to send our complete mempool. Only |
274 | | * permitted if the peer has NetPermissionFlags::Mempool or we advertise |
275 | | * NODE_BLOOM. See BIP35. */ |
276 | | bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false}; |
277 | | /** The next time after which we will send an `inv` message containing |
278 | | * transaction announcements to this peer. */ |
279 | | std::chrono::microseconds m_next_inv_send_time GUARDED_BY(m_tx_inventory_mutex){0}; |
280 | | /** The mempool sequence num at which we sent the last `inv` message to this peer. |
281 | | * Can relay txs with lower sequence numbers than this (see CTxMemPool::info_for_relay). */
282 | | uint64_t m_last_inv_sequence GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1}; |
283 | | |
284 | | /** Minimum fee rate with which to filter transaction announcements to this node. See BIP133. */ |
285 | | std::atomic<CAmount> m_fee_filter_received{0}; |
286 | | }; |
287 | | |
288 | | /* Initializes a TxRelay struct for this peer. Can be called at most once for a peer. */ |
289 | | TxRelay* SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
290 | 0 | { |
291 | 0 | LOCK(m_tx_relay_mutex); |
292 | 0 | Assume(!m_tx_relay); |
293 | 0 | m_tx_relay = std::make_unique<Peer::TxRelay>(); |
294 | 0 | return m_tx_relay.get(); |
295 | 0 | }; |
296 | | |
297 | | TxRelay* GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) |
298 | 0 | { |
299 | 0 | return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get()); |
300 | 0 | }; |
301 | | |
302 | | /** A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND. */ |
303 | | std::vector<CAddress> m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
304 | | /** Probabilistic filter to track recent addr messages relayed with this |
305 | | * peer. Used to avoid relaying redundant addresses to this peer. |
306 | | * |
307 | | * We initialize this filter for outbound peers (other than |
308 | | * block-relay-only connections) or when an inbound peer sends us an |
309 | | * address related message (ADDR, ADDRV2, GETADDR). |
310 | | * |
311 | | * Presence of this filter must correlate with m_addr_relay_enabled. |
312 | | **/ |
313 | | std::unique_ptr<CRollingBloomFilter> m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
314 | | /** Whether we are participating in address relay with this connection. |
315 | | * |
316 | | * We set this bool to true for outbound peers (other than |
317 | | * block-relay-only connections), or when an inbound peer sends us an |
318 | | * address related message (ADDR, ADDRV2, GETADDR). |
319 | | * |
320 | | * We use this bool to decide whether a peer is eligible for gossiping |
321 | | * addr messages. This avoids relaying to peers that are unlikely to |
322 | | * forward them, effectively blackholing self announcements. Reasons |
323 | | * peers might not support addr relay on the link include that they connected
324 | | * to us as a block-relay-only peer or they are a light client. |
325 | | * |
326 | | * This field must correlate with whether m_addr_known has been |
327 | | * initialized.*/ |
328 | | std::atomic_bool m_addr_relay_enabled{false}; |
329 | | /** Whether a getaddr request to this peer is outstanding. */ |
330 | | bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
331 | | /** Guards address sending timers. */ |
332 | | mutable Mutex m_addr_send_times_mutex; |
333 | | /** Time point to send the next ADDR message to this peer. */ |
334 | | std::chrono::microseconds m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
335 | | /** Time point to possibly re-announce our local address to this peer. */ |
336 | | std::chrono::microseconds m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0}; |
337 | | /** Whether the peer has signaled support for receiving ADDRv2 (BIP155) |
338 | | * messages, indicating a preference to receive ADDRv2 instead of ADDR ones. */ |
339 | | std::atomic_bool m_wants_addrv2{false}; |
340 | | /** Whether this peer has already sent us a getaddr message. */ |
341 | | bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
342 | | /** Number of addresses that can be processed from this peer. Start at 1 to |
343 | | * permit self-announcement. */ |
344 | | double m_addr_token_bucket GUARDED_BY(NetEventsInterface::g_msgproc_mutex){1.0}; |
345 | | /** When m_addr_token_bucket was last updated */ |
346 | | std::chrono::microseconds m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){GetTime<std::chrono::microseconds>()}; |
347 | | /** Total number of addresses that were dropped due to rate limiting. */ |
348 | | std::atomic<uint64_t> m_addr_rate_limited{0}; |
349 | | /** Total number of addresses that were processed (excludes rate-limited ones). */ |
350 | | std::atomic<uint64_t> m_addr_processed{0}; |
351 | | |
352 | | /** Whether we've sent this peer a getheaders in response to an inv prior to initial-headers-sync completing */ |
353 | | bool m_inv_triggered_getheaders_before_sync GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
354 | | |
355 | | /** Protects m_getdata_requests **/ |
356 | | Mutex m_getdata_requests_mutex; |
357 | | /** Work queue of items requested by this peer **/ |
358 | | std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex); |
359 | | |
360 | | /** Time of the last getheaders message to this peer */ |
361 | | NodeClock::time_point m_last_getheaders_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){}; |
362 | | |
363 | | /** Protects m_headers_sync **/ |
364 | | Mutex m_headers_sync_mutex; |
365 | | /** Headers-sync state for this peer (eg for initial sync, or syncing large |
366 | | * reorgs) **/ |
367 | | std::unique_ptr<HeadersSyncState> m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex) GUARDED_BY(m_headers_sync_mutex) {}; |
368 | | |
369 | | /** Whether we've sent our peer a sendheaders message. **/ |
370 | | std::atomic<bool> m_sent_sendheaders{false}; |
371 | | |
372 | | /** When to potentially disconnect peer for stalling headers download */ |
373 | | std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us}; |
374 | | |
375 | | /** Whether this peer wants invs or headers (when possible) for block announcements */ |
376 | | bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false}; |
377 | | |
378 | | /** Time offset computed during the version handshake based on the |
379 | | * timestamp the peer sent in the version message. */ |
380 | | std::atomic<std::chrono::seconds> m_time_offset{0s}; |
381 | | |
382 | | explicit Peer(NodeId id, ServiceFlags our_services, bool is_inbound) |
383 | 0 | : m_id{id} |
384 | 0 | , m_our_services{our_services} |
385 | 0 | , m_is_inbound{is_inbound} |
386 | 0 | {} |
387 | | |
388 | | private: |
389 | | mutable Mutex m_tx_relay_mutex; |
390 | | |
391 | | /** Transaction relay data. May be a nullptr. */ |
392 | | std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex); |
393 | | }; |
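
A sketch of the m_continuation_block trigger referenced in the struct above (simplified names; the real logic runs while serving getdata): when the peer fetches the block we recorded, queue an inv for our tip so the peer issues its next getblocks round.

void MaybeTriggerContinuation(Peer& peer, const uint256& block_fetched, const uint256& tip_hash)
{
    LOCK(peer.m_block_inv_mutex);
    if (!peer.m_continuation_block.IsNull() && peer.m_continuation_block == block_fetched) {
        // Announcing our tip prompts the peer to request the next batch of hashes.
        peer.m_blocks_for_inv_relay.push_back(tip_hash);
        peer.m_continuation_block.SetNull();
    }
}
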
394 | | |
395 | | using PeerRef = std::shared_ptr<Peer>; |
396 | | |
397 | | /** |
398 | | * Maintain validation-specific state about nodes, protected by cs_main, instead
399 | | * of by CNode's own locks. This simplifies asynchronous operation, where
400 | | * processing of incoming data is done after the ProcessMessage call returns, |
401 | | * and we're no longer holding the node's locks. |
402 | | */ |
403 | | struct CNodeState { |
404 | | //! The best known block we know this peer has announced. |
405 | | const CBlockIndex* pindexBestKnownBlock{nullptr}; |
406 | | //! The hash of the last unknown block this peer has announced. |
407 | | uint256 hashLastUnknownBlock{}; |
408 | | //! The last full block we both have. |
409 | | const CBlockIndex* pindexLastCommonBlock{nullptr}; |
410 | | //! The best header we have sent our peer. |
411 | | const CBlockIndex* pindexBestHeaderSent{nullptr}; |
412 | | //! Whether we've started headers synchronization with this peer. |
413 | | bool fSyncStarted{false}; |
414 | | //! Since when we're stalling block download progress (in microseconds), or 0. |
415 | | std::chrono::microseconds m_stalling_since{0us}; |
416 | | std::list<QueuedBlock> vBlocksInFlight; |
417 | | //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. |
418 | | std::chrono::microseconds m_downloading_since{0us}; |
419 | | //! Whether we consider this a preferred download peer. |
420 | | bool fPreferredDownload{false}; |
421 | | /** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */ |
422 | | bool m_requested_hb_cmpctblocks{false}; |
423 | | /** Whether this peer will send us cmpctblocks if we request them. */ |
424 | | bool m_provides_cmpctblocks{false}; |
425 | | |
426 | | /** State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL logic. |
427 | | * |
428 | | * Both are only in effect for outbound, non-manual, non-protected connections. |
429 | | * Any peer protected (m_protect = true) is not chosen for eviction. A peer is |
430 | | * marked as protected if all of these are true: |
431 | | * - its connection type is IsBlockOnlyConn() == false |
432 | | * - it gave us a valid connecting header |
433 | | * - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet |
434 | | * - its chain tip has at least as much work as ours |
435 | | * |
436 | | * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip, |
437 | | * set a timeout CHAIN_SYNC_TIMEOUT in the future: |
438 | | * - If at timeout their best known block now has more work than our tip |
439 | | * when the timeout was set, then either reset the timeout or clear it |
440 | | * (after comparing against our current tip's work) |
441 | | * - If at timeout their best known block still has less work than our |
442 | | * tip did when the timeout was set, then send a getheaders message, |
443 | | * and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future. |
444 | | * If their best known block is still behind when that new timeout is |
445 | | * reached, disconnect. |
446 | | * |
447 | | * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many outbound peers, |
448 | | * drop the outbound one that least recently announced a new block to us. (A condensed sketch of the CHAIN_SYNC_TIMEOUT state machine follows the enclosing CNodeState struct.)
449 | | */ |
450 | | struct ChainSyncTimeoutState { |
451 | | //! A timeout used for checking whether our peer has sufficiently synced |
452 | | std::chrono::seconds m_timeout{0s}; |
453 | | //! A header with the work we require on our peer's chain |
454 | | const CBlockIndex* m_work_header{nullptr}; |
455 | | //! After timeout is reached, set to true after sending getheaders |
456 | | bool m_sent_getheaders{false}; |
457 | | //! Whether this peer is protected from disconnection due to a bad/slow chain |
458 | | bool m_protect{false}; |
459 | | }; |
460 | | |
461 | | ChainSyncTimeoutState m_chain_sync; |
462 | | |
463 | | //! Time of last new block announcement |
464 | | int64_t m_last_block_announcement{0}; |
465 | | }; |
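
A condensed sketch of the CHAIN_SYNC_TIMEOUT state machine documented above, operating on the ChainSyncTimeoutState fields; `behind` stands in for the chainwork comparison the real ConsiderEviction() performs, and protected peers never reach the disconnect branch.

enum class SyncAction { None, SendGetheaders, Disconnect };

SyncAction EvaluateChainSync(CNodeState::ChainSyncTimeoutState& s, bool behind,
                             std::chrono::seconds now)
{
    if (!behind) {                      // peer caught up: clear any pending timeout
        s.m_timeout = 0s;
        s.m_sent_getheaders = false;
        return SyncAction::None;
    }
    if (s.m_timeout == 0s) {            // first observation: arm the 20 min timeout
        s.m_timeout = now + CHAIN_SYNC_TIMEOUT;
        return SyncAction::None;
    }
    if (now < s.m_timeout) return SyncAction::None;
    if (!s.m_sent_getheaders) {         // timed out once: probe with getheaders
        s.m_sent_getheaders = true;
        s.m_timeout = now + HEADERS_RESPONSE_TIME;
        return SyncAction::SendGetheaders;
    }
    return s.m_protect ? SyncAction::None : SyncAction::Disconnect;
}
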
466 | | |
467 | | class PeerManagerImpl final : public PeerManager |
468 | | { |
469 | | public: |
470 | | PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
471 | | BanMan* banman, ChainstateManager& chainman, |
472 | | CTxMemPool& pool, node::Warnings& warnings, Options opts); |
473 | | |
474 | | /** Overridden from CValidationInterface. */ |
475 | | void ActiveTipChange(const CBlockIndex& new_tip, bool) override |
476 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
477 | | void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override |
478 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
479 | | void BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) override |
480 | | EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
481 | | void UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) override |
482 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
483 | | void BlockChecked(const CBlock& block, const BlockValidationState& state) override |
484 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
485 | | void NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) override |
486 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex); |
487 | | |
488 | | /** Implement NetEventsInterface */ |
489 | | void InitializeNode(const CNode& node, ServiceFlags our_services) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_tx_download_mutex); |
490 | | void FinalizeNode(const CNode& node) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, !m_tx_download_mutex); |
491 | | bool HasAllDesirableServiceFlags(ServiceFlags services) const override; |
492 | | bool ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt) override |
493 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
494 | | bool SendMessages(CNode* pto) override |
495 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
496 | | |
497 | | /** Implement PeerManager */ |
498 | | void StartScheduledTasks(CScheduler& scheduler) override; |
499 | | void CheckForStaleTipAndEvictPeers() override; |
500 | | std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override |
501 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
502 | | bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
503 | | std::vector<TxOrphanage::OrphanTxBase> GetOrphanTransactions() override EXCLUSIVE_LOCKS_REQUIRED(!m_tx_download_mutex); |
504 | | PeerManagerInfo GetInfo() const override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
505 | | void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
506 | | void RelayTransaction(const uint256& txid, const uint256& wtxid) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
507 | | void SetBestBlock(int height, std::chrono::seconds time) override |
508 | 0 | { |
509 | 0 | m_best_height = height; |
510 | 0 | m_best_block_time = time; |
511 | 0 | }; |
512 | 0 | void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); }; |
513 | | void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, |
514 | | const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override |
515 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
516 | | void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) override; |
517 | | ServiceFlags GetDesirableServiceFlags(ServiceFlags services) const override; |
518 | | |
519 | | private: |
520 | | /** Consider evicting an outbound peer based on the amount of time they've been behind our tip */ |
521 | | void ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex); |
522 | | |
523 | | /** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */ |
524 | | void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
525 | | |
526 | | /** Retrieve unbroadcast transactions from the mempool and reattempt sending to peers */ |
527 | | void ReattemptInitialBroadcast(CScheduler& scheduler) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
528 | | |
529 | | /** Get a shared pointer to the Peer object. |
530 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
531 | | PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
532 | | |
533 | | /** Get a shared pointer to the Peer object and remove it from m_peer_map. |
534 | | * May return an empty shared_ptr if the Peer object can't be found. */ |
535 | | PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
536 | | |
537 | | /** Mark a peer as misbehaving, which will cause it to be disconnected and its |
538 | | * address discouraged. */ |
539 | | void Misbehaving(Peer& peer, const std::string& message); |
540 | | |
541 | | /** |
542 | | * Potentially mark a node discouraged based on the contents of a BlockValidationState object |
543 | | * |
544 | | * @param[in] via_compact_block this bool is passed in because net_processing should |
545 | | * punish peers differently depending on whether the data was provided in a compact |
546 | | * block message or not. If the compact block had a valid header, but contained invalid |
547 | | * txs, the peer should not be punished. See BIP 152. |
548 | | */ |
549 | | void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
550 | | bool via_compact_block, const std::string& message = "") |
551 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
552 | | |
553 | | /** |
554 | | * Potentially disconnect and discourage a node based on the contents of a TxValidationState object |
555 | | */ |
556 | | void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) |
557 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); |
558 | | |
559 | | /** Maybe disconnect a peer and discourage future connections from its address. |
560 | | * |
561 | | * @param[in] pnode The node to check. |
562 | | * @param[in] peer The peer object to check. |
563 | | * @return True if the peer was marked for disconnection in this function |
564 | | */ |
565 | | bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); |
566 | | |
567 | | /** Handle a transaction whose result was not MempoolAcceptResult::ResultType::VALID. |
568 | | * @param[in] first_time_failure Whether we should consider inserting into vExtraTxnForCompact, adding |
569 | | * a new orphan to resolve, or looking for a package to submit. |
570 | | * Set to true for transactions just received over p2p. |
571 | | * Set to false if the tx has already been rejected before, |
572 | | * e.g. is already in the orphanage, to avoid adding duplicate entries. |
573 | | * Updates m_txrequest, m_lazy_recent_rejects, m_lazy_recent_rejects_reconsiderable, m_orphanage, and vExtraTxnForCompact. |
574 | | * |
575 | | * @returns a PackageToValidate if this transaction has a reconsiderable failure and an eligible package was found, |
576 | | * or std::nullopt otherwise. |
577 | | */ |
578 | | std::optional<node::PackageToValidate> ProcessInvalidTx(NodeId nodeid, const CTransactionRef& tx, const TxValidationState& result, |
579 | | bool first_time_failure) |
580 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
581 | | |
582 | | /** Handle a transaction whose result was MempoolAcceptResult::ResultType::VALID. |
583 | | * Updates m_txrequest, m_orphanage, and vExtraTxnForCompact. Also queues the tx for relay. */ |
584 | | void ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
585 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
586 | | |
587 | | /** Handle the results of package validation: calls ProcessValidTx and ProcessInvalidTx for |
588 | | * individual transactions, and caches rejection for the package as a group. |
589 | | */ |
590 | | void ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
591 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, m_tx_download_mutex); |
592 | | |
593 | | /** |
594 | | * Reconsider orphan transactions after a parent has been accepted to the mempool. |
595 | | * |
596 | | * @param[in] peer The peer whose orphan transactions we will reconsider. Generally only
597 | | * one orphan will be reconsidered on each call of this function. If an |
598 | | * accepted orphan has orphaned children, those will need to be |
599 | | * reconsidered, creating more work, possibly for other peers. |
600 | | * @return True if meaningful work was done (an orphan was accepted/rejected). |
601 | | * If no meaningful work was done, then the work set for this peer |
602 | | * will be empty. |
603 | | */ |
604 | | bool ProcessOrphanTx(Peer& peer) |
605 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex, !m_tx_download_mutex); |
606 | | |
607 | | /** Process a single headers message from a peer. |
608 | | * |
609 | | * @param[in] pfrom CNode of the peer |
610 | | * @param[in] peer The peer sending us the headers |
611 | | * @param[in] headers The headers received. Note that this may be modified within ProcessHeadersMessage. |
612 | | * @param[in] via_compact_block Whether this header came in via compact block handling. |
613 | | */ |
614 | | void ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
615 | | std::vector<CBlockHeader>&& headers, |
616 | | bool via_compact_block) |
617 | | EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
618 | | /** Various helpers for headers processing, invoked by ProcessHeadersMessage() */ |
619 | | /** Return true if headers are continuous and have valid proof-of-work (DoS points assigned on failure) */ |
620 | | bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer); |
621 | | /** Calculate an anti-DoS work threshold for headers chains */ |
622 | | arith_uint256 GetAntiDoSWorkThreshold(); |
623 | | /** Deal with state tracking and headers sync for peers that send |
624 | | * non-connecting headers (this can happen due to BIP 130 headers |
625 | | * announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */ |
626 | | void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
627 | | /** Return true if the headers connect to each other, false otherwise */ |
628 | | bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const; |
629 | | /** Try to continue a low-work headers sync that has already begun. |
630 | | * Assumes the caller has already verified the headers connect, and has |
631 | | * checked that each header satisfies the proof-of-work target included in |
632 | | * the header. |
633 | | * @param[in] peer The peer we're syncing with. |
634 | | * @param[in] pfrom CNode of the peer |
635 | | * @param[in,out] headers The headers to be processed. |
636 | | * @return True if the passed in headers were successfully processed |
637 | | * as the continuation of a low-work headers sync in progress; |
638 | | * false otherwise. |
639 | | * If false, the passed in headers will be returned back to |
640 | | * the caller. |
641 | | * If true, the returned headers may be empty, indicating |
642 | | * there is no more work for the caller to do; or the headers |
643 | | * may be populated with entries that have passed anti-DoS |
644 | | * checks (and therefore may be validated for block index |
645 | | * acceptance by the caller). |
646 | | */ |
647 | | bool IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
648 | | std::vector<CBlockHeader>& headers) |
649 | | EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
650 | | /** Check work on a headers chain to be processed, and if insufficient, |
651 | | * initiate our anti-DoS headers sync mechanism. |
652 | | * |
653 | | * @param[in] peer The peer whose headers we're processing. |
654 | | * @param[in] pfrom CNode of the peer |
655 | | * @param[in] chain_start_header Where these headers connect in our index. |
656 | | * @param[in,out] headers The headers to be processed. |
657 | | * |
658 | | * @return True if chain was low work (headers will be empty after |
659 | | * calling); false otherwise. |
660 | | */ |
661 | | bool TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, |
662 | | const CBlockIndex* chain_start_header, |
663 | | std::vector<CBlockHeader>& headers) |
664 | | EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex, !m_headers_presync_mutex, g_msgproc_mutex); |
665 | | |
666 | | /** Return true if the given header is an ancestor of |
667 | | * m_chainman.m_best_header or our current tip */ |
668 | | bool IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
669 | | |
670 | | /** Request further headers from this peer with a given locator. |
671 | | * We don't issue a getheaders message if we have a recent one outstanding. |
672 | | * This returns true if a getheaders is actually sent, and false otherwise. |
673 | | */ |
674 | | bool MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
675 | | /** Potentially fetch blocks from this peer upon receipt of a new headers tip */ |
676 | | void HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header); |
677 | | /** Update peer state based on received headers message */ |
678 | | void UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
679 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
680 | | |
681 | | void SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req); |
682 | | |
683 | | /** Send a message to a peer */ |
684 | 0 | void PushMessage(CNode& node, CSerializedNetMsg&& msg) const { m_connman.PushMessage(&node, std::move(msg)); } |
685 | | template <typename... Args> |
686 | | void MakeAndPushMessage(CNode& node, std::string msg_type, Args&&... args) const |
687 | 0 | { |
688 | 0 | m_connman.PushMessage(&node, NetMsg::Make(std::move(msg_type), std::forward<Args>(args)...)); |
689 | 0 | }
690 | | |
691 | | /** Send a version message to a peer */ |
692 | | void PushNodeVersion(CNode& pnode, const Peer& peer); |
693 | | |
694 | | /** Send a ping message every PING_INTERVAL or if requested via RPC. May |
695 | | * mark the peer to be disconnected if a ping has timed out. |
696 | | * We use mockable time for ping timeouts, so setmocktime may cause pings |
697 | | * to time out. */ |
698 | | void MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now); |
699 | | |
700 | | /** Send `addr` messages on a regular schedule. */ |
701 | | void MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
702 | | |
703 | | /** Send a single `sendheaders` message, after we have completed headers sync with a peer. */ |
704 | | void MaybeSendSendHeaders(CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
705 | | |
706 | | /** Relay (gossip) an address to a few randomly chosen nodes. |
707 | | * |
708 | | * @param[in] originator The id of the peer that sent us the address. We don't want to relay it back. |
709 | | * @param[in] addr Address to relay. |
710 | | * @param[in] fReachable Whether the address' network is reachable. We relay unreachable |
711 | | * addresses less. |
712 | | */ |
713 | | void RelayAddress(NodeId originator, const CAddress& addr, bool fReachable) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex); |
714 | | |
715 | | /** Send `feefilter` message. */ |
716 | | void MaybeSendFeefilter(CNode& node, Peer& peer, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
717 | | |
718 | | FastRandomContext m_rng GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
719 | | |
720 | | FeeFilterRounder m_fee_filter_rounder GUARDED_BY(NetEventsInterface::g_msgproc_mutex); |
721 | | |
722 | | const CChainParams& m_chainparams; |
723 | | CConnman& m_connman; |
724 | | AddrMan& m_addrman; |
725 | | /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ |
726 | | BanMan* const m_banman; |
727 | | ChainstateManager& m_chainman; |
728 | | CTxMemPool& m_mempool; |
729 | | |
730 | | /** Synchronizes tx download including TxRequestTracker, rejection filters, and TxOrphanage. |
731 | | * Lock invariants: |
732 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_orphanage. |
733 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects. |
734 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_rejects_reconsiderable. |
735 | | * - A txhash (txid or wtxid) in m_txrequest is not also in m_lazy_recent_confirmed_transactions. |
736 | | * - Each data structure's limits hold (m_orphanage max size, m_txrequest per-peer limits, etc). |
737 | | */ |
738 | | Mutex m_tx_download_mutex ACQUIRED_BEFORE(m_mempool.cs); |
739 | | node::TxDownloadManager m_txdownloadman GUARDED_BY(m_tx_download_mutex); |
740 | | |
741 | | std::unique_ptr<TxReconciliationTracker> m_txreconciliation; |
742 | | |
743 | | /** The height of the best chain */ |
744 | | std::atomic<int> m_best_height{-1}; |
745 | | /** The time of the best chain tip block */ |
746 | | std::atomic<std::chrono::seconds> m_best_block_time{0s}; |
747 | | |
748 | | /** Next time to check for stale tip */ |
749 | | std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s}; |
750 | | |
751 | | node::Warnings& m_warnings; |
752 | | TimeOffsets m_outbound_time_offsets{m_warnings}; |
753 | | |
754 | | const Options m_opts; |
755 | | |
756 | | bool RejectIncomingTxs(const CNode& peer) const; |
757 | | |
758 | | /** Whether we've completed initial sync yet, for determining when to turn |
759 | | * on extra block-relay-only peers. */ |
760 | | bool m_initial_sync_finished GUARDED_BY(cs_main){false}; |
761 | | |
762 | | /** Protects m_peer_map. This mutex must not be locked while holding a lock |
763 | | * on any of the mutexes inside a Peer object. */ |
764 | | mutable Mutex m_peer_mutex; |
765 | | /** |
766 | | * Map of all Peer objects, keyed by peer id. This map is protected |
767 | | * by the m_peer_mutex. Once a shared pointer reference is |
768 | | * taken, the lock may be released. Individual fields are protected by |
769 | | * their own locks. |
770 | | */ |
771 | | std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); |
772 | | |
773 | | /** Map maintaining per-node state. */ |
774 | | std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main); |
775 | | |
776 | | /** Get a pointer to a const CNodeState, used when not mutating the CNodeState object. */ |
777 | | const CNodeState* State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
778 | | /** Get a pointer to a mutable CNodeState. */ |
779 | | CNodeState* State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
780 | | |
781 | | uint32_t GetFetchFlags(const Peer& peer) const; |
782 | | |
783 | | std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us}; |
784 | | |
785 | | /** Number of nodes with fSyncStarted. */ |
786 | | int nSyncStarted GUARDED_BY(cs_main) = 0; |
787 | | |
788 | | /** Hash of the last block we received via INV */ |
789 | | uint256 m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){}; |
790 | | |
791 | | /** |
792 | | * Sources of received blocks, saved to be able to punish them when processing
793 | | * happens afterwards. |
794 | | * Set mapBlockSource[hash].second to false if the node should not be |
795 | | * punished if the block is invalid. |
796 | | */ |
797 | | std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); |
798 | | |
799 | | /** Number of peers with wtxid relay. */ |
800 | | std::atomic<int> m_wtxid_relay_peers{0}; |
801 | | |
802 | | /** Number of outbound peers with m_chain_sync.m_protect. */ |
803 | | int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; |
804 | | |
805 | | /** Number of preferable block download peers. */ |
806 | | int m_num_preferred_download_peers GUARDED_BY(cs_main){0}; |
807 | | |
808 | | /** Stalling timeout for blocks in IBD */ |
809 | | std::atomic<std::chrono::seconds> m_block_stalling_timeout{BLOCK_STALLING_TIMEOUT_DEFAULT}; |
810 | | |
811 | | /** |
812 | | * For sending `inv`s to inbound peers, we use a single (exponentially |
813 | | * distributed) timer for all peers. If we used a separate timer for each |
814 | | * peer, a spy node could make multiple inbound connections to us to |
815 | | * accurately determine when we received the transaction (and potentially |
816 | | * determine the transaction's origin). */ |
817 | | std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, |
818 | | std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
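
A sketch of the draw this describes, using a standard-library RNG as a stand-in (the in-tree version draws from its own randomness source and caches the result in m_next_inv_to_inbounds until it expires):

#include <chrono>
#include <random>

std::chrono::microseconds NextInvToInboundsSketch(std::chrono::microseconds now,
                                                  std::chrono::seconds average_interval)
{
    static std::chrono::microseconds cached{0};
    if (cached < now) {
        // One shared draw for all inbound peers; exponential inter-arrival
        // times make the announcement schedule a Poisson process.
        static std::mt19937_64 rng{std::random_device{}()};
        std::exponential_distribution<double> delay{
            1.0 / std::chrono::duration<double>(average_interval).count()};
        cached = now + std::chrono::duration_cast<std::chrono::microseconds>(
                           std::chrono::duration<double>{delay(rng)});
    }
    return cached;
}
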
819 | | |
820 | | |
821 | | // All of the following cache a recent block, and are protected by m_most_recent_block_mutex |
822 | | Mutex m_most_recent_block_mutex; |
823 | | std::shared_ptr<const CBlock> m_most_recent_block GUARDED_BY(m_most_recent_block_mutex); |
824 | | std::shared_ptr<const CBlockHeaderAndShortTxIDs> m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex); |
825 | | uint256 m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex); |
826 | | std::unique_ptr<const std::map<uint256, CTransactionRef>> m_most_recent_block_txs GUARDED_BY(m_most_recent_block_mutex); |
827 | | |
828 | | // Data about the low-work headers synchronization, aggregated from all peers' HeadersSyncStates. |
829 | | /** Mutex guarding the other m_headers_presync_* variables. */ |
830 | | Mutex m_headers_presync_mutex; |
831 | | /** A type to represent statistics about a peer's low-work headers sync. |
832 | | * |
833 | | * - The first field is the total verified amount of work in that synchronization. |
834 | | * - The second is: |
835 | | * - nullopt: the sync is in REDOWNLOAD phase (phase 2). |
836 | | * - {height, timestamp}: the sync has the specified tip height and block timestamp (phase 1). |
837 | | */ |
838 | | using HeadersPresyncStats = std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>; |
839 | | /** Statistics for all peers in low-work headers sync. */ |
840 | | std::map<NodeId, HeadersPresyncStats> m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex) {}; |
841 | | /** The peer with the most-work entry in m_headers_presync_stats. */ |
842 | | NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex) {-1}; |
843 | | /** The m_headers_presync_stats improved, and needs signalling. */ |
844 | | std::atomic_bool m_headers_presync_should_signal{false}; |
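
As an illustration of how these stats could feed the best-peer choice, a sketch that simply picks the most verified work (an assumption for clarity; the in-tree selection also weighs the phase information carried in the second field):

NodeId BestPresyncPeer(const std::map<NodeId, HeadersPresyncStats>& stats)
{
    NodeId best{-1};
    arith_uint256 best_work{0};
    for (const auto& [peer_id, stat] : stats) {
        if (stat.first > best_work) { // stat.first is the verified work so far
            best_work = stat.first;
            best = peer_id;
        }
    }
    return best;
}
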
845 | | |
846 | | /** Height of the highest block announced using BIP 152 high-bandwidth mode. */ |
847 | | int m_highest_fast_announce GUARDED_BY(::cs_main){0}; |
848 | | |
849 | | /** Have we requested this block from a peer */ |
850 | | bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
851 | | |
852 | | /** Have we requested this block from an outbound peer */ |
853 | | bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
854 | | |
855 | | /** Remove this block from our tracked requested blocks. Called if: |
856 | | * - the block has been received from a peer |
857 | | * - the request for the block has timed out |
858 | | * If "from_peer" is specified, then only remove the block if it is in |
859 | | * flight from that peer (to avoid one peer's network traffic from |
860 | | * affecting another's state). |
861 | | */ |
862 | | void RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
863 | | |
864 | | /* Mark a block as in flight.
865 | | * Returns false, still setting pit, if the block was already in flight from the same peer.
866 | | * pit will only be valid as long as the same cs_main lock is being held.
867 | | */ |
868 | | bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
869 | | |
870 | | bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
871 | | |
872 | | /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has |
873 | | * at most count entries. |
874 | | */ |
875 | | void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
876 | | |
877 | | /** Request blocks for the background chainstate, if one is in use. */ |
878 | | void TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex* from_tip, const CBlockIndex* target_block) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
879 | | |
880 | | /** |
881 | | * \brief Find next blocks to download from a peer after a starting block. |
882 | | * |
883 | | * \param vBlocks Vector of blocks to download which will be appended to. |
884 | | * \param peer Peer which blocks will be downloaded from. |
885 | | * \param state Pointer to the state of the peer. |
886 | | * \param pindexWalk Pointer to the starting block to add to vBlocks. |
887 | | * \param count Maximum number of blocks to allow in vBlocks. No more |
888 | | * blocks will be added if it reaches this size. |
889 | | * \param nWindowEnd Maximum height of blocks to allow in vBlocks. No |
890 | | * blocks will be added above this height. |
891 | | * \param activeChain Optional pointer to a chain to compare against. If |
892 | | * provided, any next blocks which are already contained |
893 | | * in this chain will not be appended to vBlocks, but |
894 | | * instead will be used to update the |
895 | | * state->pindexLastCommonBlock pointer. |
896 | | * \param nodeStaller Optional pointer to a NodeId variable that will receive |
897 | | * the ID of another peer that might be causing this peer |
898 | | * to stall. This is set to the ID of the peer which |
899 | | * first requested the first in-flight block in the |
900 | | * download window. It is only set if vBlocks is empty at |
901 | | * the end of this function call and if increasing |
902 | | * nWindowEnd by 1 would cause it to be non-empty (which |
903 | | * indicates the download might be stalled because every |
904 | | * block in the window is in flight and no other peer is |
905 | | * trying to download the next block). |
906 | | */ |
907 | | void FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain=nullptr, NodeId* nodeStaller=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
908 | | |
909 | | /* Multimap used to preserve insertion order */ |
910 | | typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap; |
911 | | BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main); |
912 | | |
913 | | /** When our tip was last updated. */ |
914 | | std::atomic<std::chrono::seconds> m_last_tip_update{0s}; |
915 | | |
916 | | /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ |
917 | | CTransactionRef FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
918 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, NetEventsInterface::g_msgproc_mutex); |
919 | | |
920 | | void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
921 | | EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex, peer.m_getdata_requests_mutex, NetEventsInterface::g_msgproc_mutex) |
922 | | LOCKS_EXCLUDED(::cs_main); |
923 | | |
924 | | /** Process a new block. Perform any post-processing housekeeping */ |
925 | | void ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked); |
926 | | |
927 | | /** Process compact block txns */ |
928 | | void ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
929 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
930 | | |
931 | | /** |
932 | | * When a peer sends us a valid block, instruct it to announce blocks to us |
933 | | * using CMPCTBLOCK if possible by adding its nodeid to the end of |
934 | | * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by |
935 | | * removing the first element if necessary. |
936 | | */ |
937 | | void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main, !m_peer_mutex); |
938 | | |
939 | | /** Stack of nodes which we have set to announce using compact blocks */ |
940 | | std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); |
941 | | |
942 | | /** Number of peers from which we're downloading blocks. */ |
943 | | int m_peers_downloading_from GUARDED_BY(cs_main) = 0; |
944 | | |
945 | | void AddToCompactExtraTransactions(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
946 | | |
947 | | /** Orphan/conflicted/etc transactions that are kept for compact block reconstruction. |
948 | | * The last -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of |
949 | | * these are kept in a ring buffer */ |
950 | | std::vector<CTransactionRef> vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex); |
951 | | /** Offset into vExtraTxnForCompact to insert the next tx */ |
952 | | size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0; |
953 | | |
954 | | /** Check whether the last unknown block a peer advertised is not yet known. */ |
955 | | void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
956 | | /** Update tracking information about which blocks a peer is assumed to have. */ |
957 | | void UpdateBlockAvailability(NodeId nodeid, const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
958 | | bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
959 | | |
960 | | /** |
961 | | * Estimates the distance, in blocks, between the best-known block and the network chain tip. |
962 | | * Utilizes the best-block time and the chainparams blocks spacing to approximate it. |
963 | | */ |
964 | | int64_t ApproximateBestBlockDepth() const; |
965 | | |
966 | | /** |
967 | | * To prevent fingerprinting attacks, only send blocks/headers outside of |
968 | | * the active chain if they are no more than a month older (both in time, |
969 | | * and in best equivalent proof of work) than the best header chain we know |
970 | | * about and we fully-validated them at some point. |
971 | | */ |
972 | | bool BlockRequestAllowed(const CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
973 | | bool AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); |
974 | | void ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
975 | | EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex, !m_most_recent_block_mutex); |
976 | | |
977 | | /** |
978 | | * Validation logic for compact filters request handling. |
979 | | * |
980 | | * May disconnect from the peer in the case of a bad request. |
981 | | * |
982 | | * @param[in] node The node that we received the request from |
983 | | * @param[in] peer The peer that we received the request from |
984 | | * @param[in] filter_type The filter type the request is for. Must be basic filters. |
985 | | * @param[in] start_height The start height for the request |
986 | | * @param[in] stop_hash The stop_hash for the request |
987 | | * @param[in] max_height_diff The maximum number of items permitted to request, as specified in BIP 157 |
988 | | * @param[out] stop_index The CBlockIndex for the stop_hash block, if the request can be serviced. |
989 | | * @param[out] filter_index The filter index, if the request can be serviced. |
990 | | * @return True if the request can be serviced. |
991 | | */ |
992 | | bool PrepareBlockFilterRequest(CNode& node, Peer& peer, |
993 | | BlockFilterType filter_type, uint32_t start_height, |
994 | | const uint256& stop_hash, uint32_t max_height_diff, |
995 | | const CBlockIndex*& stop_index, |
996 | | BlockFilterIndex*& filter_index); |
997 | | |
998 | | /** |
999 | | * Handle a cfilters request. |
1000 | | * |
1001 | | * May disconnect from the peer in the case of a bad request. |
1002 | | * |
1003 | | * @param[in] node The node that we received the request from |
1004 | | * @param[in] peer The peer that we received the request from |
1005 | | * @param[in] vRecv The raw message received |
1006 | | */ |
1007 | | void ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv); |
1008 | | |
1009 | | /** |
1010 | | * Handle a cfheaders request. |
1011 | | * |
1012 | | * May disconnect from the peer in the case of a bad request. |
1013 | | * |
1014 | | * @param[in] node The node that we received the request from |
1015 | | * @param[in] peer The peer that we received the request from |
1016 | | * @param[in] vRecv The raw message received |
1017 | | */ |
1018 | | void ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv); |
1019 | | |
1020 | | /** |
1021 | | * Handle a getcfcheckpt request. |
1022 | | * |
1023 | | * May disconnect from the peer in the case of a bad request. |
1024 | | * |
1025 | | * @param[in] node The node that we received the request from |
1026 | | * @param[in] peer The peer that we received the request from |
1027 | | * @param[in] vRecv The raw message received |
1028 | | */ |
1029 | | void ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv); |
1030 | | |
1031 | | /** Checks if address relay is permitted with peer. If needed, initializes |
1032 | | * the m_addr_known bloom filter and sets m_addr_relay_enabled to true. |
1033 | | * |
1034 | | * @return True if address relay is enabled with peer |
1035 | | * False if address relay is disallowed |
1036 | | */ |
1037 | | bool SetupAddressRelay(const CNode& node, Peer& peer) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1038 | | |
1039 | | void AddAddressKnown(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1040 | | void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); |
1041 | | }; |
1042 | | |
1043 | | const CNodeState* PeerManagerImpl::State(NodeId pnode) const |
1044 | 0 | { |
1045 | 0 | std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode); |
1046 | 0 | if (it == m_node_states.end()) |
1047 | 0 | return nullptr; |
1048 | 0 | return &it->second; |
1049 | 0 | } |
1050 | | |
1051 | | CNodeState* PeerManagerImpl::State(NodeId pnode) |
1052 | 0 | { |
1053 | 0 | return const_cast<CNodeState*>(std::as_const(*this).State(pnode)); |
1054 | 0 | } |
1055 | | |
1056 | | /** |
1057 | | * Whether the peer supports the address. For example, a peer that does not |
1058 | | * implement BIP155 cannot receive Tor v3 addresses because it requires |
1059 | | * ADDRv2 (BIP155) encoding. |
1060 | | */ |
1061 | | static bool IsAddrCompatible(const Peer& peer, const CAddress& addr) |
1062 | 0 | { |
1063 | 0 | return peer.m_wants_addrv2 || addr.IsAddrV1Compatible(); |
1064 | 0 | } |
1065 | | |
1066 | | void PeerManagerImpl::AddAddressKnown(Peer& peer, const CAddress& addr) |
1067 | 0 | { |
1068 | 0 | assert(peer.m_addr_known); |
1069 | 0 | peer.m_addr_known->insert(addr.GetKey()); |
1070 | 0 | } |
1071 | | |
1072 | | void PeerManagerImpl::PushAddress(Peer& peer, const CAddress& addr) |
1073 | 0 | { |
1074 | | // Known checking here is only to save space from duplicates. |
1075 | | // Before sending, we'll filter it again for known addresses that were |
1076 | | // added after addresses were pushed. |
1077 | 0 | assert(peer.m_addr_known); |
1078 | 0 | if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) && IsAddrCompatible(peer, addr)) { |
1079 | 0 | if (peer.m_addrs_to_send.size() >= MAX_ADDR_TO_SEND) { |
1080 | 0 | peer.m_addrs_to_send[m_rng.randrange(peer.m_addrs_to_send.size())] = addr; |
1081 | 0 | } else { |
1082 | 0 | peer.m_addrs_to_send.push_back(addr); |
1083 | 0 | } |
1084 | 0 | } |
1085 | 0 | } |
1086 | | |
1087 | | static void AddKnownTx(Peer& peer, const uint256& hash) |
1088 | 0 | { |
1089 | 0 | auto tx_relay = peer.GetTxRelay(); |
1090 | 0 | if (!tx_relay) return; |
1091 | | |
1092 | 0 | LOCK(tx_relay->m_tx_inventory_mutex); |
1093 | 0 | tx_relay->m_tx_inventory_known_filter.insert(hash); |
1094 | 0 | } |
1095 | | |
1096 | | /** Whether this peer can serve us blocks. */ |
1097 | | static bool CanServeBlocks(const Peer& peer) |
1098 | 0 | { |
1099 | 0 | return peer.m_their_services & (NODE_NETWORK|NODE_NETWORK_LIMITED); |
1100 | 0 | } |
1101 | | |
1102 | | /** Whether this peer can only serve limited recent blocks (e.g. because |
1103 | | * it prunes old blocks) */ |
1104 | | static bool IsLimitedPeer(const Peer& peer) |
1105 | 0 | { |
1106 | 0 | return (!(peer.m_their_services & NODE_NETWORK) && |
1107 | 0 | (peer.m_their_services & NODE_NETWORK_LIMITED)); |
1108 | 0 | } |
1109 | | |
1110 | | /** Whether this peer can serve us witness data */ |
1111 | | static bool CanServeWitnesses(const Peer& peer) |
1112 | 0 | { |
1113 | 0 | return peer.m_their_services & NODE_WITNESS; |
1114 | 0 | } |
1115 | | |
1116 | | std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, |
1117 | | std::chrono::seconds average_interval) |
1118 | 0 | { |
1119 | 0 | if (m_next_inv_to_inbounds.load() < now) { |
1120 | | // If this function were called from multiple threads simultaneously |
1121 | | // it would be possible that both update the next-send variable and return different results to their callers.
1122 | | // This is not possible in practice as only the net processing thread invokes this function. |
1123 | 0 | m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); |
1124 | 0 | } |
1125 | 0 | return m_next_inv_to_inbounds; |
1126 | 0 | } |
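
As a rough standalone illustration of this shared trickle timer, the sketch below draws the same kind of exponentially distributed deadline using std::exponential_distribution; the std::mt19937_64 generator and the free function are stand-ins for the node's FastRandomContext::rand_exp_duration and the member function above, not the real API.

    #include <chrono>
    #include <iostream>
    #include <random>

    using namespace std::chrono_literals;

    // Shared deadline for the next inv flush to all inbound peers.
    std::chrono::microseconds g_next_inv_send{0};

    std::chrono::microseconds NextInvToInboundsSketch(std::chrono::microseconds now,
                                                      std::chrono::seconds average_interval,
                                                      std::mt19937_64& rng)
    {
        if (g_next_inv_send < now) {
            // An exponential interval with mean == average_interval makes the
            // flush times a Poisson process: observing one flush tells a spy
            // nothing about when the transaction actually arrived.
            std::exponential_distribution<double> interval{1.0 / average_interval.count()};
            g_next_inv_send = now + std::chrono::duration_cast<std::chrono::microseconds>(
                                        std::chrono::duration<double>{interval(rng)});
        }
        return g_next_inv_send;
    }

    int main()
    {
        std::mt19937_64 rng{42};
        // Every inbound peer gets the same deadline, so opening multiple spy
        // connections yields no extra timing information.
        std::cout << NextInvToInboundsSketch(1s, 5s, rng).count() << " us\n";
    }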
1127 | | |
1128 | | bool PeerManagerImpl::IsBlockRequested(const uint256& hash) |
1129 | 0 | { |
1130 | 0 | return mapBlocksInFlight.count(hash); |
1131 | 0 | } |
1132 | | |
1133 | | bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash) |
1134 | 0 | { |
1135 | 0 | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { |
1136 | 0 | auto [nodeid, block_it] = range.first->second; |
1137 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
1138 | 0 | if (peer && !peer->m_is_inbound) return true; |
1139 | 0 | } |
1140 | | |
1141 | 0 | return false; |
1142 | 0 | } |
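
mapBlocksInFlight is a multimap precisely so the same block hash can be in flight from several peers at once (parallel compact-block requests). A toy version of the equal_range scan above, with std::string hashes and a plain inbound flag standing in for the PeerRef lookup:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    using NodeId = long long;

    int main()
    {
        // hash -> (peer id, peer-is-inbound); duplicate keys model parallel requests.
        std::multimap<std::string, std::pair<NodeId, bool>> blocks_in_flight{
            {"blockA", {3, true}},
            {"blockA", {7, false}},
            {"blockB", {5, true}},
        };

        auto requested_from_outbound = [&](const std::string& hash) {
            for (auto range = blocks_in_flight.equal_range(hash); range.first != range.second; ++range.first) {
                if (!range.first->second.second) return true; // found an outbound requester
            }
            return false;
        };

        std::cout << requested_from_outbound("blockA") << '\n'; // 1: peer 7 is outbound
        std::cout << requested_from_outbound("blockB") << '\n'; // 0: only inbound peer 5
    }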
1143 | | |
1144 | | void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer) |
1145 | 0 | { |
1146 | 0 | auto range = mapBlocksInFlight.equal_range(hash); |
1147 | 0 | if (range.first == range.second) { |
1148 | | // Block was not requested from any peer |
1149 | 0 | return; |
1150 | 0 | } |
1151 | | |
1152 | | // We should not have more requests in flight for this block than MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK allows
1153 | 0 | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); |
1154 | |
1155 | 0 | while (range.first != range.second) { |
1156 | 0 | auto [node_id, list_it] = range.first->second; |
1157 | |
1158 | 0 | if (from_peer && *from_peer != node_id) { |
1159 | 0 | range.first++; |
1160 | 0 | continue; |
1161 | 0 | } |
1162 | | |
1163 | 0 | CNodeState& state = *Assert(State(node_id)); |
1164 | |
1165 | 0 | if (state.vBlocksInFlight.begin() == list_it) { |
1166 | | // First block on the queue was received, update the start download time for the next one |
1167 | 0 | state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>()); |
1168 | 0 | } |
1169 | 0 | state.vBlocksInFlight.erase(list_it); |
1170 | |
1171 | 0 | if (state.vBlocksInFlight.empty()) { |
1172 | | // Last validated block on the queue for this peer was received. |
1173 | 0 | m_peers_downloading_from--; |
1174 | 0 | } |
1175 | 0 | state.m_stalling_since = 0us; |
1176 | |
1177 | 0 | range.first = mapBlocksInFlight.erase(range.first); |
1178 | 0 | } |
1179 | 0 | } |
1180 | | |
1181 | | bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit) |
1182 | 0 | { |
1183 | 0 | const uint256& hash{block.GetBlockHash()}; |
1184 | |
1185 | 0 | CNodeState *state = State(nodeid); |
1186 | 0 | assert(state != nullptr); |
1187 | | |
1188 | 0 | Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK); |
1189 | | |
1190 | | // Short-circuit most of the work if the block is already in flight from the same node
1191 | 0 | for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) { |
1192 | 0 | if (range.first->second.first == nodeid) { |
1193 | 0 | if (pit) { |
1194 | 0 | *pit = &range.first->second.second; |
1195 | 0 | } |
1196 | 0 | return false; |
1197 | 0 | } |
1198 | 0 | } |
1199 | | |
1200 | | // Make sure it's not being fetched already from the same peer.
1201 | 0 | RemoveBlockRequest(hash, nodeid); |
1202 | |
1203 | 0 | std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), |
1204 | 0 | {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); |
1205 | 0 | if (state->vBlocksInFlight.size() == 1) { |
1206 | | // We're starting a block download (batch) from this peer. |
1207 | 0 | state->m_downloading_since = GetTime<std::chrono::microseconds>(); |
1208 | 0 | m_peers_downloading_from++; |
1209 | 0 | } |
1210 | 0 | auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))); |
1211 | 0 | if (pit) { |
1212 | 0 | *pit = &itInFlight->second.second; |
1213 | 0 | } |
1214 | 0 | return true; |
1215 | 0 | } |
1216 | | |
1217 | | void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) |
1218 | 0 | { |
1219 | 0 | AssertLockHeld(cs_main); |
1220 | | |
1221 | | // When in -blocksonly mode, never request high-bandwidth mode from peers. Our |
1222 | | // mempool will not contain the transactions necessary to reconstruct the |
1223 | | // compact block. |
1224 | 0 | if (m_opts.ignore_incoming_txs) return; |
1225 | | |
1226 | 0 | CNodeState* nodestate = State(nodeid); |
1227 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
1228 | 0 | if (!nodestate || !nodestate->m_provides_cmpctblocks) { |
1229 | | // Don't request compact blocks if the peer has not signalled support |
1230 | 0 | return; |
1231 | 0 | } |
1232 | | |
1233 | 0 | int num_outbound_hb_peers = 0; |
1234 | 0 | for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { |
1235 | 0 | if (*it == nodeid) { |
1236 | 0 | lNodesAnnouncingHeaderAndIDs.erase(it); |
1237 | 0 | lNodesAnnouncingHeaderAndIDs.push_back(nodeid); |
1238 | 0 | return; |
1239 | 0 | } |
1240 | 0 | PeerRef peer_ref{GetPeerRef(*it)}; |
1241 | 0 | if (peer_ref && !peer_ref->m_is_inbound) ++num_outbound_hb_peers; |
1242 | 0 | } |
1243 | 0 | if (peer && peer->m_is_inbound) { |
1244 | | // If we're adding an inbound HB peer, make sure we're not removing |
1245 | | // our last outbound HB peer in the process. |
1246 | 0 | if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { |
1247 | 0 | PeerRef remove_peer{GetPeerRef(lNodesAnnouncingHeaderAndIDs.front())}; |
1248 | 0 | if (remove_peer && !remove_peer->m_is_inbound) { |
1249 | | // Put the HB outbound peer in the second slot, so that it |
1250 | | // doesn't get removed. |
1251 | 0 | std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); |
1252 | 0 | } |
1253 | 0 | } |
1254 | 0 | } |
1255 | 0 | m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
1256 | 0 | AssertLockHeld(::cs_main); |
1257 | 0 | if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { |
1258 | | // As per BIP152, we only get 3 of our peers to announce |
1259 | | // blocks using compact encodings. |
1260 | 0 | m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this](CNode* pnodeStop){ |
1261 | 0 | MakeAndPushMessage(*pnodeStop, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
1262 | | // save BIP152 bandwidth state: we select this peer to be low-bandwidth
1263 | 0 | pnodeStop->m_bip152_highbandwidth_to = false; |
1264 | 0 | return true; |
1265 | 0 | }); |
1266 | 0 | lNodesAnnouncingHeaderAndIDs.pop_front(); |
1267 | 0 | } |
1268 | 0 | MakeAndPushMessage(*pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/true, /*version=*/CMPCTBLOCKS_VERSION); |
1269 | | // save BIP152 bandwidth state: we select this peer to be high-bandwidth
1270 | 0 | pfrom->m_bip152_highbandwidth_to = true; |
1271 | 0 | lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); |
1272 | 0 | return true; |
1273 | 0 | }); |
1274 | 0 | } |
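
Stripped of the inbound/outbound protection, the list maintenance above is a three-slot LRU: re-selecting a peer moves it to the back, and a new selection evicts the oldest. A self-contained sketch (PromoteToHighBandwidth is an illustrative name, not the real function):

    #include <algorithm>
    #include <iostream>
    #include <list>

    using NodeId = long long;

    std::list<NodeId> hb_peers;

    void PromoteToHighBandwidth(NodeId nodeid)
    {
        // If already selected, just move the peer to the most-recent slot.
        auto it = std::find(hb_peers.begin(), hb_peers.end(), nodeid);
        if (it != hb_peers.end()) {
            hb_peers.splice(hb_peers.end(), hb_peers, it);
            return;
        }
        // BIP152 allows asking at most 3 peers for high-bandwidth
        // announcements; drop the oldest selection to make room.
        if (hb_peers.size() >= 3) hb_peers.pop_front();
        hb_peers.push_back(nodeid);
    }

    int main()
    {
        for (NodeId id : {1, 2, 3, 4, 2}) PromoteToHighBandwidth(id);
        for (NodeId id : hb_peers) std::cout << id << ' '; // prints: 3 4 2
    }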
1275 | | |
1276 | | bool PeerManagerImpl::TipMayBeStale() |
1277 | 0 | { |
1278 | 0 | AssertLockHeld(cs_main); |
1279 | 0 | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
1280 | 0 | if (m_last_tip_update.load() == 0s) { |
1281 | 0 | m_last_tip_update = GetTime<std::chrono::seconds>(); |
1282 | 0 | } |
1283 | 0 | return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty(); |
1284 | 0 | } |
1285 | | |
1286 | | int64_t PeerManagerImpl::ApproximateBestBlockDepth() const |
1287 | 0 | { |
1288 | 0 | return (GetTime<std::chrono::seconds>() - m_best_block_time.load()).count() / m_chainparams.GetConsensus().nPowTargetSpacing; |
1289 | 0 | } |
1290 | | |
1291 | | bool PeerManagerImpl::CanDirectFetch() |
1292 | 0 | { |
1293 | 0 | return m_chainman.ActiveChain().Tip()->Time() > NodeClock::now() - m_chainparams.GetConsensus().PowTargetSpacing() * 20; |
1294 | 0 | } |
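
All three helpers above compare elapsed time against multiples of the proof-of-work target spacing. A back-of-the-envelope sketch with mainnet's 600-second spacing (the multipliers 3 and 20 come from the code above; the timestamps are made up):

    #include <iostream>

    int main()
    {
        const long long spacing = 600; // nPowTargetSpacing on mainnet
        const long long now = 1'700'000'000;

        // TipMayBeStale: no tip update for > 3 spacings (30 minutes on mainnet).
        long long last_tip_update = now - 45 * 60;
        std::cout << "stale: " << (last_tip_update < now - 3 * spacing) << '\n'; // 1

        // ApproximateBestBlockDepth: elapsed time / spacing estimates blocks behind.
        long long best_block_time = now - 2 * 60 * 60;
        std::cout << "depth: " << (now - best_block_time) / spacing << '\n'; // 12

        // CanDirectFetch: tip must be younger than 20 spacings (~3h20m on mainnet).
        long long tip_time = now - 2 * 60 * 60;
        std::cout << "direct fetch: " << (tip_time > now - 20 * spacing) << '\n'; // 1
    }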
1295 | | |
1296 | | static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) |
1297 | 0 | { |
1298 | 0 | if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) |
1299 | 0 | return true; |
1300 | 0 | if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) |
1301 | 0 | return true; |
1302 | 0 | return false; |
1303 | 0 | } |
1304 | | |
1305 | 0 | void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) { |
1306 | 0 | CNodeState *state = State(nodeid); |
1307 | 0 | assert(state != nullptr); |
1308 | | |
1309 | 0 | if (!state->hashLastUnknownBlock.IsNull()) { |
1310 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock); |
1311 | 0 | if (pindex && pindex->nChainWork > 0) { |
1312 | 0 | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { |
1313 | 0 | state->pindexBestKnownBlock = pindex; |
1314 | 0 | } |
1315 | 0 | state->hashLastUnknownBlock.SetNull(); |
1316 | 0 | } |
1317 | 0 | } |
1318 | 0 | } |
1319 | | |
1320 | 0 | void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { |
1321 | 0 | CNodeState *state = State(nodeid); |
1322 | 0 | assert(state != nullptr); |
1323 | | |
1324 | 0 | ProcessBlockAvailability(nodeid); |
1325 | |
1326 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
1327 | 0 | if (pindex && pindex->nChainWork > 0) { |
1328 | | // An actually better block was announced. |
1329 | 0 | if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { |
1330 | 0 | state->pindexBestKnownBlock = pindex; |
1331 | 0 | } |
1332 | 0 | } else { |
1333 | | // An unknown block was announced; just assume that the latest one is the best one. |
1334 | 0 | state->hashLastUnknownBlock = hash; |
1335 | 0 | } |
1336 | 0 | } |
1337 | | |
1338 | | // Logic for calculating which blocks to download from a given peer, given our current tip. |
1339 | | void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) |
1340 | 0 | { |
1341 | 0 | if (count == 0) |
1342 | 0 | return; |
1343 | | |
1344 | 0 | vBlocks.reserve(vBlocks.size() + count); |
1345 | 0 | CNodeState *state = State(peer.m_id); |
1346 | 0 | assert(state != nullptr); |
1347 | | |
1348 | | // Make sure pindexBestKnownBlock is up to date, we'll need it. |
1349 | 0 | ProcessBlockAvailability(peer.m_id); |
1350 | |
1351 | 0 | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < m_chainman.ActiveChain().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { |
1352 | | // This peer has nothing interesting. |
1353 | 0 | return; |
1354 | 0 | } |
1355 | | |
1356 | | // When we sync with AssumeUtxo and discover the snapshot is not in the peer's best chain, abort: |
1357 | | // We can't reorg to this chain due to missing undo data until the background sync has finished, |
1358 | | // so downloading blocks from it would be futile. |
1359 | 0 | const CBlockIndex* snap_base{m_chainman.GetSnapshotBaseBlock()}; |
1360 | 0 | if (snap_base && state->pindexBestKnownBlock->GetAncestor(snap_base->nHeight) != snap_base) { |
1361 | 0 | LogDebug(BCLog::NET, "Not downloading blocks from peer=%d, which doesn't have the snapshot block in its best chain.\n", peer.m_id); |
1362 | 0 | return; |
1363 | 0 | } |
1364 | | |
1365 | | // Bootstrap quickly by guessing a parent of our best tip is the forking point. |
1366 | | // Guessing wrong in either direction is not a problem. |
1367 | | // Also reset pindexLastCommonBlock after a snapshot was loaded, so that blocks after the snapshot will be prioritised for download. |
1368 | 0 | if (state->pindexLastCommonBlock == nullptr || |
1369 | 0 | (snap_base && state->pindexLastCommonBlock->nHeight < snap_base->nHeight)) { |
1370 | 0 | state->pindexLastCommonBlock = m_chainman.ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight, m_chainman.ActiveChain().Height())]; |
1371 | 0 | } |
1372 | | |
1373 | | // If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor |
1374 | | // of its current tip anymore. Go back enough to fix that. |
1375 | 0 | state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock); |
1376 | 0 | if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) |
1377 | 0 | return; |
1378 | | |
1379 | 0 | const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; |
1380 | | // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last |
1381 | | // linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to |
1382 | | // download that next block if the window were 1 larger. |
1383 | 0 | int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; |
1384 | |
1385 | 0 | FindNextBlocks(vBlocks, peer, state, pindexWalk, count, nWindowEnd, &m_chainman.ActiveChain(), &nodeStaller); |
1386 | 0 | } |
1387 | | |
1388 | | void PeerManagerImpl::TryDownloadingHistoricalBlocks(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, const CBlockIndex *from_tip, const CBlockIndex* target_block) |
1389 | 0 | { |
1390 | 0 | Assert(from_tip); |
1391 | 0 | Assert(target_block); |
1392 | |
1393 | 0 | if (vBlocks.size() >= count) { |
1394 | 0 | return; |
1395 | 0 | } |
1396 | | |
1397 | 0 | vBlocks.reserve(count); |
1398 | 0 | CNodeState *state = Assert(State(peer.m_id)); |
1399 | |
1400 | 0 | if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->GetAncestor(target_block->nHeight) != target_block) { |
1401 | | // This peer can't provide us the complete series of blocks leading up to the |
1402 | | // assumeutxo snapshot base. |
1403 | | // |
1404 | | // Presumably this peer's chain has less work than our ActiveChain()'s tip, or else we |
1405 | | // will eventually crash when we try to reorg to it. Let other logic |
1406 | | // deal with whether we disconnect this peer. |
1407 | | // |
1408 | | // TODO at some point in the future, we might choose to request what blocks |
1409 | | // this peer does have from the historical chain, despite it not having a |
1410 | | // complete history beneath the snapshot base. |
1411 | 0 | return; |
1412 | 0 | } |
1413 | | |
1414 | 0 | FindNextBlocks(vBlocks, peer, state, from_tip, count, std::min<int>(from_tip->nHeight + BLOCK_DOWNLOAD_WINDOW, target_block->nHeight)); |
1415 | 0 | } |
1416 | | |
1417 | | void PeerManagerImpl::FindNextBlocks(std::vector<const CBlockIndex*>& vBlocks, const Peer& peer, CNodeState *state, const CBlockIndex *pindexWalk, unsigned int count, int nWindowEnd, const CChain* activeChain, NodeId* nodeStaller) |
1418 | 0 | { |
1419 | 0 | std::vector<const CBlockIndex*> vToFetch; |
1420 | 0 | int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); |
1421 | 0 | bool is_limited_peer = IsLimitedPeer(peer); |
1422 | 0 | NodeId waitingfor = -1; |
1423 | 0 | while (pindexWalk->nHeight < nMaxHeight) { |
1424 | | // Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards |
1425 | | // pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive |
1426 | | // as iterating over ~100 CBlockIndex* entries anyway. |
1427 | 0 | int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128)); |
1428 | 0 | vToFetch.resize(nToFetch); |
1429 | 0 | pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch); |
1430 | 0 | vToFetch[nToFetch - 1] = pindexWalk; |
1431 | 0 | for (unsigned int i = nToFetch - 1; i > 0; i--) { |
1432 | 0 | vToFetch[i - 1] = vToFetch[i]->pprev; |
1433 | 0 | } |
1434 | | |
1435 | | // Iterate over those blocks in vToFetch (in forward direction), adding the ones that |
1436 | | // are not yet downloaded and not in flight to vBlocks. In the meantime, update |
1437 | | // pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's |
1438 | | // already part of our chain (and therefore don't need it even if pruned). |
1439 | 0 | for (const CBlockIndex* pindex : vToFetch) { |
1440 | 0 | if (!pindex->IsValid(BLOCK_VALID_TREE)) { |
1441 | | // We consider the chain that this peer is on invalid. |
1442 | 0 | return; |
1443 | 0 | } |
1444 | | |
1445 | 0 | if (!CanServeWitnesses(peer) && DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) { |
1446 | | // We wouldn't download this block or its descendants from this peer. |
1447 | 0 | return; |
1448 | 0 | } |
1449 | | |
1450 | 0 | if (pindex->nStatus & BLOCK_HAVE_DATA || (activeChain && activeChain->Contains(pindex))) { |
1451 | 0 | if (activeChain && pindex->HaveNumChainTxs()) { |
1452 | 0 | state->pindexLastCommonBlock = pindex; |
1453 | 0 | } |
1454 | 0 | continue; |
1455 | 0 | } |
1456 | | |
1457 | | // Is block in-flight? |
1458 | 0 | if (IsBlockRequested(pindex->GetBlockHash())) { |
1459 | 0 | if (waitingfor == -1) { |
1460 | | // This is the first already-in-flight block. |
1461 | 0 | waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first; |
1462 | 0 | } |
1463 | 0 | continue; |
1464 | 0 | } |
1465 | | |
1466 | | // The block is not already downloaded, and not yet in flight. |
1467 | 0 | if (pindex->nHeight > nWindowEnd) { |
1468 | | // We reached the end of the window. |
1469 | 0 | if (vBlocks.size() == 0 && waitingfor != peer.m_id) { |
1470 | | // We aren't able to fetch anything, but we would be if the download window were one larger.
1471 | 0 | if (nodeStaller) *nodeStaller = waitingfor; |
1472 | 0 | } |
1473 | 0 | return; |
1474 | 0 | } |
1475 | | |
1476 | | // Don't request blocks that go further than what limited peers can provide |
1477 | 0 | if (is_limited_peer && (state->pindexBestKnownBlock->nHeight - pindex->nHeight >= static_cast<int>(NODE_NETWORK_LIMITED_MIN_BLOCKS) - 2 /* two blocks buffer for possible races */)) { |
1478 | 0 | continue; |
1479 | 0 | } |
1480 | | |
1481 | 0 | vBlocks.push_back(pindex); |
1482 | 0 | if (vBlocks.size() == count) { |
1483 | 0 | return; |
1484 | 0 | } |
1485 | 0 | } |
1486 | 0 | } |
1487 | 0 | } |
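
The inner loop above avoids one GetAncestor call per block: it jumps ahead a whole batch, then back-fills the batch by walking pprev. A toy chain makes the pattern visible (ToyIndex is a stand-in for CBlockIndex, whose real GetAncestor uses a skiplist; the real code uses batches of at least 128):

    #include <iostream>
    #include <vector>

    struct ToyIndex {
        int nHeight;
        const ToyIndex* pprev;
    };

    int main()
    {
        // Build a linear toy chain with heights 0..9.
        std::vector<ToyIndex> chain(10);
        for (int h = 0; h < 10; ++h) chain[h] = {h, h > 0 ? &chain[h - 1] : nullptr};
        const ToyIndex* best_known = &chain[9];

        const ToyIndex* walk = &chain[2]; // last common block
        int to_fetch = 4;                 // batch size

        // "GetAncestor": descend from best_known to height walk->nHeight + to_fetch
        // (a linear walk suffices for this toy chain).
        const ToyIndex* top = best_known;
        while (top->nHeight > walk->nHeight + to_fetch) top = top->pprev;

        // Fill the batch backwards, exactly like vToFetch above.
        std::vector<const ToyIndex*> batch(to_fetch);
        batch[to_fetch - 1] = top;
        for (int i = to_fetch - 1; i > 0; --i) batch[i - 1] = batch[i]->pprev;

        for (const ToyIndex* p : batch) std::cout << p->nHeight << ' '; // 3 4 5 6
    }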
1488 | | |
1489 | | } // namespace |
1490 | | |
1491 | | void PeerManagerImpl::PushNodeVersion(CNode& pnode, const Peer& peer) |
1492 | 0 | { |
1493 | 0 | uint64_t my_services{peer.m_our_services}; |
1494 | 0 | const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())}; |
1495 | 0 | uint64_t nonce = pnode.GetLocalNonce(); |
1496 | 0 | const int nNodeStartingHeight{m_best_height}; |
1497 | 0 | NodeId nodeid = pnode.GetId(); |
1498 | 0 | CAddress addr = pnode.addr; |
1499 | |
1500 | 0 | CService addr_you = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ? addr : CService(); |
1501 | 0 | uint64_t your_services{addr.nServices}; |
1502 | |
1503 | 0 | const bool tx_relay{!RejectIncomingTxs(pnode)}; |
1504 | 0 | MakeAndPushMessage(pnode, NetMsgType::VERSION, PROTOCOL_VERSION, my_services, nTime, |
1505 | 0 | your_services, CNetAddr::V1(addr_you), // Together the pre-version-31402 serialization of CAddress "addrYou" (without nTime) |
1506 | 0 | my_services, CNetAddr::V1(CService{}), // Together the pre-version-31402 serialization of CAddress "addrMe" (without nTime) |
1507 | 0 | nonce, strSubVersion, nNodeStartingHeight, tx_relay); |
1508 | |
1509 | 0 | if (fLogIPs) { |
1510 | 0 | LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, them=%s, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToStringAddrPort(), tx_relay, nodeid); |
1511 | 0 | } else { |
1512 | 0 | LogDebug(BCLog::NET, "send version message: version %d, blocks=%d, txrelay=%d, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid); |
1513 | 0 | } |
1514 | 0 | } |
1515 | | |
1516 | | void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) |
1517 | 0 | { |
1518 | 0 | LOCK(cs_main); |
1519 | 0 | CNodeState *state = State(node); |
1520 | 0 | if (state) state->m_last_block_announcement = time_in_seconds; |
1521 | 0 | } |
1522 | | |
1523 | | void PeerManagerImpl::InitializeNode(const CNode& node, ServiceFlags our_services) |
1524 | 0 | { |
1525 | 0 | NodeId nodeid = node.GetId(); |
1526 | 0 | { |
1527 | 0 | LOCK(cs_main); // For m_node_states |
1528 | 0 | m_node_states.try_emplace(m_node_states.end(), nodeid); |
1529 | 0 | } |
1530 | 0 | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty(nodeid)); |
1531 | |
1532 | 0 | if (NetPermissions::HasFlag(node.m_permission_flags, NetPermissionFlags::BloomFilter)) { |
1533 | 0 | our_services = static_cast<ServiceFlags>(our_services | NODE_BLOOM); |
1534 | 0 | } |
1535 | |
1536 | 0 | PeerRef peer = std::make_shared<Peer>(nodeid, our_services, node.IsInboundConn()); |
1537 | 0 | { |
1538 | 0 | LOCK(m_peer_mutex); |
1539 | 0 | m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer); |
1540 | 0 | } |
1541 | 0 | } |
1542 | | |
1543 | | void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler) |
1544 | 0 | { |
1545 | 0 | std::set<uint256> unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); |
1546 | |
1547 | 0 | for (const auto& txid : unbroadcast_txids) { |
1548 | 0 | CTransactionRef tx = m_mempool.get(txid); |
1549 | |
1550 | 0 | if (tx != nullptr) { |
1551 | 0 | RelayTransaction(txid, tx->GetWitnessHash()); |
1552 | 0 | } else { |
1553 | 0 | m_mempool.RemoveUnbroadcastTx(txid, true); |
1554 | 0 | } |
1555 | 0 | } |
1556 | | |
1557 | | // Schedule next run for 10-15 minutes in the future. |
1558 | | // We add randomness on every cycle to avoid the possibility of P2P fingerprinting. |
1559 | 0 | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
1560 | 0 | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
1561 | 0 | } |
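
The 10-15 minute reschedule above is uniform jitter on top of a fixed base. A sketch of the same arithmetic, assuming std::mt19937_64 in place of FastRandomContext:

    #include <chrono>
    #include <iostream>
    #include <random>

    using namespace std::chrono_literals;

    int main()
    {
        std::mt19937_64 rng{std::random_device{}()};
        // Uniform in [0, 5min), expressed in milliseconds like the code above.
        std::uniform_int_distribution<int64_t> jitter_ms(0, std::chrono::milliseconds(5min).count() - 1);
        const auto delta = 10min + std::chrono::milliseconds{jitter_ms(rng)};
        std::cout << std::chrono::duration_cast<std::chrono::seconds>(delta).count()
                  << "s until next rebroadcast\n"; // somewhere in [600, 900)
    }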
1562 | | |
1563 | | void PeerManagerImpl::FinalizeNode(const CNode& node) |
1564 | 0 | { |
1565 | 0 | NodeId nodeid = node.GetId(); |
1566 | 0 | { |
1567 | 0 | LOCK(cs_main); |
1568 | 0 | { |
1569 | | // We remove the PeerRef from m_peer_map here, but we don't always
1570 | | // destruct the Peer. Sometimes another thread is still holding a |
1571 | | // PeerRef, so the refcount is >= 1. Be careful not to do any |
1572 | | // processing here that assumes Peer won't be changed before it's |
1573 | | // destructed. |
1574 | 0 | PeerRef peer = RemovePeer(nodeid); |
1575 | 0 | assert(peer != nullptr); |
1576 | 0 | m_wtxid_relay_peers -= peer->m_wtxid_relay; |
1577 | 0 | assert(m_wtxid_relay_peers >= 0); |
1578 | 0 | } |
1579 | 0 | CNodeState *state = State(nodeid); |
1580 | 0 | assert(state != nullptr); |
1581 | | |
1582 | 0 | if (state->fSyncStarted) |
1583 | 0 | nSyncStarted--; |
1584 | |
1585 | 0 | for (const QueuedBlock& entry : state->vBlocksInFlight) { |
1586 | 0 | auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash()); |
1587 | 0 | while (range.first != range.second) { |
1588 | 0 | auto [node_id, list_it] = range.first->second; |
1589 | 0 | if (node_id != nodeid) { |
1590 | 0 | range.first++; |
1591 | 0 | } else { |
1592 | 0 | range.first = mapBlocksInFlight.erase(range.first); |
1593 | 0 | } |
1594 | 0 | } |
1595 | 0 | } |
1596 | 0 | { |
1597 | 0 | LOCK(m_tx_download_mutex); |
1598 | 0 | m_txdownloadman.DisconnectedPeer(nodeid); |
1599 | 0 | } |
1600 | 0 | if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid); |
1601 | 0 | m_num_preferred_download_peers -= state->fPreferredDownload; |
1602 | 0 | m_peers_downloading_from -= (!state->vBlocksInFlight.empty()); |
1603 | 0 | assert(m_peers_downloading_from >= 0); |
1604 | 0 | m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; |
1605 | 0 | assert(m_outbound_peers_with_protect_from_disconnect >= 0); |
1606 | | |
1607 | 0 | m_node_states.erase(nodeid); |
1608 | |
1609 | 0 | if (m_node_states.empty()) { |
1610 | | // Do a consistency check after the last peer is removed. |
1611 | 0 | assert(mapBlocksInFlight.empty()); |
1612 | 0 | assert(m_num_preferred_download_peers == 0); |
1613 | 0 | assert(m_peers_downloading_from == 0); |
1614 | 0 | assert(m_outbound_peers_with_protect_from_disconnect == 0); |
1615 | 0 | assert(m_wtxid_relay_peers == 0); |
1616 | 0 | WITH_LOCK(m_tx_download_mutex, m_txdownloadman.CheckIsEmpty()); |
1617 | 0 | } |
1618 | 0 | } // cs_main |
1619 | 0 | if (node.fSuccessfullyConnected && |
1620 | 0 | !node.IsBlockOnlyConn() && !node.IsInboundConn()) { |
1621 | | // Only change visible addrman state for full outbound peers. We don't |
1622 | | // call Connected() for feeler connections since they don't have |
1623 | | // fSuccessfullyConnected set. |
1624 | 0 | m_addrman.Connected(node.addr); |
1625 | 0 | } |
1626 | 0 | { |
1627 | 0 | LOCK(m_headers_presync_mutex); |
1628 | 0 | m_headers_presync_stats.erase(nodeid); |
1629 | 0 | } |
1630 | 0 | LogDebug(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); |
1631 | 0 | } |
1632 | | |
1633 | | bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const |
1634 | 0 | { |
1635 | | // Shortcut for (services & GetDesirableServiceFlags(services)) == GetDesirableServiceFlags(services) |
1636 | 0 | return !(GetDesirableServiceFlags(services) & (~services)); |
1637 | 0 | } |
1638 | | |
1639 | | ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const |
1640 | 0 | { |
1641 | 0 | if (services & NODE_NETWORK_LIMITED) { |
1642 | | // Limited peers are desirable when we are close to the tip. |
1643 | 0 | if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { |
1644 | 0 | return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); |
1645 | 0 | } |
1646 | 0 | } |
1647 | 0 | return ServiceFlags(NODE_NETWORK | NODE_WITNESS); |
1648 | 0 | } |
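
The shortcut in HasAllDesirableServiceFlags is plain bit algebra: desirable & ~services is the set of desirable bits the peer lacks, so the negation tests "services is a superset of desirable". A sketch (the bit positions mirror Bitcoin's protocol constants, but treat them as illustrative):

    #include <cstdint>
    #include <iostream>

    int main()
    {
        const uint64_t NODE_NETWORK = 1 << 0, NODE_WITNESS = 1 << 3, NODE_NETWORK_LIMITED = 1 << 10;
        const uint64_t desirable = NODE_NETWORK | NODE_WITNESS;

        uint64_t full_node = NODE_NETWORK | NODE_WITNESS;
        uint64_t pruned_node = NODE_NETWORK_LIMITED | NODE_WITNESS;

        std::cout << !(desirable & ~full_node) << '\n';   // 1: all desirable bits present
        std::cout << !(desirable & ~pruned_node) << '\n'; // 0: NODE_NETWORK missing
    }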
1649 | | |
1650 | | PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const |
1651 | 0 | { |
1652 | 0 | LOCK(m_peer_mutex); |
1653 | 0 | auto it = m_peer_map.find(id); |
1654 | 0 | return it != m_peer_map.end() ? it->second : nullptr; |
1655 | 0 | } |
1656 | | |
1657 | | PeerRef PeerManagerImpl::RemovePeer(NodeId id) |
1658 | 0 | { |
1659 | 0 | PeerRef ret; |
1660 | 0 | LOCK(m_peer_mutex); |
1661 | 0 | auto it = m_peer_map.find(id); |
1662 | 0 | if (it != m_peer_map.end()) { |
1663 | 0 | ret = std::move(it->second); |
1664 | 0 | m_peer_map.erase(it); |
1665 | 0 | } |
1666 | 0 | return ret; |
1667 | 0 | } |
1668 | | |
1669 | | bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const |
1670 | 0 | { |
1671 | 0 | { |
1672 | 0 | LOCK(cs_main); |
1673 | 0 | const CNodeState* state = State(nodeid); |
1674 | 0 | if (state == nullptr) |
1675 | 0 | return false; |
1676 | 0 | stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; |
1677 | 0 | stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1; |
1678 | 0 | for (const QueuedBlock& queue : state->vBlocksInFlight) { |
1679 | 0 | if (queue.pindex) |
1680 | 0 | stats.vHeightInFlight.push_back(queue.pindex->nHeight); |
1681 | 0 | } |
1682 | 0 | } |
1683 | | |
1684 | 0 | PeerRef peer = GetPeerRef(nodeid); |
1685 | 0 | if (peer == nullptr) return false; |
1686 | 0 | stats.their_services = peer->m_their_services; |
1687 | 0 | stats.m_starting_height = peer->m_starting_height; |
1688 | | // It is common for nodes with good ping times to suddenly become lagged, |
1689 | | // due to a new block arriving or other large transfer. |
1690 | | // Merely reporting pingtime might fool the caller into thinking the node was still responsive, |
1691 | | // since pingtime does not update until the ping is complete, which might take a while. |
1692 | | // So, if a ping is taking an unusually long time in flight, |
1693 | | // the caller can immediately detect that this is happening. |
1694 | 0 | auto ping_wait{0us}; |
1695 | 0 | if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) { |
1696 | 0 | ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); |
1697 | 0 | } |
1698 | |
1699 | 0 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
1700 | 0 | stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs); |
1701 | 0 | stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load(); |
1702 | 0 | } else { |
1703 | 0 | stats.m_relay_txs = false; |
1704 | 0 | stats.m_fee_filter_received = 0; |
1705 | 0 | } |
1706 | |
1707 | 0 | stats.m_ping_wait = ping_wait; |
1708 | 0 | stats.m_addr_processed = peer->m_addr_processed.load(); |
1709 | 0 | stats.m_addr_rate_limited = peer->m_addr_rate_limited.load(); |
1710 | 0 | stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load(); |
1711 | 0 | { |
1712 | 0 | LOCK(peer->m_headers_sync_mutex); |
1713 | 0 | if (peer->m_headers_sync) { |
1714 | 0 | stats.presync_height = peer->m_headers_sync->GetPresyncHeight(); |
1715 | 0 | } |
1716 | 0 | } |
1717 | 0 | stats.time_offset = peer->m_time_offset; |
1718 | |
1719 | 0 | return true; |
1720 | 0 | } |
1721 | | |
1722 | | std::vector<TxOrphanage::OrphanTxBase> PeerManagerImpl::GetOrphanTransactions() |
1723 | 0 | { |
1724 | 0 | LOCK(m_tx_download_mutex); |
1725 | 0 | return m_txdownloadman.GetOrphanTransactions(); |
1726 | 0 | } |
1727 | | |
1728 | | PeerManagerInfo PeerManagerImpl::GetInfo() const |
1729 | 0 | { |
1730 | 0 | return PeerManagerInfo{ |
1731 | 0 | .median_outbound_time_offset = m_outbound_time_offsets.Median(), |
1732 | 0 | .ignores_incoming_txs = m_opts.ignore_incoming_txs, |
1733 | 0 | }; |
1734 | 0 | } |
1735 | | |
1736 | | void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx) |
1737 | 0 | { |
1738 | 0 | if (m_opts.max_extra_txs <= 0) |
1739 | 0 | return; |
1740 | 0 | if (!vExtraTxnForCompact.size()) |
1741 | 0 | vExtraTxnForCompact.resize(m_opts.max_extra_txs); |
1742 | 0 | vExtraTxnForCompact[vExtraTxnForCompactIt] = tx; |
1743 | 0 | vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs; |
1744 | 0 | } |
1745 | | |
1746 | | void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message) |
1747 | 0 | { |
1748 | 0 | LOCK(peer.m_misbehavior_mutex); |
1749 | |
1750 | 0 | const std::string message_prefixed = message.empty() ? "" : (": " + message); |
1751 | 0 | peer.m_should_discourage = true; |
1752 | 0 | LogDebug(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed); |
1753 | 0 | } |
1754 | | |
1755 | | void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state, |
1756 | | bool via_compact_block, const std::string& message) |
1757 | 0 | { |
1758 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
1759 | 0 | switch (state.GetResult()) { |
1760 | 0 | case BlockValidationResult::BLOCK_RESULT_UNSET: |
1761 | 0 | break; |
1762 | 0 | case BlockValidationResult::BLOCK_HEADER_LOW_WORK: |
1763 | | // We didn't try to process the block because the header chain may have |
1764 | | // too little work. |
1765 | 0 | break; |
1766 | | // The node is providing invalid data: |
1767 | 0 | case BlockValidationResult::BLOCK_CONSENSUS: |
1768 | 0 | case BlockValidationResult::BLOCK_MUTATED: |
1769 | 0 | if (!via_compact_block) { |
1770 | 0 | if (peer) Misbehaving(*peer, message); |
1771 | 0 | return; |
1772 | 0 | } |
1773 | 0 | break; |
1774 | 0 | case BlockValidationResult::BLOCK_CACHED_INVALID: |
1775 | 0 | { |
1776 | | // Discourage outbound (but not inbound) peers if on an invalid chain. |
1777 | | // Exempt HB compact block peers. Manual connections are always protected from discouragement. |
1778 | 0 | if (peer && !via_compact_block && !peer->m_is_inbound) { |
1779 | 0 | if (peer) Misbehaving(*peer, message); |
1780 | 0 | return; |
1781 | 0 | } |
1782 | 0 | break; |
1783 | 0 | } |
1784 | 0 | case BlockValidationResult::BLOCK_INVALID_HEADER: |
1785 | 0 | case BlockValidationResult::BLOCK_CHECKPOINT: |
1786 | 0 | case BlockValidationResult::BLOCK_INVALID_PREV: |
1787 | 0 | if (peer) Misbehaving(*peer, message); |
1788 | 0 | return; |
1789 | | // Conflicting (but not necessarily invalid) data or different policy: |
1790 | 0 | case BlockValidationResult::BLOCK_MISSING_PREV: |
1791 | 0 | if (peer) Misbehaving(*peer, message); |
1792 | 0 | return; |
1793 | 0 | case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE: |
1794 | 0 | case BlockValidationResult::BLOCK_TIME_FUTURE: |
1795 | 0 | break; |
1796 | 0 | } |
1797 | 0 | if (message != "") { |
1798 | 0 | LogDebug(BCLog::NET, "peer=%d: %s\n", nodeid, message); |
1799 | 0 | } |
1800 | 0 | } |
1801 | | |
1802 | | void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) |
1803 | 0 | { |
1804 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
1805 | 0 | switch (state.GetResult()) { |
1806 | 0 | case TxValidationResult::TX_RESULT_UNSET: |
1807 | 0 | break; |
1808 | | // The node is providing invalid data: |
1809 | 0 | case TxValidationResult::TX_CONSENSUS: |
1810 | 0 | if (peer) Misbehaving(*peer, ""); |
1811 | 0 | return; |
1812 | | // Conflicting (but not necessarily invalid) data or different policy: |
1813 | 0 | case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: |
1814 | 0 | case TxValidationResult::TX_INPUTS_NOT_STANDARD: |
1815 | 0 | case TxValidationResult::TX_NOT_STANDARD: |
1816 | 0 | case TxValidationResult::TX_MISSING_INPUTS: |
1817 | 0 | case TxValidationResult::TX_PREMATURE_SPEND: |
1818 | 0 | case TxValidationResult::TX_WITNESS_MUTATED: |
1819 | 0 | case TxValidationResult::TX_WITNESS_STRIPPED: |
1820 | 0 | case TxValidationResult::TX_CONFLICT: |
1821 | 0 | case TxValidationResult::TX_MEMPOOL_POLICY: |
1822 | 0 | case TxValidationResult::TX_NO_MEMPOOL: |
1823 | 0 | case TxValidationResult::TX_RECONSIDERABLE: |
1824 | 0 | case TxValidationResult::TX_UNKNOWN: |
1825 | 0 | break; |
1826 | 0 | } |
1827 | 0 | } |
1828 | | |
1829 | | bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) |
1830 | 0 | { |
1831 | 0 | AssertLockHeld(cs_main); |
1832 | 0 | if (m_chainman.ActiveChain().Contains(pindex)) return true; |
1833 | 0 | return pindex->IsValid(BLOCK_VALID_SCRIPTS) && (m_chainman.m_best_header != nullptr) && |
1834 | 0 | (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && |
1835 | 0 | (GetBlockProofEquivalentTime(*m_chainman.m_best_header, *pindex, *m_chainman.m_best_header, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT); |
1836 | 0 | } |
1837 | | |
1838 | | std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index) |
1839 | 0 | { |
1840 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) return "Loading blocks ..."; |
1841 | | |
1842 | | // Ensure this peer exists and hasn't been disconnected |
1843 | 0 | PeerRef peer = GetPeerRef(peer_id); |
1844 | 0 | if (peer == nullptr) return "Peer does not exist"; |
1845 | | |
1846 | | // Ignore pre-segwit peers |
1847 | 0 | if (!CanServeWitnesses(*peer)) return "Pre-SegWit peer"; |
1848 | | |
1849 | 0 | LOCK(cs_main); |
1850 | | |
1851 | | // Forget about all prior requests |
1852 | 0 | RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt); |
1853 | | |
1854 | | // Mark block as in-flight |
1855 | 0 | if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer"; |
1856 | | |
1857 | | // Construct message to request the block |
1858 | 0 | const uint256& hash{block_index.GetBlockHash()}; |
1859 | 0 | std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)}; |
1860 | | |
1861 | | // Send block request message to the peer |
1862 | 0 | bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) { |
1863 | 0 | this->MakeAndPushMessage(*node, NetMsgType::GETDATA, invs); |
1864 | 0 | return true; |
1865 | 0 | }); |
1866 | |
1867 | 0 | if (!success) return "Peer not fully connected"; |
1868 | | |
1869 | 0 | LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", |
1870 | 0 | hash.ToString(), peer_id); |
1871 | 0 | return std::nullopt; |
1872 | 0 | } |
1873 | | |
1874 | | std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman, |
1875 | | BanMan* banman, ChainstateManager& chainman, |
1876 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
1877 | 0 | { |
1878 | 0 | return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts); |
1879 | 0 | } |
1880 | | |
1881 | | PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman, |
1882 | | BanMan* banman, ChainstateManager& chainman, |
1883 | | CTxMemPool& pool, node::Warnings& warnings, Options opts) |
1884 | 0 | : m_rng{opts.deterministic_rng}, |
1885 | 0 | m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng}, |
1886 | 0 | m_chainparams(chainman.GetParams()), |
1887 | 0 | m_connman(connman), |
1888 | 0 | m_addrman(addrman), |
1889 | 0 | m_banman(banman), |
1890 | 0 | m_chainman(chainman), |
1891 | 0 | m_mempool(pool), |
1892 | 0 | m_txdownloadman(node::TxDownloadOptions{pool, m_rng, opts.max_orphan_txs, opts.deterministic_rng}), |
1893 | 0 | m_warnings{warnings}, |
1894 | 0 | m_opts{opts} |
1895 | 0 | { |
1896 | | // While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation. |
1897 | | // This argument can go away after Erlay support is complete. |
1898 | 0 | if (opts.reconcile_txs) { |
1899 | 0 | m_txreconciliation = std::make_unique<TxReconciliationTracker>(TXRECONCILIATION_VERSION); |
1900 | 0 | } |
1901 | 0 | } |
1902 | | |
1903 | | void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler) |
1904 | 0 | { |
1905 | | // Stale tip checking and peer eviction are on two different timers, but we |
1906 | | // don't want them to get out of sync due to drift in the scheduler, so we |
1907 | | // combine them in one function and schedule at the quicker (peer-eviction) |
1908 | | // timer. |
1909 | 0 | static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); |
1910 | 0 | scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); |
1911 | | |
1912 | | // schedule next run for 10-15 minutes in the future |
1913 | 0 | const auto delta = 10min + FastRandomContext().randrange<std::chrono::milliseconds>(5min); |
1914 | 0 | scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); |
1915 | 0 | } |
1916 | | |
1917 | | void PeerManagerImpl::ActiveTipChange(const CBlockIndex& new_tip, bool is_ibd) |
1918 | 0 | { |
1919 | | // Ensure mempool mutex was released, otherwise deadlock may occur if another thread holding |
1920 | | // m_tx_download_mutex waits on the mempool mutex. |
1921 | 0 | AssertLockNotHeld(m_mempool.cs); |
1922 | 0 | AssertLockNotHeld(m_tx_download_mutex); |
1923 | |
1924 | 0 | if (!is_ibd) { |
1925 | 0 | LOCK(m_tx_download_mutex); |
1926 | | // If the chain tip has changed, previously rejected transactions might now be valid, e.g. due |
1927 | | // to a timelock. Reset the rejection filters to give those transactions another chance if we |
1928 | | // see them again. |
1929 | 0 | m_txdownloadman.ActiveTipChange(); |
1930 | 0 | } |
1931 | 0 | } |
1932 | | |
1933 | | /** |
1934 | | * Evict orphan txn pool entries based on a newly connected |
1935 | | * block, remember the recently confirmed transactions, and delete tracked |
1936 | | * announcements for them. Also save the time of the last tip update and |
1937 | | * possibly reduce the dynamic block stalling timeout.
1938 | | */ |
1939 | | void PeerManagerImpl::BlockConnected( |
1940 | | ChainstateRole role, |
1941 | | const std::shared_ptr<const CBlock>& pblock, |
1942 | | const CBlockIndex* pindex) |
1943 | 0 | { |
1944 | | // Update this for all chainstate roles so that we don't mistakenly see peers |
1945 | | // helping us do background IBD as having a stale tip. |
1946 | 0 | m_last_tip_update = GetTime<std::chrono::seconds>(); |
1947 | | |
1948 | | // In case the dynamic timeout was doubled once or more, reduce it slowly back to its default value |
1949 | 0 | auto stalling_timeout = m_block_stalling_timeout.load(); |
1950 | 0 | Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT); |
1951 | 0 | if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) { |
1952 | 0 | const auto new_timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(stalling_timeout * 0.85), BLOCK_STALLING_TIMEOUT_DEFAULT); |
1953 | 0 | if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { |
1954 | 0 | LogDebug(BCLog::NET, "Decreased stalling timeout to %d seconds\n", count_seconds(new_timeout)); |
1955 | 0 | } |
1956 | 0 | } |
1957 | | |
1958 | | // The following task can be skipped since we don't maintain a mempool for |
1959 | | // the ibd/background chainstate. |
1960 | 0 | if (role == ChainstateRole::BACKGROUND) { |
1961 | 0 | return; |
1962 | 0 | } |
1963 | 0 | LOCK(m_tx_download_mutex); |
1964 | 0 | m_txdownloadman.BlockConnected(pblock); |
1965 | 0 | } |
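
The stalling timeout is doubled elsewhere when a download stalls; the 0.85 factor above walks it back multiplicatively toward the default once blocks connect again. A sketch of the decay, assuming a 2-second default (the value of BLOCK_STALLING_TIMEOUT_DEFAULT is an assumption here) and a timeout that was doubled up to 16s:

    #include <algorithm>
    #include <chrono>
    #include <iostream>

    using namespace std::chrono_literals;

    int main()
    {
        const auto timeout_default = 2s;       // assumed default
        std::chrono::seconds timeout = 16s;    // e.g. doubled three times while stalling

        for (int block = 1; block <= 10; ++block) {
            // Shrink by 15% per connected block, floored at the default.
            timeout = std::max(std::chrono::duration_cast<std::chrono::seconds>(timeout * 0.85),
                               timeout_default);
            std::cout << "after block " << block << ": " << timeout.count() << "s\n";
        }
        // 13s, 11s, 9s, ... until the 2s floor is reached.
    }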
1966 | | |
1967 | | void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &block, const CBlockIndex* pindex) |
1968 | 0 | { |
1969 | 0 | LOCK(m_tx_download_mutex); |
1970 | 0 | m_txdownloadman.BlockDisconnected(); |
1971 | 0 | } |
1972 | | |
1973 | | /** |
1974 | | * Maintain state about the best-seen block and fast-announce a compact block |
1975 | | * to compatible peers. |
1976 | | */ |
1977 | | void PeerManagerImpl::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) |
1978 | 0 | { |
1979 | 0 | auto pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock, FastRandomContext().rand64()); |
1980 | |
1981 | 0 | LOCK(cs_main); |
1982 | |
1983 | 0 | if (pindex->nHeight <= m_highest_fast_announce) |
1984 | 0 | return; |
1985 | 0 | m_highest_fast_announce = pindex->nHeight; |
1986 | |
1987 | 0 | if (!DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_SEGWIT)) return; |
1988 | | |
1989 | 0 | uint256 hashBlock(pblock->GetHash()); |
1990 | 0 | const std::shared_future<CSerializedNetMsg> lazy_ser{ |
1991 | 0 | std::async(std::launch::deferred, [&] { return NetMsg::Make(NetMsgType::CMPCTBLOCK, *pcmpctblock); })}; |
1992 | |
1993 | 0 | { |
1994 | 0 | auto most_recent_block_txs = std::make_unique<std::map<uint256, CTransactionRef>>(); |
1995 | 0 | for (const auto& tx : pblock->vtx) { |
1996 | 0 | most_recent_block_txs->emplace(tx->GetHash(), tx); |
1997 | 0 | most_recent_block_txs->emplace(tx->GetWitnessHash(), tx); |
1998 | 0 | } |
1999 | |
2000 | 0 | LOCK(m_most_recent_block_mutex); |
2001 | 0 | m_most_recent_block_hash = hashBlock; |
2002 | 0 | m_most_recent_block = pblock; |
2003 | 0 | m_most_recent_compact_block = pcmpctblock; |
2004 | 0 | m_most_recent_block_txs = std::move(most_recent_block_txs); |
2005 | 0 | } |
2006 | |
2007 | 0 | m_connman.ForEachNode([this, pindex, &lazy_ser, &hashBlock](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
2008 | 0 | AssertLockHeld(::cs_main); |
2009 | |
2010 | 0 | if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) |
2011 | 0 | return; |
2012 | 0 | ProcessBlockAvailability(pnode->GetId()); |
2013 | 0 | CNodeState &state = *State(pnode->GetId()); |
2014 | | // If the peer already has the previous block, or we have announced it to them,
2015 | | // but we don't think they have this one, go ahead and announce it
2016 | 0 | if (state.m_requested_hb_cmpctblocks && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { |
2017 | |
2018 | 0 | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", |
2019 | 0 | hashBlock.ToString(), pnode->GetId()); |
2020 | |
2021 | 0 | const CSerializedNetMsg& ser_cmpctblock{lazy_ser.get()}; |
2022 | 0 | PushMessage(*pnode, ser_cmpctblock.Copy()); |
2023 | 0 | state.pindexBestHeaderSent = pindex; |
2024 | 0 | } |
2025 | 0 | }); |
2026 | 0 | } |
2027 | | |
2028 | | /** |
2029 | | * Update our best height and announce any block hashes which weren't previously |
2030 | | * in m_chainman.ActiveChain() to our peers. |
2031 | | */ |
2032 | | void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) |
2033 | 0 | { |
2034 | 0 | SetBestBlock(pindexNew->nHeight, std::chrono::seconds{pindexNew->GetBlockTime()}); |
2035 | | |
2036 | | // Don't relay inventory during initial block download. |
2037 | 0 | if (fInitialDownload) return; |
2038 | | |
2039 | | // Find the hashes of all blocks that weren't previously in the best chain. |
2040 | 0 | std::vector<uint256> vHashes; |
2041 | 0 | const CBlockIndex *pindexToAnnounce = pindexNew; |
2042 | 0 | while (pindexToAnnounce != pindexFork) { |
2043 | 0 | vHashes.push_back(pindexToAnnounce->GetBlockHash()); |
2044 | 0 | pindexToAnnounce = pindexToAnnounce->pprev; |
2045 | 0 | if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { |
2046 | | // Limit announcements in case of a huge reorganization. |
2047 | | // Rely on the peer's synchronization mechanism in that case. |
2048 | 0 | break; |
2049 | 0 | } |
2050 | 0 | } |
2051 | |
2052 | 0 | { |
2053 | 0 | LOCK(m_peer_mutex); |
2054 | 0 | for (auto& it : m_peer_map) { |
2055 | 0 | Peer& peer = *it.second; |
2056 | 0 | LOCK(peer.m_block_inv_mutex); |
2057 | 0 | for (const uint256& hash : vHashes | std::views::reverse) { |
2058 | 0 | peer.m_blocks_for_headers_relay.push_back(hash); |
2059 | 0 | } |
2060 | 0 | } |
2061 | 0 | } |
2062 | |
2063 | 0 | m_connman.WakeMessageHandler(); |
2064 | 0 | } |
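
A minimal sketch of the walk above, on a toy index type (ToyIndex, integer hashes and kMaxToAnnounce are illustrative stand-ins, not the real types): hashes are gathered newest-first by following pprev, capped so a huge reorg doesn't flood peers, and announced in reverse so the oldest new block goes out first:

    #include <cstddef>
    #include <vector>

    struct ToyIndex {
        int hash;               // stand-in for the 256-bit block hash
        const ToyIndex* pprev;  // previous block, as in CBlockIndex
    };

    constexpr std::size_t kMaxToAnnounce{8}; // stands in for MAX_BLOCKS_TO_ANNOUNCE

    // Collect hashes from the new tip back to the fork point, newest first.
    std::vector<int> HashesToAnnounce(const ToyIndex* tip, const ToyIndex* fork)
    {
        std::vector<int> hashes;
        for (const ToyIndex* p = tip; p != fork; p = p->pprev) {
            hashes.push_back(p->hash);
            // Huge reorg: stop and let the peer's own sync catch it up.
            if (hashes.size() == kMaxToAnnounce) break;
        }
        // Newest first here; the caller walks it in reverse so peers hear
        // about the oldest new block first.
        return hashes;
    }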
2065 | | |
2066 | | /** |
2067 | | * Handle invalid block rejection and consequent peer discouragement, maintain which |
2068 | | * peers announce compact blocks. |
2069 | | */ |
2070 | | void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationState& state) |
2071 | 0 | { |
2072 | 0 | LOCK(cs_main); |
2073 | |
2074 | 0 | const uint256 hash(block.GetHash()); |
2075 | 0 | std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash); |
2076 | | |
2077 | | // If the block failed validation, we know where it came from, and if we're still
2078 | | // connected to that peer, maybe punish.
2079 | 0 | if (state.IsInvalid() && |
2080 | 0 | it != mapBlockSource.end() && |
2081 | 0 | State(it->second.first)) { |
2082 | 0 | MaybePunishNodeForBlock(/*nodeid=*/ it->second.first, state, /*via_compact_block=*/ !it->second.second); |
2083 | 0 | } |
2084 | | // Check that: |
2085 | | // 1. The block is valid |
2086 | | // 2. We're not in initial block download |
2087 | | // 3. This is currently the best block we're aware of. We haven't updated |
2088 | | // the tip yet so we have no way to check this directly here. Instead we |
2089 | | // just check that there are currently no other blocks in flight. |
2090 | 0 | else if (state.IsValid() && |
2091 | 0 | !m_chainman.IsInitialBlockDownload() && |
2092 | 0 | mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { |
2093 | 0 | if (it != mapBlockSource.end()) { |
2094 | 0 | MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); |
2095 | 0 | } |
2096 | 0 | } |
2097 | 0 | if (it != mapBlockSource.end()) |
2098 | 0 | mapBlockSource.erase(it); |
2099 | 0 | } |
2100 | | |
2101 | | ////////////////////////////////////////////////////////////////////////////// |
2102 | | // |
2103 | | // Messages |
2104 | | // |
2105 | | |
2106 | | bool PeerManagerImpl::AlreadyHaveBlock(const uint256& block_hash) |
2107 | 0 | { |
2108 | 0 | return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; |
2109 | 0 | } |
2110 | | |
2111 | | void PeerManagerImpl::SendPings() |
2112 | 0 | { |
2113 | 0 | LOCK(m_peer_mutex); |
2114 | 0 | for(auto& it : m_peer_map) it.second->m_ping_queued = true; |
2115 | 0 | } |
2116 | | |
2117 | | void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid) |
2118 | 0 | { |
2119 | 0 | LOCK(m_peer_mutex); |
2120 | 0 | for(auto& it : m_peer_map) { |
2121 | 0 | Peer& peer = *it.second; |
2122 | 0 | auto tx_relay = peer.GetTxRelay(); |
2123 | 0 | if (!tx_relay) continue; |
2124 | | |
2125 | 0 | LOCK(tx_relay->m_tx_inventory_mutex); |
2126 | | // Only queue transactions for announcement once the version handshake |
2127 | | // is completed. The time of arrival for these transactions is |
2128 | | // otherwise at risk of leaking to a spy, if the spy is able to |
2129 | | // distinguish transactions received during the handshake from the rest |
2130 | | // in the announcement. |
2131 | 0 | if (tx_relay->m_next_inv_send_time == 0s) continue; |
2132 | | |
2133 | 0 | const uint256& hash{peer.m_wtxid_relay ? wtxid : txid}; |
2134 | 0 | if (!tx_relay->m_tx_inventory_known_filter.contains(hash)) { |
2135 | 0 | tx_relay->m_tx_inventory_to_send.insert(hash); |
2136 | 0 | } |
2137 | 0 | }; |
2138 | 0 | } |
2139 | | |
2140 | | void PeerManagerImpl::RelayAddress(NodeId originator, |
2141 | | const CAddress& addr, |
2142 | | bool fReachable) |
2143 | 0 | { |
2144 | | // We choose the same nodes within a given 24h window (if the list of connected |
2145 | | // nodes does not change) and we don't relay to nodes that already know an |
2146 | | // address. So within 24h we will likely relay a given address once. This is to |
2147 | | // prevent a peer from unjustly giving their address better propagation by sending |
2148 | | // it to us repeatedly. |
2149 | |
2150 | 0 | if (!fReachable && !addr.IsRelayable()) return; |
2151 | | |
2152 | | // Relay to a limited number of other nodes |
2153 | | // Use deterministic randomness to send to the same nodes for 24 hours |
2154 | | // at a time so the m_addr_knowns of the chosen nodes prevent repeats |
2155 | 0 | const uint64_t hash_addr{CServiceHash(0, 0)(addr)}; |
2156 | 0 | const auto current_time{GetTime<std::chrono::seconds>()}; |
2157 | | // Adding address hash makes exact rotation time different per address, while preserving periodicity. |
2158 | 0 | const uint64_t time_addr{(static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) / count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)}; |
2159 | 0 | const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) |
2160 | 0 | .Write(hash_addr) |
2161 | 0 | .Write(time_addr)}; |
2162 | | |
2163 | | // Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers. |
2164 | 0 | unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1; |
2165 | |
2166 | 0 | std::array<std::pair<uint64_t, Peer*>, 2> best{{{0, nullptr}, {0, nullptr}}}; |
2167 | 0 | assert(nRelayNodes <= best.size()); |
2168 | | |
2169 | 0 | LOCK(m_peer_mutex); |
2170 | |
2171 | 0 | for (auto& [id, peer] : m_peer_map) { |
2172 | 0 | if (peer->m_addr_relay_enabled && id != originator && IsAddrCompatible(*peer, addr)) { |
2173 | 0 | uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize(); |
2174 | 0 | for (unsigned int i = 0; i < nRelayNodes; i++) { |
2175 | 0 | if (hashKey > best[i].first) { |
2176 | 0 | std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); |
2177 | 0 | best[i] = std::make_pair(hashKey, peer.get()); |
2178 | 0 | break; |
2179 | 0 | } |
2180 | 0 | } |
2181 | 0 | } |
2182 | 0 | }; |
2183 | |
2184 | 0 | for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { |
2185 | 0 | PushAddress(*best[i].second, addr); |
2186 | 0 | } |
2187 | 0 | } |
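
A minimal sketch of the selection above, with std::hash standing in for the keyed SipHash (so this is not the actual randomizer): because each key mixes in only the address hash, the 24-hour time bucket, and the peer id, re-submitting the same address within one rotation interval keeps selecting the same one or two peers:

    #include <array>
    #include <cstdint>
    #include <functional>
    #include <utility>
    #include <vector>

    // Pick the one or two peers with the highest keyed hash; the key is
    // stable for a full rotation interval, so the choice is too.
    std::array<int64_t, 2> PickRelayTargets(const std::vector<int64_t>& peer_ids,
                                            uint64_t addr_hash, uint64_t time_bucket,
                                            unsigned n_relay /* 1 or 2 */)
    {
        std::array<std::pair<uint64_t, int64_t>, 2> best{{{0, -1}, {0, -1}}};
        for (int64_t id : peer_ids) {
            // std::hash stands in for CSipHasher(hasher).Write(id).Finalize().
            const uint64_t key =
                std::hash<uint64_t>{}(addr_hash ^ time_bucket ^ static_cast<uint64_t>(id));
            for (unsigned i = 0; i < n_relay; ++i) {
                if (key > best[i].first) {
                    // Shift lower-ranked entries down, then insert here.
                    for (unsigned j = n_relay - 1; j > i; --j) best[j] = best[j - 1];
                    best[i] = {key, id};
                    break;
                }
            }
        }
        return {best[0].second, best[1].second}; // -1 means "no such peer"
    }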
2188 | | |
2189 | | void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& inv) |
2190 | 0 | { |
2191 | 0 | std::shared_ptr<const CBlock> a_recent_block; |
2192 | 0 | std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block; |
2193 | 0 | { |
2194 | 0 | LOCK(m_most_recent_block_mutex); |
2195 | 0 | a_recent_block = m_most_recent_block; |
2196 | 0 | a_recent_compact_block = m_most_recent_compact_block; |
2197 | 0 | } |
2198 | |
2199 | 0 | bool need_activate_chain = false; |
2200 | 0 | { |
2201 | 0 | LOCK(cs_main); |
2202 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2203 | 0 | if (pindex) { |
2204 | 0 | if (pindex->HaveNumChainTxs() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && |
2205 | 0 | pindex->IsValid(BLOCK_VALID_TREE)) { |
2206 | | // If we have the block and all of its parents, but have not yet validated it, |
2207 | | // we might be in the middle of connecting it (ie in the unlock of cs_main |
2208 | | // before ActivateBestChain but after AcceptBlock). |
2209 | | // In this case, we need to run ActivateBestChain prior to checking the relay |
2210 | | // conditions below. |
2211 | 0 | need_activate_chain = true; |
2212 | 0 | } |
2213 | 0 | } |
2214 | 0 | } // release cs_main before calling ActivateBestChain |
2215 | 0 | if (need_activate_chain) { |
2216 | 0 | BlockValidationState state; |
2217 | 0 | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { |
2218 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); |
2219 | 0 | } |
2220 | 0 | } |
2221 | |
2222 | 0 | const CBlockIndex* pindex{nullptr}; |
2223 | 0 | const CBlockIndex* tip{nullptr}; |
2224 | 0 | bool can_direct_fetch{false}; |
2225 | 0 | FlatFilePos block_pos{}; |
2226 | 0 | { |
2227 | 0 | LOCK(cs_main); |
2228 | 0 | pindex = m_chainman.m_blockman.LookupBlockIndex(inv.hash); |
2229 | 0 | if (!pindex) { |
2230 | 0 | return; |
2231 | 0 | } |
2232 | 0 | if (!BlockRequestAllowed(pindex)) { |
2233 | 0 | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom.GetId()); |
2234 | 0 | return; |
2235 | 0 | } |
2236 | | // disconnect node in case we have reached the outbound limit for serving historical blocks |
2237 | 0 | if (m_connman.OutboundTargetReached(true) && |
2238 | 0 | (((m_chainman.m_best_header != nullptr) && (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.IsMsgFilteredBlk()) && |
2239 | 0 | !pfrom.HasPermission(NetPermissionFlags::Download) // nodes with the download permission may exceed target |
2240 | 0 | ) { |
2241 | 0 | LogDebug(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId()); |
2242 | 0 | pfrom.fDisconnect = true; |
2243 | 0 | return; |
2244 | 0 | } |
2245 | 0 | tip = m_chainman.ActiveChain().Tip(); |
2246 | | // Avoid leaking prune-height by never sending blocks below the NODE_NETWORK_LIMITED threshold |
2247 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan) && ( |
2248 | 0 | (((peer.m_our_services & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) && (tip->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2 /* add two blocks buffer extension for possible races */) ) |
2249 | 0 | )) { |
2250 | 0 | LogDebug(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED threshold, disconnect peer=%d\n", pfrom.GetId()); |
2251 | | //disconnect node and prevent it from stalling (would otherwise wait for the missing block) |
2252 | 0 | pfrom.fDisconnect = true; |
2253 | 0 | return; |
2254 | 0 | } |
2255 | | // Pruned nodes may have deleted the block, so check whether |
2256 | | // it's available before trying to send. |
2257 | 0 | if (!(pindex->nStatus & BLOCK_HAVE_DATA)) { |
2258 | 0 | return; |
2259 | 0 | } |
2260 | 0 | can_direct_fetch = CanDirectFetch(); |
2261 | 0 | block_pos = pindex->GetBlockPos(); |
2262 | 0 | } |
2263 | | |
2264 | 0 | std::shared_ptr<const CBlock> pblock; |
2265 | 0 | if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { |
2266 | 0 | pblock = a_recent_block; |
2267 | 0 | } else if (inv.IsMsgWitnessBlk()) { |
2268 | | // Fast-path: in this case it is possible to serve the block directly from disk, |
2269 | | // as the network format matches the format on disk |
2270 | 0 | std::vector<uint8_t> block_data; |
2271 | 0 | if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) { |
2272 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { |
2273 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId()); |
2274 | 0 | } else { |
2275 | 0 | LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId()); |
2276 | 0 | } |
2277 | 0 | pfrom.fDisconnect = true; |
2278 | 0 | return; |
2279 | 0 | } |
2280 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, Span{block_data}); |
2281 | | // Don't set pblock as we've sent the block |
2282 | 0 | } else { |
2283 | | // Send block from disk |
2284 | 0 | std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>(); |
2285 | 0 | if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) { |
2286 | 0 | if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { |
2287 | 0 | LogDebug(BCLog::NET, "Block was pruned before it could be read, disconnect peer=%s\n", pfrom.GetId()); |
2288 | 0 | } else { |
2289 | 0 | LogError("Cannot load block from disk, disconnect peer=%d\n", pfrom.GetId()); |
2290 | 0 | } |
2291 | 0 | pfrom.fDisconnect = true; |
2292 | 0 | return; |
2293 | 0 | } |
2294 | 0 | pblock = pblockRead; |
2295 | 0 | } |
2296 | 0 | if (pblock) { |
2297 | 0 | if (inv.IsMsgBlk()) { |
2298 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_NO_WITNESS(*pblock)); |
2299 | 0 | } else if (inv.IsMsgWitnessBlk()) { |
2300 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2301 | 0 | } else if (inv.IsMsgFilteredBlk()) { |
2302 | 0 | bool sendMerkleBlock = false; |
2303 | 0 | CMerkleBlock merkleBlock; |
2304 | 0 | if (auto tx_relay = peer.GetTxRelay(); tx_relay != nullptr) { |
2305 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
2306 | 0 | if (tx_relay->m_bloom_filter) { |
2307 | 0 | sendMerkleBlock = true; |
2308 | 0 | merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter); |
2309 | 0 | } |
2310 | 0 | } |
2311 | 0 | if (sendMerkleBlock) { |
2312 | 0 | MakeAndPushMessage(pfrom, NetMsgType::MERKLEBLOCK, merkleBlock); |
2313 | | // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see |
2314 | | // This avoids hurting performance by pointlessly requiring a round-trip |
2315 | | // Note that there is currently no way for a node to request any single transaction we didn't send here -
2316 | | // they must either disconnect and retry or request the full block.
2317 | | // Thus, the protocol spec as specified allows us to provide duplicate txn here;
2318 | | // however, we MUST always provide at least what the remote peer needs.
2319 | 0 | typedef std::pair<unsigned int, uint256> PairType; |
2320 | 0 | for (PairType& pair : merkleBlock.vMatchedTxn) |
2321 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, TX_NO_WITNESS(*pblock->vtx[pair.first])); |
2322 | 0 | } |
2323 | | // else |
2324 | | // no response |
2325 | 0 | } else if (inv.IsMsgCmpctBlk()) { |
2326 | | // If a peer is asking for old blocks, we're almost guaranteed |
2327 | | // they won't have a useful mempool to match against a compact block, |
2328 | | // and we don't feel like constructing the object for them, so |
2329 | | // instead we respond with the full, non-compact block. |
2330 | 0 | if (can_direct_fetch && pindex->nHeight >= tip->nHeight - MAX_CMPCTBLOCK_DEPTH) { |
2331 | 0 | if (a_recent_compact_block && a_recent_compact_block->header.GetHash() == pindex->GetBlockHash()) { |
2332 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, *a_recent_compact_block); |
2333 | 0 | } else { |
2334 | 0 | CBlockHeaderAndShortTxIDs cmpctblock{*pblock, m_rng.rand64()}; |
2335 | 0 | MakeAndPushMessage(pfrom, NetMsgType::CMPCTBLOCK, cmpctblock); |
2336 | 0 | } |
2337 | 0 | } else { |
2338 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCK, TX_WITH_WITNESS(*pblock)); |
2339 | 0 | } |
2340 | 0 | } |
2341 | 0 | } |
2342 | |
2343 | 0 | { |
2344 | 0 | LOCK(peer.m_block_inv_mutex); |
2345 | | // Trigger the peer node to send a getblocks request for the next batch of inventory |
2346 | 0 | if (inv.hash == peer.m_continuation_block) { |
2347 | | // Send immediately. This must send even if redundant, |
2348 | | // and we want it right after the last block so they don't |
2349 | | // wait for other stuff first. |
2350 | 0 | std::vector<CInv> vInv; |
2351 | 0 | vInv.emplace_back(MSG_BLOCK, tip->GetBlockHash()); |
2352 | 0 | MakeAndPushMessage(pfrom, NetMsgType::INV, vInv); |
2353 | 0 | peer.m_continuation_block.SetNull(); |
2354 | 0 | } |
2355 | 0 | } |
2356 | 0 | } |
2357 | | |
2358 | | CTransactionRef PeerManagerImpl::FindTxForGetData(const Peer::TxRelay& tx_relay, const GenTxid& gtxid) |
2359 | 0 | { |
2360 | | // If a tx was in the mempool prior to the last INV for this peer, permit the request. |
2361 | 0 | auto txinfo = m_mempool.info_for_relay(gtxid, tx_relay.m_last_inv_sequence); |
2362 | 0 | if (txinfo.tx) { |
2363 | 0 | return std::move(txinfo.tx); |
2364 | 0 | } |
2365 | | |
2366 | | // Or it might be from the most recent block |
2367 | 0 | { |
2368 | 0 | LOCK(m_most_recent_block_mutex); |
2369 | 0 | if (m_most_recent_block_txs != nullptr) { |
2370 | 0 | auto it = m_most_recent_block_txs->find(gtxid.GetHash()); |
2371 | 0 | if (it != m_most_recent_block_txs->end()) return it->second; |
2372 | 0 | } |
2373 | 0 | } |
2374 | | |
2375 | 0 | return {}; |
2376 | 0 | } |
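
A minimal sketch of the two-tier lookup above, with plain maps standing in for the mempool and the most-recent-block cache (ToyTx and integer txids are illustrative). The sequence comparison models info_for_relay(): only transactions already in the mempool when our last INV went out are served, so a peer cannot probe us to learn mempool arrival times:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>

    struct ToyTx { std::string data; };
    using ToyTxRef = std::shared_ptr<const ToyTx>;
    struct ToyMempoolEntry { ToyTxRef tx; uint64_t entry_sequence; };

    ToyTxRef FindForGetData(const std::map<int, ToyMempoolEntry>& mempool,
                            const std::map<int, ToyTxRef>& recent_block_txs,
                            int txid, uint64_t last_inv_sequence)
    {
        // Serve from the mempool only if the tx was already there when the
        // last INV went out to this peer.
        if (auto it = mempool.find(txid); it != mempool.end() &&
            it->second.entry_sequence <= last_inv_sequence) {
            return it->second.tx;
        }
        // Otherwise it may have just been confirmed in the most recent block.
        if (auto it = recent_block_txs.find(txid); it != recent_block_txs.end()) {
            return it->second;
        }
        return nullptr; // caller replies NOTFOUND
    }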
2377 | | |
2378 | | void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) |
2379 | 0 | { |
2380 | 0 | AssertLockNotHeld(cs_main); |
2381 | |
2382 | 0 | auto tx_relay = peer.GetTxRelay(); |
2383 | |
2384 | 0 | std::deque<CInv>::iterator it = peer.m_getdata_requests.begin(); |
2385 | 0 | std::vector<CInv> vNotFound; |
2386 | | |
2387 | | // Process as many TX items from the front of the getdata queue as |
2388 | | // possible, since they're common and it's efficient to batch process |
2389 | | // them. |
2390 | 0 | while (it != peer.m_getdata_requests.end() && it->IsGenTxMsg()) { |
2391 | 0 | if (interruptMsgProc) return; |
2392 | | // The send buffer provides backpressure. If there's no space in |
2393 | | // the buffer, pause processing until the next call. |
2394 | 0 | if (pfrom.fPauseSend) break; |
2395 | | |
2396 | 0 | const CInv &inv = *it++; |
2397 | |
2398 | 0 | if (tx_relay == nullptr) { |
2399 | | // Ignore GETDATA requests for transactions from block-relay-only |
2400 | | // peers and peers that asked us not to announce transactions. |
2401 | 0 | continue; |
2402 | 0 | } |
2403 | | |
2404 | 0 | CTransactionRef tx = FindTxForGetData(*tx_relay, ToGenTxid(inv)); |
2405 | 0 | if (tx) { |
2406 | | // WTX and WITNESS_TX imply we serialize with witness |
2407 | 0 | const auto maybe_with_witness = (inv.IsMsgTx() ? TX_NO_WITNESS : TX_WITH_WITNESS); |
2408 | 0 | MakeAndPushMessage(pfrom, NetMsgType::TX, maybe_with_witness(*tx)); |
2409 | 0 | m_mempool.RemoveUnbroadcastTx(tx->GetHash()); |
2410 | 0 | } else { |
2411 | 0 | vNotFound.push_back(inv); |
2412 | 0 | } |
2413 | 0 | } |
2414 | | |
2415 | | // Only process one BLOCK item per call, since they're uncommon and can be |
2416 | | // expensive to process. |
2417 | 0 | if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { |
2418 | 0 | const CInv &inv = *it++; |
2419 | 0 | if (inv.IsGenBlkMsg()) { |
2420 | 0 | ProcessGetBlockData(pfrom, peer, inv); |
2421 | 0 | } |
2422 | | // else: If the first item on the queue is an unknown type, we erase it |
2423 | | // and continue processing the queue on the next call. |
2424 | 0 | } |
2425 | |
2426 | 0 | peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it); |
2427 | |
|
2428 | 0 | if (!vNotFound.empty()) { |
2429 | | // Let the peer know that we didn't find what it asked for, so it doesn't |
2430 | | // have to wait around forever. |
2431 | | // SPV clients care about this message: it's needed when they are |
2432 | | // recursively walking the dependencies of relevant unconfirmed |
2433 | | // transactions. SPV clients want to do that because they want to know |
2434 | | // about (and store and rebroadcast and risk analyze) the dependencies |
2435 | | // of transactions relevant to them, without having to download the |
2436 | | // entire memory pool. |
2437 | | // Also, other nodes can use these messages to automatically request a |
2438 | | // transaction from some other peer that announced it, and stop |
2439 | | // waiting for us to respond. |
2440 | | // In normal operation, we often send NOTFOUND messages for parents of |
2441 | | // transactions that we relay; if a peer is missing a parent, they may |
2442 | | // assume we have them and request the parents from us. |
2443 | 0 | MakeAndPushMessage(pfrom, NetMsgType::NOTFOUND, vNotFound); |
2444 | 0 | } |
2445 | 0 | } |
2446 | | |
2447 | | uint32_t PeerManagerImpl::GetFetchFlags(const Peer& peer) const |
2448 | 0 | { |
2449 | 0 | uint32_t nFetchFlags = 0; |
2450 | 0 | if (CanServeWitnesses(peer)) { |
2451 | 0 | nFetchFlags |= MSG_WITNESS_FLAG; |
2452 | 0 | } |
2453 | 0 | return nFetchFlags; |
2454 | 0 | } |
2455 | | |
2456 | | void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlock& block, const BlockTransactionsRequest& req) |
2457 | 0 | { |
2458 | 0 | BlockTransactions resp(req); |
2459 | 0 | for (size_t i = 0; i < req.indexes.size(); i++) { |
2460 | 0 | if (req.indexes[i] >= block.vtx.size()) { |
2461 | 0 | Misbehaving(peer, "getblocktxn with out-of-bounds tx indices"); |
2462 | 0 | return; |
2463 | 0 | } |
2464 | 0 | resp.txn[i] = block.vtx[req.indexes[i]]; |
2465 | 0 | } |
2466 | | |
2467 | 0 | MakeAndPushMessage(pfrom, NetMsgType::BLOCKTXN, resp); |
2468 | 0 | } |
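
A minimal sketch of the same bounds check with plain vectors (SelectTxns is a hypothetical helper, not part of the file): a single out-of-range index invalidates the whole request, mirroring the Misbehaving() path above:

    #include <cstddef>
    #include <optional>
    #include <vector>

    // Returns the selected transactions, or nullopt if any index is out of
    // bounds (the real code treats that as misbehavior and sends nothing).
    std::optional<std::vector<int>> SelectTxns(const std::vector<int>& block_txs,
                                               const std::vector<std::size_t>& indexes)
    {
        std::vector<int> resp;
        resp.reserve(indexes.size());
        for (std::size_t idx : indexes) {
            if (idx >= block_txs.size()) return std::nullopt;
            resp.push_back(block_txs[idx]);
        }
        return resp;
    }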
2469 | | |
2470 | | bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer) |
2471 | 0 | { |
2472 | | // Do these headers have proof-of-work matching what's claimed? |
2473 | 0 | if (!HasValidProofOfWork(headers, consensusParams)) { |
2474 | 0 | Misbehaving(peer, "header with invalid proof of work"); |
2475 | 0 | return false; |
2476 | 0 | } |
2477 | | |
2478 | | // Are these headers connected to each other? |
2479 | 0 | if (!CheckHeadersAreContinuous(headers)) { |
2480 | 0 | Misbehaving(peer, "non-continuous headers sequence"); |
2481 | 0 | return false; |
2482 | 0 | } |
2483 | 0 | return true; |
2484 | 0 | } |
2485 | | |
2486 | | arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() |
2487 | 0 | { |
2488 | 0 | arith_uint256 near_chaintip_work = 0; |
2489 | 0 | LOCK(cs_main); |
2490 | 0 | if (m_chainman.ActiveChain().Tip() != nullptr) { |
2491 | 0 | const CBlockIndex *tip = m_chainman.ActiveChain().Tip(); |
2492 | | // Use a 144 block buffer, so that we'll accept headers that fork from |
2493 | | // near our tip. |
2494 | 0 | near_chaintip_work = tip->nChainWork - std::min<arith_uint256>(144*GetBlockProof(*tip), tip->nChainWork); |
2495 | 0 | } |
2496 | 0 | return std::max(near_chaintip_work, m_chainman.MinimumChainWork()); |
2497 | 0 | } |
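
A minimal sketch of the threshold arithmetic above, with uint64_t standing in for arith_uint256:

    #include <algorithm>
    #include <cstdint>

    // The buffer is clamped so the subtraction cannot underflow on a very
    // young chain with less than 144 blocks' worth of work.
    uint64_t AntiDoSWorkThreshold(uint64_t tip_chain_work, uint64_t tip_block_proof,
                                  uint64_t minimum_chain_work)
    {
        const uint64_t buffer = std::min<uint64_t>(144 * tip_block_proof, tip_chain_work);
        const uint64_t near_chaintip_work = tip_chain_work - buffer;
        return std::max(near_chaintip_work, minimum_chain_work);
    }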
2498 | | |
2499 | | /** |
2500 | | * Special handling for unconnecting headers that might be part of a block |
2501 | | * announcement. |
2502 | | * |
2503 | | * We'll send a getheaders message in response to try to connect the chain. |
2504 | | */ |
2505 | | void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, |
2506 | | const std::vector<CBlockHeader>& headers) |
2507 | 0 | { |
2508 | | // Try to fill in the missing headers. |
2509 | 0 | const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)}; |
2510 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) { |
2511 | 0 | LogDebug(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n", |
2512 | 0 | headers[0].GetHash().ToString(), |
2513 | 0 | headers[0].hashPrevBlock.ToString(), |
2514 | 0 | best_header->nHeight, |
2515 | 0 | pfrom.GetId()); |
2516 | 0 | } |
2517 | | |
2518 | | // Set hashLastUnknownBlock for this peer, so that if we |
2519 | | // eventually get the headers - even from a different peer - |
2520 | | // we can use this peer to download. |
2521 | 0 | WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash())); |
2522 | 0 | } |
2523 | | |
2524 | | bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const |
2525 | 0 | { |
2526 | 0 | uint256 hashLastBlock; |
2527 | 0 | for (const CBlockHeader& header : headers) { |
2528 | 0 | if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { |
2529 | 0 | return false; |
2530 | 0 | } |
2531 | 0 | hashLastBlock = header.GetHash(); |
2532 | 0 | } |
2533 | 0 | return true; |
2534 | 0 | } |
2535 | | |
2536 | | bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfrom, std::vector<CBlockHeader>& headers) |
2537 | 0 | { |
2538 | 0 | if (peer.m_headers_sync) { |
2539 | 0 | auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == m_opts.max_headers_result); |
2540 | | // If it is a valid continuation, we should treat the existing getheaders request as responded to. |
2541 | 0 | if (result.success) peer.m_last_getheaders_timestamp = {}; |
2542 | 0 | if (result.request_more) { |
2543 | 0 | auto locator = peer.m_headers_sync->NextHeadersRequestLocator(); |
2544 | | // If we were instructed to ask for a locator, it should not be empty. |
2545 | 0 | Assume(!locator.vHave.empty()); |
2546 | | // We can only be instructed to request more if processing was successful. |
2547 | 0 | Assume(result.success); |
2548 | 0 | if (!locator.vHave.empty()) { |
2549 | | // It should be impossible for the getheaders request to fail, |
2550 | | // because we just cleared the last getheaders timestamp. |
2551 | 0 | bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer); |
2552 | 0 | Assume(sent_getheaders); |
2553 | 0 | LogDebug(BCLog::NET, "more getheaders (from %s) to peer=%d\n", |
2554 | 0 | locator.vHave.front().ToString(), pfrom.GetId()); |
2555 | 0 | } |
2556 | 0 | } |
2557 | |
2558 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) { |
2559 | 0 | peer.m_headers_sync.reset(nullptr); |
2560 | | |
2561 | | // Delete this peer's entry in m_headers_presync_stats. |
2562 | | // If this is m_headers_presync_bestpeer, it will be replaced later |
2563 | | // by the next peer that triggers the else{} branch below. |
2564 | 0 | LOCK(m_headers_presync_mutex); |
2565 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2566 | 0 | } else { |
2567 | | // Build statistics for this peer's sync. |
2568 | 0 | HeadersPresyncStats stats; |
2569 | 0 | stats.first = peer.m_headers_sync->GetPresyncWork(); |
2570 | 0 | if (peer.m_headers_sync->GetState() == HeadersSyncState::State::PRESYNC) { |
2571 | 0 | stats.second = {peer.m_headers_sync->GetPresyncHeight(), |
2572 | 0 | peer.m_headers_sync->GetPresyncTime()}; |
2573 | 0 | } |
2574 | | |
2575 | | // Update statistics in stats. |
2576 | 0 | LOCK(m_headers_presync_mutex); |
2577 | 0 | m_headers_presync_stats[pfrom.GetId()] = stats; |
2578 | 0 | auto best_it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
2579 | 0 | bool best_updated = false; |
2580 | 0 | if (best_it == m_headers_presync_stats.end()) { |
2581 | | // If the cached best peer is outdated, iterate over all remaining ones (including |
2582 | | // newly updated one) to find the best one. |
2583 | 0 | NodeId peer_best{-1}; |
2584 | 0 | const HeadersPresyncStats* stat_best{nullptr}; |
2585 | 0 | for (const auto& [peer, stat] : m_headers_presync_stats) { |
2586 | 0 | if (!stat_best || stat > *stat_best) { |
2587 | 0 | peer_best = peer; |
2588 | 0 | stat_best = &stat; |
2589 | 0 | } |
2590 | 0 | } |
2591 | 0 | m_headers_presync_bestpeer = peer_best; |
2592 | 0 | best_updated = (peer_best == pfrom.GetId()); |
2593 | 0 | } else if (best_it->first == pfrom.GetId() || stats > best_it->second) { |
2594 | | // pfrom was and remains the best peer, or pfrom just became best. |
2595 | 0 | m_headers_presync_bestpeer = pfrom.GetId(); |
2596 | 0 | best_updated = true; |
2597 | 0 | } |
2598 | 0 | if (best_updated && stats.second.has_value()) { |
2599 | | // If the best peer updated, and it is in its first phase, signal. |
2600 | 0 | m_headers_presync_should_signal = true; |
2601 | 0 | } |
2602 | 0 | } |
2603 | |
2604 | 0 | if (result.success) { |
2605 | | // We only overwrite the headers passed in if processing was |
2606 | | // successful. |
2607 | 0 | headers.swap(result.pow_validated_headers); |
2608 | 0 | } |
2609 | |
2610 | 0 | return result.success; |
2611 | 0 | } |
2612 | | // Either we didn't have a sync in progress, or something went wrong |
2613 | | // processing these headers, or we are returning headers to the caller to |
2614 | | // process. |
2615 | 0 | return false; |
2616 | 0 | } |
2617 | | |
2618 | | bool PeerManagerImpl::TryLowWorkHeadersSync(Peer& peer, CNode& pfrom, const CBlockIndex* chain_start_header, std::vector<CBlockHeader>& headers) |
2619 | 0 | { |
2620 | | // Calculate the claimed total work on this chain. |
2621 | 0 | arith_uint256 total_work = chain_start_header->nChainWork + CalculateClaimedHeadersWork(headers); |
2622 | | |
2623 | | // Our dynamic anti-DoS threshold (minimum work required on a headers chain |
2624 | | // before we'll store it) |
2625 | 0 | arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold(); |
2626 | | |
2627 | | // Avoid DoS via low-difficulty-headers by only processing if the headers |
2628 | | // are part of a chain with sufficient work. |
2629 | 0 | if (total_work < minimum_chain_work) { |
2630 | | // Only try to sync with this peer if their headers message was full; |
2631 | | // otherwise they don't have more headers after this so no point in |
2632 | | // trying to sync their too-little-work chain. |
2633 | 0 | if (headers.size() == m_opts.max_headers_result) { |
2634 | | // Note: we could advance to the last header in this set that is |
2635 | | // known to us, rather than starting at the first header (which we |
2636 | | // may already have); however this is unlikely to matter much since |
2637 | | // ProcessHeadersMessage() already handles the case where all |
2638 | | // headers in a received message are already known and are |
2639 | | // ancestors of m_best_header or chainActive.Tip(), by skipping |
2640 | | // this logic in that case. So even if the first header in this set |
2641 | | // of headers is known, some header in this set must be new, so |
2642 | | // advancing to the first unknown header would be a small effect. |
2643 | 0 | LOCK(peer.m_headers_sync_mutex); |
2644 | 0 | peer.m_headers_sync.reset(new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(), |
2645 | 0 | chain_start_header, minimum_chain_work)); |
2646 | | |
2647 | | // Now that a HeadersSyncState object for tracking this synchronization
2648 | | // has been created, process the headers using it as normal. Failures
2649 | | // are handled inside of IsContinuationOfLowWorkHeadersSync.
2650 | 0 | (void)IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
2651 | 0 | } else { |
2652 | 0 | LogDebug(BCLog::NET, "Ignoring low-work chain (height=%u) from peer=%d\n", chain_start_header->nHeight + headers.size(), pfrom.GetId()); |
2653 | 0 | } |
2654 | | |
2655 | | // The peer has not yet given us a chain that meets our work threshold, |
2656 | | // so we want to prevent further processing of the headers in any case. |
2657 | 0 | headers = {}; |
2658 | 0 | return true; |
2659 | 0 | } |
2660 | | |
2661 | 0 | return false; |
2662 | 0 | } |
2663 | | |
2664 | | bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex* header) |
2665 | 0 | { |
2666 | 0 | if (header == nullptr) { |
2667 | 0 | return false; |
2668 | 0 | } else if (m_chainman.m_best_header != nullptr && header == m_chainman.m_best_header->GetAncestor(header->nHeight)) { |
2669 | 0 | return true; |
2670 | 0 | } else if (m_chainman.ActiveChain().Contains(header)) { |
2671 | 0 | return true; |
2672 | 0 | } |
2673 | 0 | return false; |
2674 | 0 | } |
2675 | | |
2676 | | bool PeerManagerImpl::MaybeSendGetHeaders(CNode& pfrom, const CBlockLocator& locator, Peer& peer) |
2677 | 0 | { |
2678 | 0 | const auto current_time = NodeClock::now(); |
2679 | | |
2680 | | // Only allow a new getheaders message to go out if we don't have a recent |
2681 | | // one already in-flight |
2682 | 0 | if (current_time - peer.m_last_getheaders_timestamp > HEADERS_RESPONSE_TIME) { |
2683 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETHEADERS, locator, uint256()); |
2684 | 0 | peer.m_last_getheaders_timestamp = current_time; |
2685 | 0 | return true; |
2686 | 0 | } |
2687 | 0 | return false; |
2688 | 0 | } |
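
A minimal sketch of the in-flight guard above (ToyPeer is an illustrative stand-in; the 2min literal mirrors HEADERS_RESPONSE_TIME defined near the top of the file):

    #include <chrono>

    struct ToyPeer {
        std::chrono::steady_clock::time_point last_getheaders{}; // epoch = "never"
    };

    bool MaybeSendToy(ToyPeer& peer)
    {
        using namespace std::chrono_literals;
        const auto now = std::chrono::steady_clock::now();
        if (now - peer.last_getheaders > 2min) {
            // ... push the GETHEADERS message here ...
            peer.last_getheaders = now; // start a new response window
            return true;
        }
        return false; // a recent request is still outstanding; don't pile on
    }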
2689 | | |
2690 | | /* |
2691 | | * Given a new headers tip ending in last_header, potentially request blocks towards that tip. |
2692 | | * We require that the given tip have at least as much work as our tip, and for |
2693 | | * our current tip to be "close to synced" (see CanDirectFetch()). |
2694 | | */ |
2695 | | void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, const CBlockIndex& last_header) |
2696 | 0 | { |
2697 | 0 | LOCK(cs_main); |
2698 | 0 | CNodeState *nodestate = State(pfrom.GetId()); |
2699 | |
2700 | 0 | if (CanDirectFetch() && last_header.IsValid(BLOCK_VALID_TREE) && m_chainman.ActiveChain().Tip()->nChainWork <= last_header.nChainWork) { |
2701 | 0 | std::vector<const CBlockIndex*> vToFetch; |
2702 | 0 | const CBlockIndex* pindexWalk{&last_header}; |
2703 | | // Calculate all the blocks we'd need to switch to last_header, up to a limit. |
2704 | 0 | while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
2705 | 0 | if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && |
2706 | 0 | !IsBlockRequested(pindexWalk->GetBlockHash()) && |
2707 | 0 | (!DeploymentActiveAt(*pindexWalk, m_chainman, Consensus::DEPLOYMENT_SEGWIT) || CanServeWitnesses(peer))) { |
2708 | | // We don't have this block, and it's not yet in flight. |
2709 | 0 | vToFetch.push_back(pindexWalk); |
2710 | 0 | } |
2711 | 0 | pindexWalk = pindexWalk->pprev; |
2712 | 0 | } |
2713 | | // If pindexWalk still isn't on our main chain, we're looking at a |
2714 | | // very large reorg at a time we think we're close to caught up to |
2715 | | // the main chain -- this shouldn't really happen. Bail out on the |
2716 | | // direct fetch and rely on parallel download instead. |
2717 | 0 | if (!m_chainman.ActiveChain().Contains(pindexWalk)) { |
2718 | 0 | LogDebug(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", |
2719 | 0 | last_header.GetBlockHash().ToString(), |
2720 | 0 | last_header.nHeight); |
2721 | 0 | } else { |
2722 | 0 | std::vector<CInv> vGetData; |
2723 | | // Download as much as possible, from earliest to latest. |
2724 | 0 | for (const CBlockIndex* pindex : vToFetch | std::views::reverse) { |
2725 | 0 | if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
2726 | | // Can't download any more from this peer |
2727 | 0 | break; |
2728 | 0 | } |
2729 | 0 | uint32_t nFetchFlags = GetFetchFlags(peer); |
2730 | 0 | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
2731 | 0 | BlockRequested(pfrom.GetId(), *pindex); |
2732 | 0 | LogDebug(BCLog::NET, "Requesting block %s from peer=%d\n", |
2733 | 0 | pindex->GetBlockHash().ToString(), pfrom.GetId()); |
2734 | 0 | } |
2735 | 0 | if (vGetData.size() > 1) { |
2736 | 0 | LogDebug(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n", |
2737 | 0 | last_header.GetBlockHash().ToString(), |
2738 | 0 | last_header.nHeight); |
2739 | 0 | } |
2740 | 0 | if (vGetData.size() > 0) { |
2741 | 0 | if (!m_opts.ignore_incoming_txs && |
2742 | 0 | nodestate->m_provides_cmpctblocks && |
2743 | 0 | vGetData.size() == 1 && |
2744 | 0 | mapBlocksInFlight.size() == 1 && |
2745 | 0 | last_header.pprev->IsValid(BLOCK_VALID_CHAIN)) { |
2746 | | // In any case, we want to download using a compact block, not a regular one |
2747 | 0 | vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); |
2748 | 0 | } |
2749 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vGetData); |
2750 | 0 | } |
2751 | 0 | } |
2752 | 0 | } |
2753 | 0 | } |
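
A minimal sketch of the fetch-selection walk above, on a toy index (ToyIndex and kMaxInFlight are illustrative stand-ins): step back from the announced header, remember blocks whose data we don't have, and give up if the walk never reaches our active chain:

    #include <cstddef>
    #include <vector>

    struct ToyIndex {
        bool have_data;        // models nStatus & BLOCK_HAVE_DATA
        bool on_active_chain;  // models ActiveChain().Contains()
        const ToyIndex* pprev;
    };

    constexpr std::size_t kMaxInFlight{16}; // stands in for MAX_BLOCKS_IN_TRANSIT_PER_PEER

    // Walk back from the announced header; collect missing blocks (newest
    // first) until the active chain is reached or the limit is hit.
    std::vector<const ToyIndex*> BlocksToDirectFetch(const ToyIndex* last_header)
    {
        std::vector<const ToyIndex*> to_fetch;
        const ToyIndex* walk = last_header;
        while (walk && !walk->on_active_chain && to_fetch.size() <= kMaxInFlight) {
            if (!walk->have_data) to_fetch.push_back(walk);
            walk = walk->pprev;
        }
        // Never met our chain: looks like a very large reorg, so bail out
        // and rely on parallel download instead.
        if (!walk || !walk->on_active_chain) return {};
        // The caller requests these in reverse, earliest block first.
        return to_fetch;
    }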
2754 | | |
2755 | | /** |
2756 | | * Given receipt of headers from a peer ending in last_header, along with |
2757 | | * whether that header was new and whether the headers message was full, |
2758 | | * update the state we keep for the peer. |
2759 | | */ |
2760 | | void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer, |
2761 | | const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers) |
2762 | 0 | { |
2763 | 0 | LOCK(cs_main); |
2764 | 0 | CNodeState *nodestate = State(pfrom.GetId()); |
2765 | |
2766 | 0 | UpdateBlockAvailability(pfrom.GetId(), last_header.GetBlockHash()); |
2767 | | |
2768 | | // From here, pindexBestKnownBlock should be guaranteed to be non-null, |
2769 | | // because it is set in UpdateBlockAvailability. Some nullptr checks |
2770 | | // are still present, however, as belt-and-suspenders. |
2771 | |
2772 | 0 | if (received_new_header && last_header.nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { |
2773 | 0 | nodestate->m_last_block_announcement = GetTime(); |
2774 | 0 | } |
2775 | | |
2776 | | // If we're in IBD, we want outbound peers that will serve us a useful |
2777 | | // chain. Disconnect peers that are on chains with insufficient work. |
2778 | 0 | if (m_chainman.IsInitialBlockDownload() && !may_have_more_headers) { |
2779 | | // If the peer has no more headers to give us, then we know we have |
2780 | | // their tip. |
2781 | 0 | if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < m_chainman.MinimumChainWork()) { |
2782 | | // This peer has too little work on their headers chain to help |
2783 | | // us sync -- disconnect if it is an outbound disconnection |
2784 | | // candidate. |
2785 | | // Note: We compare their tip to the minimum chain work (rather than |
2786 | | // m_chainman.ActiveChain().Tip()) because we won't start block download |
2787 | | // until we have a headers chain that has at least |
2788 | | // the minimum chain work, even if a peer has a chain past our tip, |
2789 | | // as an anti-DoS measure. |
2790 | 0 | if (pfrom.IsOutboundOrBlockRelayConn()) { |
2791 | 0 | LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId()); |
2792 | 0 | pfrom.fDisconnect = true; |
2793 | 0 | } |
2794 | 0 | } |
2795 | 0 | } |
2796 | | |
2797 | | // If this is an outbound full-relay peer, check to see if we should protect |
2798 | | // it from the bad/lagging chain logic. |
2799 | | // Note that outbound block-relay peers are excluded from this protection, and |
2800 | | // thus always subject to eviction under the bad/lagging chain logic. |
2801 | | // See ChainSyncTimeoutState. |
2802 | 0 | if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) { |
2803 | 0 | if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { |
2804 | 0 | LogDebug(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); |
2805 | 0 | nodestate->m_chain_sync.m_protect = true; |
2806 | 0 | ++m_outbound_peers_with_protect_from_disconnect; |
2807 | 0 | } |
2808 | 0 | } |
2809 | 0 | } |
2810 | | |
2811 | | void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer, |
2812 | | std::vector<CBlockHeader>&& headers, |
2813 | | bool via_compact_block) |
2814 | 0 | { |
2815 | 0 | size_t nCount = headers.size(); |
2816 | |
2817 | 0 | if (nCount == 0) { |
2818 | | // Nothing interesting. Stop asking this peer for more headers.
2819 | | // If we were in the middle of headers sync, receiving an empty headers |
2820 | | // message suggests that the peer suddenly has nothing to give us |
2821 | | // (perhaps it reorged to our chain). Clear download state for this peer. |
2822 | 0 | LOCK(peer.m_headers_sync_mutex); |
2823 | 0 | if (peer.m_headers_sync) { |
2824 | 0 | peer.m_headers_sync.reset(nullptr); |
2825 | 0 | LOCK(m_headers_presync_mutex); |
2826 | 0 | m_headers_presync_stats.erase(pfrom.GetId()); |
2827 | 0 | } |
2828 | | // A headers message with no headers cannot be an announcement, so assume |
2829 | | // it is a response to our last getheaders request, if there is one. |
2830 | 0 | peer.m_last_getheaders_timestamp = {}; |
2831 | 0 | return; |
2832 | 0 | } |
2833 | | |
2834 | | // Before we do any processing, make sure these pass basic sanity checks. |
2835 | | // We'll rely on headers having valid proof-of-work further down, as an |
2836 | | // anti-DoS criteria (note: this check is required before passing any |
2837 | | // headers into HeadersSyncState). |
2838 | 0 | if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) { |
2839 | | // Misbehaving() calls are handled within CheckHeadersPoW(), so we can |
2840 | | // just return. (Note that even if a header is announced via compact |
2841 | | // block, the header itself should be valid, so this type of error can |
2842 | | // always be punished.) |
2843 | 0 | return; |
2844 | 0 | } |
2845 | | |
2846 | 0 | const CBlockIndex *pindexLast = nullptr; |
2847 | | |
2848 | | // We'll set already_validated_work to true if these headers are |
2849 | | // successfully processed as part of a low-work headers sync in progress |
2850 | | // (either in PRESYNC or REDOWNLOAD phase). |
2851 | | // If true, this will mean that any headers returned to us (ie during |
2852 | | // REDOWNLOAD) can be validated without further anti-DoS checks. |
2853 | 0 | bool already_validated_work = false; |
2854 | | |
2855 | | // If we're in the middle of headers sync, let it do its magic. |
2856 | 0 | bool have_headers_sync = false; |
2857 | 0 | { |
2858 | 0 | LOCK(peer.m_headers_sync_mutex); |
2859 | |
2860 | 0 | already_validated_work = IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers); |
2861 | | |
2862 | | // The headers we passed in may have been: |
2863 | | // - untouched, perhaps if no headers-sync was in progress, or some |
2864 | | // failure occurred |
2865 | | // - erased, such as if the headers were successfully processed and no |
2866 | | // additional headers processing needs to take place (such as if we |
2867 | | // are still in PRESYNC) |
2868 | | // - replaced with headers that are now ready for validation, such as |
2869 | | // during the REDOWNLOAD phase of a low-work headers sync. |
2870 | | // So just check whether we still have headers that we need to process, |
2871 | | // or not. |
2872 | 0 | if (headers.empty()) { |
2873 | 0 | return; |
2874 | 0 | } |
2875 | | |
2876 | 0 | have_headers_sync = !!peer.m_headers_sync; |
2877 | 0 | } |
2878 | | |
2879 | | // Do these headers connect to something in our block index? |
2880 | 0 | const CBlockIndex *chain_start_header{WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock))}; |
2881 | 0 | bool headers_connect_blockindex{chain_start_header != nullptr}; |
2882 | |
2883 | 0 | if (!headers_connect_blockindex) { |
2884 | | // This could be a BIP 130 block announcement, use |
2885 | | // special logic for handling headers that don't connect, as this |
2886 | | // could be benign. |
2887 | 0 | HandleUnconnectingHeaders(pfrom, peer, headers); |
2888 | 0 | return; |
2889 | 0 | } |
2890 | | |
2891 | | // If headers connect, assume that this is in response to any outstanding getheaders |
2892 | | // request we may have sent, and clear out the time of our last request. Non-connecting |
2893 | | // headers cannot be a response to a getheaders request. |
2894 | 0 | peer.m_last_getheaders_timestamp = {}; |
2895 | | |
2896 | | // If the headers we received are already in memory and an ancestor of |
2897 | | // m_best_header or our tip, skip anti-DoS checks. These headers will not |
2898 | | // use any more memory (and we are not leaking information that could be |
2899 | | // used to fingerprint us). |
2900 | 0 | const CBlockIndex *last_received_header{nullptr}; |
2901 | 0 | { |
2902 | 0 | LOCK(cs_main); |
2903 | 0 | last_received_header = m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash()); |
2904 | 0 | if (IsAncestorOfBestHeaderOrTip(last_received_header)) { |
2905 | 0 | already_validated_work = true; |
2906 | 0 | } |
2907 | 0 | } |
2908 | | |
2909 | | // If our peer has NetPermissionFlags::NoBan privileges, then bypass our |
2910 | | // anti-DoS logic (this saves bandwidth when we connect to a trusted peer |
2911 | | // on startup). |
2912 | 0 | if (pfrom.HasPermission(NetPermissionFlags::NoBan)) { |
2913 | 0 | already_validated_work = true; |
2914 | 0 | } |
2915 | | |
2916 | | // At this point, the headers connect to something in our block index. |
2917 | | // Do anti-DoS checks to determine if we should process or store for later |
2918 | | // processing. |
2919 | 0 | if (!already_validated_work && TryLowWorkHeadersSync(peer, pfrom, |
2920 | 0 | chain_start_header, headers)) { |
2921 | | // If we successfully started a low-work headers sync, then there |
2922 | | // should be no headers to process any further. |
2923 | 0 | Assume(headers.empty()); |
2924 | 0 | return; |
2925 | 0 | } |
2926 | | |
2927 | | // At this point, we have a set of headers with sufficient work on them |
2928 | | // which can be processed. |
2929 | | |
2930 | | // If we don't have the last header, then this peer will have given us |
2931 | | // something new (if these headers are valid). |
2932 | 0 | bool received_new_header{last_received_header == nullptr}; |
2933 | | |
2934 | | // Now process all the headers. |
2935 | 0 | BlockValidationState state; |
2936 | 0 | if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true, state, &pindexLast)) { |
2937 | 0 | if (state.IsInvalid()) { |
2938 | 0 | MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); |
2939 | 0 | return; |
2940 | 0 | } |
2941 | 0 | } |
2942 | 0 | assert(pindexLast); |
2943 | | |
2944 | | // Consider fetching more headers if we are not using our headers-sync mechanism. |
2945 | 0 | if (nCount == m_opts.max_headers_result && !have_headers_sync) { |
2946 | | // Headers message had its maximum size; the peer may have more headers. |
2947 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) { |
2948 | 0 | LogDebug(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", |
2949 | 0 | pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height); |
2950 | 0 | } |
2951 | 0 | } |
2952 | |
2953 | 0 | UpdatePeerStateForReceivedHeaders(pfrom, peer, *pindexLast, received_new_header, nCount == m_opts.max_headers_result); |
2954 | | |
2955 | | // Consider immediately downloading blocks. |
2956 | 0 | HeadersDirectFetchBlocks(pfrom, peer, *pindexLast); |
2957 | |
2958 | 0 | return; |
2959 | 0 | } |
2960 | | |
2961 | | std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId nodeid, const CTransactionRef& ptx, const TxValidationState& state, |
2962 | | bool first_time_failure) |
2963 | 0 | { |
2964 | 0 | AssertLockNotHeld(m_peer_mutex); |
2965 | 0 | AssertLockHeld(g_msgproc_mutex); |
2966 | 0 | AssertLockHeld(m_tx_download_mutex); |
2967 | |
2968 | 0 | PeerRef peer{GetPeerRef(nodeid)}; |
2969 | |
|
2970 | 0 | LogDebug(BCLog::MEMPOOLREJ, "%s (wtxid=%s) from peer=%d was not accepted: %s\n", |
2971 | 0 | ptx->GetHash().ToString(), |
2972 | 0 | ptx->GetWitnessHash().ToString(), |
2973 | 0 | nodeid, |
2974 | 0 | state.ToString()); |
2975 | |
2976 | 0 | const auto& [add_extra_compact_tx, unique_parents, package_to_validate] = m_txdownloadman.MempoolRejectedTx(ptx, state, nodeid, first_time_failure); |
2977 | |
2978 | 0 | if (add_extra_compact_tx && RecursiveDynamicUsage(*ptx) < 100000) { |
2979 | 0 | AddToCompactExtraTransactions(ptx); |
2980 | 0 | } |
2981 | 0 | for (const uint256& parent_txid : unique_parents) { |
2982 | 0 | if (peer) AddKnownTx(*peer, parent_txid); |
2983 | 0 | } |
2984 | |
2985 | 0 | MaybePunishNodeForTx(nodeid, state); |
2986 | |
2987 | 0 | return package_to_validate; |
2988 | 0 | } |
2989 | | |
2990 | | void PeerManagerImpl::ProcessValidTx(NodeId nodeid, const CTransactionRef& tx, const std::list<CTransactionRef>& replaced_transactions) |
2991 | 0 | { |
2992 | 0 | AssertLockNotHeld(m_peer_mutex); |
2993 | 0 | AssertLockHeld(g_msgproc_mutex); |
2994 | 0 | AssertLockHeld(m_tx_download_mutex); |
2995 | |
2996 | 0 | m_txdownloadman.MempoolAcceptedTx(tx); |
2997 | |
2998 | 0 | LogDebug(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (wtxid=%s) (poolsz %u txn, %u kB)\n", |
2999 | 0 | nodeid, |
3000 | 0 | tx->GetHash().ToString(), |
3001 | 0 | tx->GetWitnessHash().ToString(), |
3002 | 0 | m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); |
3003 | |
3004 | 0 | RelayTransaction(tx->GetHash(), tx->GetWitnessHash()); |
3005 | |
3006 | 0 | for (const CTransactionRef& removedTx : replaced_transactions) { |
3007 | 0 | AddToCompactExtraTransactions(removedTx); |
3008 | 0 | } |
3009 | 0 | } |
3010 | | |
3011 | | void PeerManagerImpl::ProcessPackageResult(const node::PackageToValidate& package_to_validate, const PackageMempoolAcceptResult& package_result) |
3012 | 0 | { |
3013 | 0 | AssertLockNotHeld(m_peer_mutex); |
3014 | 0 | AssertLockHeld(g_msgproc_mutex); |
3015 | 0 | AssertLockHeld(m_tx_download_mutex); |
3016 | |
3017 | 0 | const auto& package = package_to_validate.m_txns; |
3018 | 0 | const auto& senders = package_to_validate.m_senders; |
3019 | |
3020 | 0 | if (package_result.m_state.IsInvalid()) { |
3021 | 0 | m_txdownloadman.MempoolRejectedPackage(package); |
3022 | 0 | } |
3023 | | // We currently only expect to process 1-parent-1-child packages. Remove if this changes. |
3024 | 0 | if (!Assume(package.size() == 2)) return; |
3025 | | |
3026 | | // Iterate backwards to erase in-package descendants from the orphanage before they become |
3027 | | // relevant in AddChildrenToWorkSet. |
3028 | 0 | auto package_iter = package.rbegin(); |
3029 | 0 | auto senders_iter = senders.rbegin(); |
3030 | 0 | while (package_iter != package.rend()) { |
3031 | 0 | const auto& tx = *package_iter; |
3032 | 0 | const NodeId nodeid = *senders_iter; |
3033 | 0 | const auto it_result{package_result.m_tx_results.find(tx->GetWitnessHash())}; |
3034 | |
3036 | 0 | if (it_result != package_result.m_tx_results.end()) { |
3037 | 0 | const auto& tx_result = it_result->second; |
3038 | 0 | switch (tx_result.m_result_type) { |
3039 | 0 | case MempoolAcceptResult::ResultType::VALID: |
3040 | 0 | { |
3041 | 0 | ProcessValidTx(nodeid, tx, tx_result.m_replaced_transactions); |
3042 | 0 | break; |
3043 | 0 | } |
3044 | 0 | case MempoolAcceptResult::ResultType::INVALID: |
3045 | 0 | case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS: |
3046 | 0 | { |
3047 | | // Don't add to vExtraTxnForCompact, as these transactions should have already been |
3048 | | // added there when added to the orphanage or rejected for TX_RECONSIDERABLE. |
3049 | | // This should be updated if package submission is ever used for transactions |
3050 | | // that haven't already been validated before. |
3051 | 0 | ProcessInvalidTx(nodeid, tx, tx_result.m_state, /*first_time_failure=*/false); |
3052 | 0 | break; |
3053 | 0 | } |
3054 | 0 | case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY: |
3055 | 0 | { |
3056 | | // AlreadyHaveTx() should be catching transactions that are already in mempool. |
3057 | 0 | Assume(false); |
3058 | 0 | break; |
3059 | 0 | } |
3060 | 0 | } |
3061 | 0 | } |
3062 | 0 | package_iter++; |
3063 | 0 | senders_iter++; |
3064 | 0 | } |
3065 | 0 | } |
3066 | | |
3067 | | bool PeerManagerImpl::ProcessOrphanTx(Peer& peer) |
3068 | 0 | { |
3069 | 0 | AssertLockHeld(g_msgproc_mutex); |
3070 | 0 | LOCK2(::cs_main, m_tx_download_mutex); |
3071 | |
3072 | 0 | CTransactionRef porphanTx = nullptr; |
3073 | |
3074 | 0 | while (CTransactionRef porphanTx = m_txdownloadman.GetTxToReconsider(peer.m_id)) { |
3075 | 0 | const MempoolAcceptResult result = m_chainman.ProcessTransaction(porphanTx); |
3076 | 0 | const TxValidationState& state = result.m_state; |
3077 | 0 | const Txid& orphanHash = porphanTx->GetHash(); |
3078 | 0 | const Wtxid& orphan_wtxid = porphanTx->GetWitnessHash(); |
3079 | |
3080 | 0 | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { |
3081 | 0 | LogDebug(BCLog::TXPACKAGES, " accepted orphan tx %s (wtxid=%s)\n", orphanHash.ToString(), orphan_wtxid.ToString()); |
3082 | 0 | ProcessValidTx(peer.m_id, porphanTx, result.m_replaced_transactions); |
3083 | 0 | return true; |
3084 | 0 | } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { |
3085 | 0 | LogDebug(BCLog::TXPACKAGES, " invalid orphan tx %s (wtxid=%s) from peer=%d. %s\n", |
3086 | 0 | orphanHash.ToString(), |
3087 | 0 | orphan_wtxid.ToString(), |
3088 | 0 | peer.m_id, |
3089 | 0 | state.ToString()); |
3090 | |
3091 | 0 | if (Assume(state.IsInvalid() && |
3092 | 0 | state.GetResult() != TxValidationResult::TX_UNKNOWN && |
3093 | 0 | state.GetResult() != TxValidationResult::TX_NO_MEMPOOL && |
3094 | 0 | state.GetResult() != TxValidationResult::TX_RESULT_UNSET)) { |
3095 | 0 | ProcessInvalidTx(peer.m_id, porphanTx, state, /*first_time_failure=*/false); |
3096 | 0 | } |
3097 | 0 | return true; |
3098 | 0 | } |
3099 | 0 | } |
3100 | | |
3101 | 0 | return false; |
3102 | 0 | } |
3103 | | |
3104 | | bool PeerManagerImpl::PrepareBlockFilterRequest(CNode& node, Peer& peer, |
3105 | | BlockFilterType filter_type, uint32_t start_height, |
3106 | | const uint256& stop_hash, uint32_t max_height_diff, |
3107 | | const CBlockIndex*& stop_index, |
3108 | | BlockFilterIndex*& filter_index) |
3109 | 0 | { |
3110 | 0 | const bool supported_filter_type = |
3111 | 0 | (filter_type == BlockFilterType::BASIC && |
3112 | 0 | (peer.m_our_services & NODE_COMPACT_FILTERS)); |
3113 | 0 | if (!supported_filter_type) { |
3114 | 0 | LogDebug(BCLog::NET, "peer %d requested unsupported block filter type: %d\n", |
3115 | 0 | node.GetId(), static_cast<uint8_t>(filter_type)); |
3116 | 0 | node.fDisconnect = true; |
3117 | 0 | return false; |
3118 | 0 | } |
3119 | | |
3120 | 0 | { |
3121 | 0 | LOCK(cs_main); |
3122 | 0 | stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash); |
3123 | | |
3124 | | // Check that the stop block exists and the peer would be allowed to fetch it. |
3125 | 0 | if (!stop_index || !BlockRequestAllowed(stop_index)) { |
3126 | 0 | LogDebug(BCLog::NET, "peer %d requested invalid block hash: %s\n", |
3127 | 0 | node.GetId(), stop_hash.ToString()); |
3128 | 0 | node.fDisconnect = true; |
3129 | 0 | return false; |
3130 | 0 | } |
3131 | 0 | } |
3132 | | |
3133 | 0 | uint32_t stop_height = stop_index->nHeight; |
3134 | 0 | if (start_height > stop_height) { |
3135 | 0 | LogDebug(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " |
3136 | 0 | "start height %d and stop height %d\n", |
3137 | 0 | node.GetId(), start_height, stop_height); |
3138 | 0 | node.fDisconnect = true; |
3139 | 0 | return false; |
3140 | 0 | } |
3141 | 0 | if (stop_height - start_height >= max_height_diff) { |
3142 | 0 | LogDebug(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n", |
3143 | 0 | node.GetId(), stop_height - start_height + 1, max_height_diff); |
3144 | 0 | node.fDisconnect = true; |
3145 | 0 | return false; |
3146 | 0 | } |
3147 | | |
3148 | 0 | filter_index = GetBlockFilterIndex(filter_type); |
3149 | 0 | if (!filter_index) { |
3150 | 0 | LogDebug(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); |
3151 | 0 | return false; |
3152 | 0 | } |
3153 | | |
3154 | 0 | return true; |
3155 | 0 | } |
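
[Editor's note] The height checks above admit at most max_height_diff items, since the inclusive range [start_height, stop_height] spans stop_height - start_height + 1 of them. A standalone sketch of that arithmetic; the limit of 1000 below is an assumed example value, not read from this listing:

    #include <cassert>
    #include <cstdint>

    bool RangeWithinLimit(uint32_t start_height, uint32_t stop_height, uint32_t max_height_diff)
    {
        if (start_height > stop_height) return false;        // inverted range: rejected
        return stop_height - start_height < max_height_diff; // item count <= max_height_diff
    }

    int main()
    {
        assert(RangeWithinLimit(0, 999, 1000));   // exactly 1000 filters: allowed
        assert(!RangeWithinLimit(0, 1000, 1000)); // 1001 filters: rejected
        return 0;
    }
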
3156 | | |
3157 | | void PeerManagerImpl::ProcessGetCFilters(CNode& node, Peer& peer, DataStream& vRecv) |
3158 | 0 | { |
3159 | 0 | uint8_t filter_type_ser; |
3160 | 0 | uint32_t start_height; |
3161 | 0 | uint256 stop_hash; |
3162 | |
3163 | 0 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3164 | |
3165 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3166 | |
3167 | 0 | const CBlockIndex* stop_index; |
3168 | 0 | BlockFilterIndex* filter_index; |
3169 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, |
3170 | 0 | MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { |
3171 | 0 | return; |
3172 | 0 | } |
3173 | | |
3174 | 0 | std::vector<BlockFilter> filters; |
3175 | 0 | if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { |
3176 | 0 | LogDebug(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n", |
3177 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3178 | 0 | return; |
3179 | 0 | } |
3180 | | |
3181 | 0 | for (const auto& filter : filters) { |
3182 | 0 | MakeAndPushMessage(node, NetMsgType::CFILTER, filter); |
3183 | 0 | } |
3184 | 0 | } |
3185 | | |
3186 | | void PeerManagerImpl::ProcessGetCFHeaders(CNode& node, Peer& peer, DataStream& vRecv) |
3187 | 0 | { |
3188 | 0 | uint8_t filter_type_ser; |
3189 | 0 | uint32_t start_height; |
3190 | 0 | uint256 stop_hash; |
3191 | |
3192 | 0 | vRecv >> filter_type_ser >> start_height >> stop_hash; |
3193 | |
3194 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3195 | |
3196 | 0 | const CBlockIndex* stop_index; |
3197 | 0 | BlockFilterIndex* filter_index; |
3198 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height, stop_hash, |
3199 | 0 | MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { |
3200 | 0 | return; |
3201 | 0 | } |
3202 | | |
3203 | 0 | uint256 prev_header; |
3204 | 0 | if (start_height > 0) { |
3205 | 0 | const CBlockIndex* const prev_block = |
3206 | 0 | stop_index->GetAncestor(static_cast<int>(start_height - 1)); |
3207 | 0 | if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { |
3208 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", |
3209 | 0 | BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); |
3210 | 0 | return; |
3211 | 0 | } |
3212 | 0 | } |
3213 | | |
3214 | 0 | std::vector<uint256> filter_hashes; |
3215 | 0 | if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { |
3216 | 0 | LogDebug(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, start_height=%d, stop_hash=%s\n", |
3217 | 0 | BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); |
3218 | 0 | return; |
3219 | 0 | } |
3220 | | |
3221 | 0 | MakeAndPushMessage(node, NetMsgType::CFHEADERS, |
3222 | 0 | filter_type_ser, |
3223 | 0 | stop_index->GetBlockHash(), |
3224 | 0 | prev_header, |
3225 | 0 | filter_hashes); |
3226 | 0 | } |
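
[Editor's note] What a client does with this reply, per BIP 157: each filter header commits to the corresponding filter hash and the previous header, so hashing forward from prev_header through filter_hashes must land on a header the client already trusts. A toy sketch; ToyHash is a stand-in for the real double-SHA256 commitment:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    uint64_t ToyHash(uint64_t filter_hash, uint64_t prev_header)
    {
        return filter_hash * 1000003ULL ^ prev_header; // NOT cryptographic; demo only
    }

    // Fold the received filter hashes into a chain starting at prev_header.
    uint64_t DeriveStopHeader(uint64_t prev_header, const std::vector<uint64_t>& filter_hashes)
    {
        for (const uint64_t fh : filter_hashes) prev_header = ToyHash(fh, prev_header);
        return prev_header; // compare against a trusted header or checkpoint
    }

    int main()
    {
        const uint64_t h1 = ToyHash(11, 0);
        assert(DeriveStopHeader(0, {11, 22}) == ToyHash(22, h1));
        return 0;
    }
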
3227 | | |
3228 | | void PeerManagerImpl::ProcessGetCFCheckPt(CNode& node, Peer& peer, DataStream& vRecv) |
3229 | 0 | { |
3230 | 0 | uint8_t filter_type_ser; |
3231 | 0 | uint256 stop_hash; |
3232 | |
3233 | 0 | vRecv >> filter_type_ser >> stop_hash; |
3234 | |
3235 | 0 | const BlockFilterType filter_type = static_cast<BlockFilterType>(filter_type_ser); |
3236 | |
3237 | 0 | const CBlockIndex* stop_index; |
3238 | 0 | BlockFilterIndex* filter_index; |
3239 | 0 | if (!PrepareBlockFilterRequest(node, peer, filter_type, /*start_height=*/0, stop_hash, |
3240 | 0 | /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), |
3241 | 0 | stop_index, filter_index)) { |
3242 | 0 | return; |
3243 | 0 | } |
3244 | | |
3245 | 0 | std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL); |
3246 | | |
3247 | | // Populate headers. |
3248 | 0 | const CBlockIndex* block_index = stop_index; |
3249 | 0 | for (int i = headers.size() - 1; i >= 0; i--) { |
3250 | 0 | int height = (i + 1) * CFCHECKPT_INTERVAL; |
3251 | 0 | block_index = block_index->GetAncestor(height); |
3252 | |
3253 | 0 | if (!filter_index->LookupFilterHeader(block_index, headers[i])) { |
3254 | 0 | LogDebug(BCLog::NET, "Failed to find block filter header in index: filter_type=%s, block_hash=%s\n", |
3255 | 0 | BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); |
3256 | 0 | return; |
3257 | 0 | } |
3258 | 0 | } |
3259 | | |
3260 | 0 | MakeAndPushMessage(node, NetMsgType::CFCHECKPT, |
3261 | 0 | filter_type_ser, |
3262 | 0 | stop_index->GetBlockHash(), |
3263 | 0 | headers); |
3264 | 0 | } |
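
[Editor's note] The loop above returns one filter header per CFCHECKPT_INTERVAL blocks, at heights interval, 2*interval, ..., up to the stop height. A standalone sketch; the interval of 1000 is the value BIP 157 specifies, assumed here rather than read from this listing:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> CheckpointHeights(uint32_t stop_height, uint32_t interval)
    {
        std::vector<uint32_t> heights(stop_height / interval);
        for (uint32_t i = 0; i < heights.size(); ++i) {
            heights[i] = (i + 1) * interval; // one checkpoint every `interval` blocks
        }
        return heights;
    }

    int main()
    {
        const auto h = CheckpointHeights(/*stop_height=*/2500, /*interval=*/1000);
        assert((h == std::vector<uint32_t>{1000, 2000}));
        return 0;
    }
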
3265 | | |
3266 | | void PeerManagerImpl::ProcessBlock(CNode& node, const std::shared_ptr<const CBlock>& block, bool force_processing, bool min_pow_checked) |
3267 | 0 | { |
3268 | 0 | bool new_block{false}; |
3269 | 0 | m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked, &new_block); |
3270 | 0 | if (new_block) { |
3271 | 0 | node.m_last_block_time = GetTime<std::chrono::seconds>(); |
3272 | | // In case this block came from a different peer than we requested |
3273 | | // from, we can erase the block request now anyway (as we just stored |
3274 | | // this block to disk). |
3275 | 0 | LOCK(cs_main); |
3276 | 0 | RemoveBlockRequest(block->GetHash(), std::nullopt); |
3277 | 0 | } else { |
3278 | 0 | LOCK(cs_main); |
3279 | 0 | mapBlockSource.erase(block->GetHash()); |
3280 | 0 | } |
3281 | 0 | } |
3282 | | |
3283 | | void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const BlockTransactions& block_transactions) |
3284 | 0 | { |
3285 | 0 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
3286 | 0 | bool fBlockRead{false}; |
3287 | 0 | { |
3288 | 0 | LOCK(cs_main); |
3289 | |
3290 | 0 | auto range_flight = mapBlocksInFlight.equal_range(block_transactions.blockhash); |
3291 | 0 | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
3292 | 0 | bool requested_block_from_this_peer{false}; |
3293 | | |
3294 | | // The multimap preserves insertion order of outstanding requests, so this peer's request is either absent or first in line.
3295 | 0 | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); |
3296 | |
3297 | 0 | while (range_flight.first != range_flight.second) { |
3298 | 0 | auto [node_id, block_it] = range_flight.first->second; |
3299 | 0 | if (node_id == pfrom.GetId() && block_it->partialBlock) { |
3300 | 0 | requested_block_from_this_peer = true; |
3301 | 0 | break; |
3302 | 0 | } |
3303 | 0 | range_flight.first++; |
3304 | 0 | } |
3305 | |
3306 | 0 | if (!requested_block_from_this_peer) { |
3307 | 0 | LogDebug(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId()); |
3308 | 0 | return; |
3309 | 0 | } |
3310 | | |
3311 | 0 | PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; |
3312 | 0 | ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); |
3313 | 0 | if (status == READ_STATUS_INVALID) { |
3314 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
3315 | 0 | Misbehaving(peer, "invalid compact block/non-matching block transactions"); |
3316 | 0 | return; |
3317 | 0 | } else if (status == READ_STATUS_FAILED) { |
3318 | 0 | if (first_in_flight) { |
3319 | | // Might have collided, fall back to getdata now :( |
3320 | 0 | std::vector<CInv> invs; |
3321 | 0 | invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); |
3322 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); |
3323 | 0 | } else { |
3324 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); |
3325 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId()); |
3326 | 0 | return; |
3327 | 0 | } |
3328 | 0 | } else { |
3329 | | // Block is either okay, or possibly we received |
3330 | | // READ_STATUS_CHECKBLOCK_FAILED. |
3331 | | // Note that CheckBlock can only fail for one of a few reasons: |
3332 | | // 1. bad-proof-of-work (impossible here, because we've already |
3333 | | // accepted the header) |
3334 | | // 2. merkleroot doesn't match the transactions given (already |
3335 | | // caught in FillBlock with READ_STATUS_FAILED, so |
3336 | | // impossible here) |
3337 | | // 3. the block is otherwise invalid (eg invalid coinbase, |
3338 | | // block is too big, too many legacy sigops, etc). |
3339 | | // So if CheckBlock failed, #3 is the only possibility. |
3340 | | // Under BIP 152, we don't discourage the peer unless proof of work is |
3341 | | // invalid (we don't require all the stateless checks to have |
3342 | | // been run). This is handled below, so just treat this as |
3343 | | // though the block was successfully read, and rely on the |
3344 | | // handling in ProcessNewBlock to ensure the block index is |
3345 | | // updated, etc. |
3346 | 0 | RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer |
3347 | 0 | fBlockRead = true; |
3348 | | // mapBlockSource is used for potentially punishing peers and |
3349 | | // updating which peers send us compact blocks, so the race |
3350 | | // between here and cs_main in ProcessNewBlock is fine. |
3351 | | // BIP 152 permits peers to relay compact blocks after validating |
3352 | | // the header only; we should not punish peers if the block turns |
3353 | | // out to be invalid. |
3354 | 0 | mapBlockSource.emplace(block_transactions.blockhash, std::make_pair(pfrom.GetId(), false)); |
3355 | 0 | } |
3356 | 0 | } // Don't hold cs_main when we call into ProcessNewBlock |
3357 | 0 | if (fBlockRead) { |
3358 | | // Since we requested this block (it was in mapBlocksInFlight), force it to be processed, |
3359 | | // even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc) |
3360 | | // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent |
3361 | | // disk-space attacks), but this should be safe due to the |
3362 | | // protections in the compact block handler -- see related comment |
3363 | | // in compact block optimistic reconstruction handling. |
3364 | 0 | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
3365 | 0 | } |
3366 | 0 | return; |
3367 | 0 | } |
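
[Editor's note] The "first in line" test above works because std::multimap keeps equivalent keys in insertion order (guaranteed since C++11), so the first element of equal_range() is always the oldest outstanding request. A standalone sketch:

    #include <cassert>
    #include <map>
    #include <string>

    int main()
    {
        std::multimap<std::string, int> blocks_in_flight; // blockhash -> peer id
        blocks_in_flight.emplace("deadbeef", /*peer=*/7);
        blocks_in_flight.emplace("deadbeef", /*peer=*/9);

        const auto range = blocks_in_flight.equal_range("deadbeef");
        const int peer = 9;
        const bool first_in_flight =
            range.first == range.second || range.first->second == peer;
        assert(!first_in_flight); // peer 7 asked first, so peer 9 must wait
        return 0;
    }
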
3368 | | |
3369 | | void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv, |
3370 | | const std::chrono::microseconds time_received, |
3371 | | const std::atomic<bool>& interruptMsgProc) |
3372 | 0 | { |
3373 | 0 | AssertLockHeld(g_msgproc_mutex); |
3374 | |
3375 | 0 | LogDebug(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); |
3376 | |
3377 | 0 | PeerRef peer = GetPeerRef(pfrom.GetId()); |
3378 | 0 | if (peer == nullptr) return; |
3379 | | |
3380 | 0 | if (msg_type == NetMsgType::VERSION) { |
3381 | 0 | if (pfrom.nVersion != 0) { |
3382 | 0 | LogDebug(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId()); |
3383 | 0 | return; |
3384 | 0 | } |
3385 | | |
3386 | 0 | int64_t nTime; |
3387 | 0 | CService addrMe; |
3388 | 0 | uint64_t nNonce = 1; |
3389 | 0 | ServiceFlags nServices; |
3390 | 0 | int nVersion; |
3391 | 0 | std::string cleanSubVer; |
3392 | 0 | int starting_height = -1; |
3393 | 0 | bool fRelay = true; |
3394 | |
3395 | 0 | vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime; |
3396 | 0 | if (nTime < 0) { |
3397 | 0 | nTime = 0; |
3398 | 0 | } |
3399 | 0 | vRecv.ignore(8); // Ignore the addrMe service bits sent by the peer |
3400 | 0 | vRecv >> CNetAddr::V1(addrMe); |
3401 | 0 | if (!pfrom.IsInboundConn()) |
3402 | 0 | { |
3403 | | // Overwrites potentially existing services. In contrast to this, |
3404 | | // unvalidated services received via gossip relay in ADDR/ADDRV2 |
3405 | | // messages are only ever added but cannot replace existing ones. |
3406 | 0 | m_addrman.SetServices(pfrom.addr, nServices); |
3407 | 0 | } |
3408 | 0 | if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) |
3409 | 0 | { |
3410 | 0 | LogDebug(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices)); |
3411 | 0 | pfrom.fDisconnect = true; |
3412 | 0 | return; |
3413 | 0 | } |
3414 | | |
3415 | 0 | if (nVersion < MIN_PEER_PROTO_VERSION) { |
3416 | | // disconnect from peers older than this proto version |
3417 | 0 | LogDebug(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion); |
3418 | 0 | pfrom.fDisconnect = true; |
3419 | 0 | return; |
3420 | 0 | } |
3421 | | |
3422 | 0 | if (!vRecv.empty()) { |
3423 | | // The version message includes information about the sending node which we don't use: |
3424 | | // - 8 bytes (service bits) |
3425 | | // - 16 bytes (ipv6 address) |
3426 | | // - 2 bytes (port) |
3427 | 0 | vRecv.ignore(26); |
3428 | 0 | vRecv >> nNonce; |
3429 | 0 | } |
3430 | 0 | if (!vRecv.empty()) { |
3431 | 0 | std::string strSubVer; |
3432 | 0 | vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); |
3433 | 0 | cleanSubVer = SanitizeString(strSubVer); |
3434 | 0 | } |
3435 | 0 | if (!vRecv.empty()) { |
3436 | 0 | vRecv >> starting_height; |
3437 | 0 | } |
3438 | 0 | if (!vRecv.empty()) |
3439 | 0 | vRecv >> fRelay; |
3440 | | // Disconnect if we connected to ourself |
3441 | 0 | if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) |
3442 | 0 | { |
3443 | 0 | LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToStringAddrPort()); |
3444 | 0 | pfrom.fDisconnect = true; |
3445 | 0 | return; |
3446 | 0 | } |
3447 | | |
3448 | 0 | if (pfrom.IsInboundConn() && addrMe.IsRoutable()) |
3449 | 0 | { |
3450 | 0 | SeenLocal(addrMe); |
3451 | 0 | } |
3452 | | |
3453 | | // Inbound peers send us their version message when they connect. |
3454 | | // We send our version message in response. |
3455 | 0 | if (pfrom.IsInboundConn()) { |
3456 | 0 | PushNodeVersion(pfrom, *peer); |
3457 | 0 | } |
3458 | | |
3459 | | // Negotiate the common protocol version
3460 | 0 | const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION); |
3461 | 0 | pfrom.SetCommonVersion(greatest_common_version); |
3462 | 0 | pfrom.nVersion = nVersion; |
3463 | |
3464 | 0 | if (greatest_common_version >= WTXID_RELAY_VERSION) { |
3465 | 0 | MakeAndPushMessage(pfrom, NetMsgType::WTXIDRELAY); |
3466 | 0 | } |
3467 | | |
3468 | | // Signal ADDRv2 support (BIP155). |
3469 | 0 | if (greatest_common_version >= 70016) { |
3470 | | // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some |
3471 | | // implementations reject messages they don't know. As a courtesy, don't send |
3472 | | // it to nodes with a version before 70016, as no software is known to support |
3473 | | // BIP155 that doesn't announce at least that protocol version number. |
3474 | 0 | MakeAndPushMessage(pfrom, NetMsgType::SENDADDRV2); |
3475 | 0 | } |
3476 | |
3477 | 0 | pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); |
3478 | 0 | peer->m_their_services = nServices; |
3479 | 0 | pfrom.SetAddrLocal(addrMe); |
3480 | 0 | { |
3481 | 0 | LOCK(pfrom.m_subver_mutex); |
3482 | 0 | pfrom.cleanSubVer = cleanSubVer; |
3483 | 0 | } |
3484 | 0 | peer->m_starting_height = starting_height; |
3485 | | |
3486 | | // Only initialize the Peer::TxRelay m_relay_txs data structure if: |
3487 | | // - this isn't an outbound block-relay-only connection, and |
3488 | | // - this isn't an outbound feeler connection, and |
3489 | | // - fRelay=true (the peer wishes to receive transaction announcements) |
3490 | | // or we're offering NODE_BLOOM to this peer. NODE_BLOOM means that |
3491 | | // the peer may turn on transaction relay later. |
3492 | 0 | if (!pfrom.IsBlockOnlyConn() && |
3493 | 0 | !pfrom.IsFeelerConn() && |
3494 | 0 | (fRelay || (peer->m_our_services & NODE_BLOOM))) { |
3495 | 0 | auto* const tx_relay = peer->SetTxRelay(); |
3496 | 0 | { |
3497 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
3498 | 0 | tx_relay->m_relay_txs = fRelay; // set to true after we get the first filter* message |
3499 | 0 | } |
3500 | 0 | if (fRelay) pfrom.m_relays_txs = true; |
3501 | 0 | } |
3502 | |
3503 | 0 | if (greatest_common_version >= WTXID_RELAY_VERSION && m_txreconciliation) { |
3504 | | // Per BIP-330, we announce txreconciliation support if: |
3505 | | // - protocol version per the peer's VERSION message supports WTXID_RELAY; |
3506 | | // - transaction relay is supported per the peer's VERSION message;
3507 | | // - this is not a block-relay-only connection and not a feeler;
3508 | | // - this is not an addr fetch connection; |
3509 | | // - we are not in -blocksonly mode. |
3510 | 0 | const auto* tx_relay = peer->GetTxRelay(); |
3511 | 0 | if (tx_relay && WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs) && |
3512 | 0 | !pfrom.IsAddrFetchConn() && !m_opts.ignore_incoming_txs) { |
3513 | 0 | const uint64_t recon_salt = m_txreconciliation->PreRegisterPeer(pfrom.GetId()); |
3514 | 0 | MakeAndPushMessage(pfrom, NetMsgType::SENDTXRCNCL, |
3515 | 0 | TXRECONCILIATION_VERSION, recon_salt); |
3516 | 0 | } |
3517 | 0 | } |
3518 | |
3519 | 0 | MakeAndPushMessage(pfrom, NetMsgType::VERACK); |
3520 | | |
3521 | | // Potentially mark this peer as a preferred download peer. |
3522 | 0 | { |
3523 | 0 | LOCK(cs_main); |
3524 | 0 | CNodeState* state = State(pfrom.GetId()); |
3525 | 0 | state->fPreferredDownload = (!pfrom.IsInboundConn() || pfrom.HasPermission(NetPermissionFlags::NoBan)) && !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer); |
3526 | 0 | m_num_preferred_download_peers += state->fPreferredDownload; |
3527 | 0 | } |
3528 | | |
3529 | | // Attempt to initialize address relay for outbound peers and use result |
3530 | | // to decide whether to send GETADDR, so that we don't send it to |
3531 | | // inbound or outbound block-relay-only peers. |
3532 | 0 | bool send_getaddr{false}; |
3533 | 0 | if (!pfrom.IsInboundConn()) { |
3534 | 0 | send_getaddr = SetupAddressRelay(pfrom, *peer); |
3535 | 0 | } |
3536 | 0 | if (send_getaddr) { |
3537 | | // Do a one-time address fetch to help populate/update our addrman. |
3538 | | // If we're starting up for the first time, our addrman may be pretty |
3539 | | // empty, so this mechanism is important to help us connect to the network. |
3540 | | // We skip this for block-relay-only peers. We want to avoid |
3541 | | // potentially leaking addr information and we do not want to |
3542 | | // indicate to the peer that we will participate in addr relay. |
3543 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETADDR); |
3544 | 0 | peer->m_getaddr_sent = true; |
3545 | | // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND addresses in response |
3546 | | // (bypassing the MAX_ADDR_PROCESSING_TOKEN_BUCKET limit). |
3547 | 0 | peer->m_addr_token_bucket += MAX_ADDR_TO_SEND; |
3548 | 0 | } |
3549 | |
3550 | 0 | if (!pfrom.IsInboundConn()) { |
3551 | | // For non-inbound connections, we update the addrman to record |
3552 | | // connection success so that addrman will have an up-to-date |
3553 | | // notion of which peers are online and available. |
3554 | | // |
3555 | | // While we strive to not leak information about block-relay-only |
3556 | | // connections via the addrman, not moving an address to the tried |
3557 | | // table is also potentially detrimental because new-table entries |
3558 | | // are subject to eviction in the event of addrman collisions. We |
3559 | | // mitigate the information-leak by never calling |
3560 | | // AddrMan::Connected() on block-relay-only peers; see |
3561 | | // FinalizeNode(). |
3562 | | // |
3563 | | // This moves an address from New to Tried table in Addrman, |
3564 | | // resolves tried-table collisions, etc. |
3565 | 0 | m_addrman.Good(pfrom.addr); |
3566 | 0 | } |
3567 | |
3568 | 0 | std::string remoteAddr; |
3569 | 0 | if (fLogIPs) |
3570 | 0 | remoteAddr = ", peeraddr=" + pfrom.addr.ToStringAddrPort(); |
3571 | |
3572 | 0 | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3573 | 0 | LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n", |
3574 | 0 | cleanSubVer, pfrom.nVersion, |
3575 | 0 | peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(), |
3576 | 0 | remoteAddr, (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); |
3577 | |
3578 | 0 | peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now<NodeSeconds>(); |
3579 | 0 | if (!pfrom.IsInboundConn()) { |
3580 | | // Don't use timedata samples from inbound peers to make it |
3581 | | // harder for others to create false warnings about our clock being out of sync. |
3582 | 0 | m_outbound_time_offsets.Add(peer->m_time_offset); |
3583 | 0 | m_outbound_time_offsets.WarnIfOutOfSync(); |
3584 | 0 | } |
3585 | | |
3586 | | // If the peer is old enough to have the old alert system, send it the final alert. |
3587 | 0 | if (greatest_common_version <= 70012) { |
3588 | 0 | constexpr auto finalAlert{"60010000000000000000000000ffffff7f00000000ffffff7ffeffff7f01ffffff7f00000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220653febd6410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3abd5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fecaae66ecf689bf71b50"_hex}; |
3589 | 0 | MakeAndPushMessage(pfrom, "alert", finalAlert); |
3590 | 0 | } |
3591 | | |
3592 | | // Feeler connections exist only to verify whether an address is online.
3593 | 0 | if (pfrom.IsFeelerConn()) { |
3594 | 0 | LogDebug(BCLog::NET, "feeler connection completed peer=%d; disconnecting\n", pfrom.GetId()); |
3595 | 0 | pfrom.fDisconnect = true; |
3596 | 0 | } |
3597 | 0 | return; |
3598 | 0 | } |
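
[Editor's note] The handshake above settles on the minimum of the two advertised protocol versions and gates optional features (wtxidrelay, sendaddrv2, ...) on the result. A standalone sketch with illustrative version numbers:

    #include <algorithm>
    #include <cassert>

    int main()
    {
        const int our_version{70016};   // stand-in for PROTOCOL_VERSION
        const int their_version{70015}; // from the peer's VERSION message
        const int common{std::min(their_version, our_version)};
        assert(common == 70015); // features gated on >= 70016 are skipped
        return 0;
    }
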
3599 | | |
3600 | 0 | if (pfrom.nVersion == 0) { |
3601 | | // Must have a version message before anything else |
3602 | 0 | LogDebug(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
3603 | 0 | return; |
3604 | 0 | } |
3605 | | |
3606 | 0 | if (msg_type == NetMsgType::VERACK) { |
3607 | 0 | if (pfrom.fSuccessfullyConnected) { |
3608 | 0 | LogDebug(BCLog::NET, "ignoring redundant verack message from peer=%d\n", pfrom.GetId()); |
3609 | 0 | return; |
3610 | 0 | } |
3611 | | |
3612 | | // Log successful connections unconditionally for outbound, but not for inbound as those |
3613 | | // can be triggered by an attacker at high rate. |
3614 | 0 | if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { |
3615 | 0 | const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; |
3616 | 0 | LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", |
3617 | 0 | pfrom.ConnectionTypeAsString(), |
3618 | 0 | TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), |
3619 | 0 | pfrom.nVersion.load(), peer->m_starting_height, |
3620 | 0 | pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToStringAddrPort()) : ""), |
3621 | 0 | (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); |
3622 | 0 | } |
3623 | |
3624 | 0 | if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { |
3625 | | // Tell our peer we are willing to provide version 2 cmpctblocks. |
3626 | | // However, we do not request new block announcements using |
3627 | | // cmpctblock messages. |
3628 | | // We send this to non-NODE_NETWORK peers as well, because
3629 | | // they may wish to request compact blocks from us |
3630 | 0 | MakeAndPushMessage(pfrom, NetMsgType::SENDCMPCT, /*high_bandwidth=*/false, /*version=*/CMPCTBLOCKS_VERSION); |
3631 | 0 | } |
3632 | |
3633 | 0 | if (m_txreconciliation) { |
3634 | 0 | if (!peer->m_wtxid_relay || !m_txreconciliation->IsPeerRegistered(pfrom.GetId())) { |
3635 | | // We could have optimistically pre-registered/registered the peer. In that case, |
3636 | | // we should forget about the reconciliation state here if this wasn't followed |
3637 | | // by WTXIDRELAY (since WTXIDRELAY can't be announced later). |
3638 | 0 | m_txreconciliation->ForgetPeer(pfrom.GetId()); |
3639 | 0 | } |
3640 | 0 | } |
3641 | |
3642 | 0 | if (auto tx_relay = peer->GetTxRelay()) { |
3643 | | // `TxRelay::m_tx_inventory_to_send` must be empty before the |
3644 | | // version handshake is completed as |
3645 | | // `TxRelay::m_next_inv_send_time` is first initialised in |
3646 | | // `SendMessages` after the verack is received. Any transactions |
3647 | | // received during the version handshake would otherwise |
3648 | | // immediately be advertised without random delay, potentially |
3649 | | // leaking the time of arrival to a spy. |
3650 | 0 | Assume(WITH_LOCK( |
3651 | 0 | tx_relay->m_tx_inventory_mutex, |
3652 | 0 | return tx_relay->m_tx_inventory_to_send.empty() && |
3653 | 0 | tx_relay->m_next_inv_send_time == 0s)); |
3654 | 0 | } |
3655 | |
3656 | 0 | { |
3657 | 0 | LOCK2(::cs_main, m_tx_download_mutex); |
3658 | 0 | const CNodeState* state = State(pfrom.GetId()); |
3659 | 0 | m_txdownloadman.ConnectedPeer(pfrom.GetId(), node::TxDownloadConnectionInfo { |
3660 | 0 | .m_preferred = state->fPreferredDownload, |
3661 | 0 | .m_relay_permissions = pfrom.HasPermission(NetPermissionFlags::Relay), |
3662 | 0 | .m_wtxid_relay = peer->m_wtxid_relay, |
3663 | 0 | }); |
3664 | 0 | } |
3665 | |
|
3666 | 0 | pfrom.fSuccessfullyConnected = true; |
3667 | 0 | return; |
3668 | 0 | } |
3669 | | |
3670 | 0 | if (msg_type == NetMsgType::SENDHEADERS) { |
3671 | 0 | peer->m_prefers_headers = true; |
3672 | 0 | return; |
3673 | 0 | } |
3674 | | |
3675 | 0 | if (msg_type == NetMsgType::SENDCMPCT) { |
3676 | 0 | bool sendcmpct_hb{false}; |
3677 | 0 | uint64_t sendcmpct_version{0}; |
3678 | 0 | vRecv >> sendcmpct_hb >> sendcmpct_version; |
3679 | | |
3680 | | // Only support compact block relay with witnesses |
3681 | 0 | if (sendcmpct_version != CMPCTBLOCKS_VERSION) return; |
3682 | | |
3683 | 0 | LOCK(cs_main); |
3684 | 0 | CNodeState* nodestate = State(pfrom.GetId()); |
3685 | 0 | nodestate->m_provides_cmpctblocks = true; |
3686 | 0 | nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb; |
3687 | | // save whether peer selects us as BIP152 high-bandwidth peer |
3688 | | // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth) |
3689 | 0 | pfrom.m_bip152_highbandwidth_from = sendcmpct_hb; |
3690 | 0 | return; |
3691 | 0 | } |
3692 | | |
3693 | | // BIP339 defines feature negotiation of wtxidrelay, which must happen between |
3694 | | // VERSION and VERACK to avoid relay problems from switching after a connection is up. |
3695 | 0 | if (msg_type == NetMsgType::WTXIDRELAY) { |
3696 | 0 | if (pfrom.fSuccessfullyConnected) { |
3697 | | // Disconnect peers that send a wtxidrelay message after VERACK. |
3698 | 0 | LogDebug(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId()); |
3699 | 0 | pfrom.fDisconnect = true; |
3700 | 0 | return; |
3701 | 0 | } |
3702 | 0 | if (pfrom.GetCommonVersion() >= WTXID_RELAY_VERSION) { |
3703 | 0 | if (!peer->m_wtxid_relay) { |
3704 | 0 | peer->m_wtxid_relay = true; |
3705 | 0 | m_wtxid_relay_peers++; |
3706 | 0 | } else { |
3707 | 0 | LogDebug(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); |
3708 | 0 | } |
3709 | 0 | } else { |
3710 | 0 | LogDebug(BCLog::NET, "ignoring wtxidrelay due to old common version=%d from peer=%d\n", pfrom.GetCommonVersion(), pfrom.GetId()); |
3711 | 0 | } |
3712 | 0 | return; |
3713 | 0 | } |
3714 | | |
3715 | | // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen |
3716 | | // between VERSION and VERACK. |
3717 | 0 | if (msg_type == NetMsgType::SENDADDRV2) { |
3718 | 0 | if (pfrom.fSuccessfullyConnected) { |
3719 | | // Disconnect peers that send a SENDADDRV2 message after VERACK. |
3720 | 0 | LogDebug(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId()); |
3721 | 0 | pfrom.fDisconnect = true; |
3722 | 0 | return; |
3723 | 0 | } |
3724 | 0 | peer->m_wants_addrv2 = true; |
3725 | 0 | return; |
3726 | 0 | } |
3727 | | |
3728 | | // Received from a peer demonstrating readiness to announce transactions via reconciliations. |
3729 | | // This feature negotiation must happen between VERSION and VERACK to avoid relay problems |
3730 | | // from switching announcement protocols after the connection is up. |
3731 | 0 | if (msg_type == NetMsgType::SENDTXRCNCL) { |
3732 | 0 | if (!m_txreconciliation) { |
3733 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl from peer=%d ignored, as our node does not have txreconciliation enabled\n", pfrom.GetId()); |
3734 | 0 | return; |
3735 | 0 | } |
3736 | | |
3737 | 0 | if (pfrom.fSuccessfullyConnected) { |
3738 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received after verack from peer=%d; disconnecting\n", pfrom.GetId()); |
3739 | 0 | pfrom.fDisconnect = true; |
3740 | 0 | return; |
3741 | 0 | } |
3742 | | |
3743 | | // Peer must not offer us reconciliations if we specified no tx relay support in VERSION. |
3744 | 0 | if (RejectIncomingTxs(pfrom)) { |
3745 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d to which we indicated no tx relay; disconnecting\n", pfrom.GetId()); |
3746 | 0 | pfrom.fDisconnect = true; |
3747 | 0 | return; |
3748 | 0 | } |
3749 | | |
3750 | | // Peer must not offer us reconciliations if they specified no tx relay support in VERSION. |
3751 | | // This flag might also be false in other cases, but the RejectIncomingTxs check above |
3752 | | // eliminates them, so that this flag fully represents what we are looking for. |
3753 | 0 | const auto* tx_relay = peer->GetTxRelay(); |
3754 | 0 | if (!tx_relay || !WITH_LOCK(tx_relay->m_bloom_filter_mutex, return tx_relay->m_relay_txs)) { |
3755 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "sendtxrcncl received from peer=%d which indicated no tx relay to us; disconnecting\n", pfrom.GetId()); |
3756 | 0 | pfrom.fDisconnect = true; |
3757 | 0 | return; |
3758 | 0 | } |
3759 | | |
3760 | 0 | uint32_t peer_txreconcl_version; |
3761 | 0 | uint64_t remote_salt; |
3762 | 0 | vRecv >> peer_txreconcl_version >> remote_salt; |
3763 | |
3764 | 0 | const ReconciliationRegisterResult result = m_txreconciliation->RegisterPeer(pfrom.GetId(), pfrom.IsInboundConn(), |
3765 | 0 | peer_txreconcl_version, remote_salt); |
3766 | 0 | switch (result) { |
3767 | 0 | case ReconciliationRegisterResult::NOT_FOUND: |
3768 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "Ignore unexpected txreconciliation signal from peer=%d\n", pfrom.GetId()); |
3769 | 0 | break; |
3770 | 0 | case ReconciliationRegisterResult::SUCCESS: |
3771 | 0 | break; |
3772 | 0 | case ReconciliationRegisterResult::ALREADY_REGISTERED: |
3773 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d (sendtxrcncl received from already registered peer); disconnecting\n", pfrom.GetId()); |
3774 | 0 | pfrom.fDisconnect = true; |
3775 | 0 | return; |
3776 | 0 | case ReconciliationRegisterResult::PROTOCOL_VIOLATION: |
3777 | 0 | LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "txreconciliation protocol violation from peer=%d; disconnecting\n", pfrom.GetId()); |
3778 | 0 | pfrom.fDisconnect = true; |
3779 | 0 | return; |
3780 | 0 | } |
3781 | 0 | return; |
3782 | 0 | } |
3783 | | |
3784 | 0 | if (!pfrom.fSuccessfullyConnected) { |
3785 | 0 | LogDebug(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
3786 | 0 | return; |
3787 | 0 | } |
3788 | | |
3789 | 0 | if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) { |
3790 | 0 | const auto ser_params{ |
3791 | 0 | msg_type == NetMsgType::ADDRV2 ? |
3792 | | // Set V2 param so that the CNetAddr and CAddress |
3793 | | // unserialize methods know that an address in v2 format is coming. |
3794 | 0 | CAddress::V2_NETWORK : |
3795 | 0 | CAddress::V1_NETWORK, |
3796 | 0 | }; |
3797 | |
3798 | 0 | std::vector<CAddress> vAddr; |
3799 | |
3800 | 0 | vRecv >> ser_params(vAddr); |
3801 | |
3802 | 0 | if (!SetupAddressRelay(pfrom, *peer)) { |
3803 | 0 | LogDebug(BCLog::NET, "ignoring %s message from %s peer=%d\n", msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId()); |
3804 | 0 | return; |
3805 | 0 | } |
3806 | | |
3807 | 0 | if (vAddr.size() > MAX_ADDR_TO_SEND) |
3808 | 0 | { |
3809 | 0 | Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size())); |
3810 | 0 | return; |
3811 | 0 | } |
3812 | | |
3813 | | // Store the new addresses |
3814 | 0 | std::vector<CAddress> vAddrOk; |
3815 | 0 | const auto current_a_time{Now<NodeSeconds>()}; |
3816 | | |
3817 | | // Update/increment addr rate limiting bucket. |
3818 | 0 | const auto current_time{GetTime<std::chrono::microseconds>()}; |
3819 | 0 | if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) { |
3820 | | // Don't increment bucket if it's already full |
3821 | 0 | const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us); |
3822 | 0 | const double increment = Ticks<SecondsDouble>(time_diff) * MAX_ADDR_RATE_PER_SECOND; |
3823 | 0 | peer->m_addr_token_bucket = std::min<double>(peer->m_addr_token_bucket + increment, MAX_ADDR_PROCESSING_TOKEN_BUCKET); |
3824 | 0 | } |
3825 | 0 | peer->m_addr_token_timestamp = current_time; |
3826 | |
3827 | 0 | const bool rate_limited = !pfrom.HasPermission(NetPermissionFlags::Addr); |
3828 | 0 | uint64_t num_proc = 0; |
3829 | 0 | uint64_t num_rate_limit = 0; |
3830 | 0 | std::shuffle(vAddr.begin(), vAddr.end(), m_rng); |
3831 | 0 | for (CAddress& addr : vAddr) |
3832 | 0 | { |
3833 | 0 | if (interruptMsgProc) |
3834 | 0 | return; |
3835 | | |
3836 | | // Apply rate limiting. |
3837 | 0 | if (peer->m_addr_token_bucket < 1.0) { |
3838 | 0 | if (rate_limited) { |
3839 | 0 | ++num_rate_limit; |
3840 | 0 | continue; |
3841 | 0 | } |
3842 | 0 | } else { |
3843 | 0 | peer->m_addr_token_bucket -= 1.0; |
3844 | 0 | } |
3845 | | // We only bother storing full nodes, though this may include |
3846 | | // things which we would not make an outbound connection to, in |
3847 | | // part because we may make feeler connections to them. |
3848 | 0 | if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) |
3849 | 0 | continue; |
3850 | | |
3851 | 0 | if (addr.nTime <= NodeSeconds{100000000s} || addr.nTime > current_a_time + 10min) { |
3852 | 0 | addr.nTime = current_a_time - 5 * 24h; |
3853 | 0 | } |
3854 | 0 | AddAddressKnown(*peer, addr); |
3855 | 0 | if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { |
3856 | | // Do not process banned/discouraged addresses beyond remembering we received them |
3857 | 0 | continue; |
3858 | 0 | } |
3859 | 0 | ++num_proc; |
3860 | 0 | const bool reachable{g_reachable_nets.Contains(addr)}; |
3861 | 0 | if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent && vAddr.size() <= 10 && addr.IsRoutable()) { |
3862 | | // Relay to a limited number of other nodes |
3863 | 0 | RelayAddress(pfrom.GetId(), addr, reachable); |
3864 | 0 | } |
3865 | | // Do not store addresses outside our network |
3866 | 0 | if (reachable) { |
3867 | 0 | vAddrOk.push_back(addr); |
3868 | 0 | } |
3869 | 0 | } |
3870 | 0 | peer->m_addr_processed += num_proc; |
3871 | 0 | peer->m_addr_rate_limited += num_rate_limit; |
3872 | 0 | LogDebug(BCLog::NET, "Received addr: %u addresses (%u processed, %u rate-limited) from peer=%d\n", |
3873 | 0 | vAddr.size(), num_proc, num_rate_limit, pfrom.GetId()); |
3874 | |
3875 | 0 | m_addrman.Add(vAddrOk, pfrom.addr, 2h); |
3876 | 0 | if (vAddr.size() < 1000) peer->m_getaddr_sent = false; |
3877 | | |
3878 | | // AddrFetch: Require multiple addresses to avoid disconnecting on self-announcements |
3879 | 0 | if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) { |
3880 | 0 | LogDebug(BCLog::NET, "addrfetch connection completed peer=%d; disconnecting\n", pfrom.GetId()); |
3881 | 0 | pfrom.fDisconnect = true; |
3882 | 0 | } |
3883 | 0 | return; |
3884 | 0 | } |
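
[Editor's note] The rate limiting above is a token bucket: tokens accrue with elapsed time up to a cap, and each processed address spends one. A standalone model; the rate and cap are assumed example values, and unlike the code above it refills on every call rather than once per addr message:

    #include <algorithm>
    #include <cassert>

    struct AddrTokenBucket {
        double tokens{0.0};
        double last_update_s{0.0};
        double rate_per_s{0.1};  // assumed refill rate
        double cap{1000.0};      // assumed bucket cap

        // Refill for the elapsed time, then try to spend one token.
        bool Consume(double now_s)
        {
            if (tokens < cap) {
                tokens = std::min(tokens + (now_s - last_update_s) * rate_per_s, cap);
            }
            last_update_s = now_s;
            if (tokens < 1.0) return false; // rate-limited
            tokens -= 1.0;
            return true;
        }
    };

    int main()
    {
        AddrTokenBucket bucket;
        assert(!bucket.Consume(0.0)); // empty bucket: address is rate-limited
        assert(bucket.Consume(10.0)); // 10s * 0.1/s accrues exactly one token
        return 0;
    }
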
3885 | | |
3886 | 0 | if (msg_type == NetMsgType::INV) { |
3887 | 0 | std::vector<CInv> vInv; |
3888 | 0 | vRecv >> vInv; |
3889 | 0 | if (vInv.size() > MAX_INV_SZ) |
3890 | 0 | { |
3891 | 0 | Misbehaving(*peer, strprintf("inv message size = %u", vInv.size())); |
3892 | 0 | return; |
3893 | 0 | } |
3894 | | |
3895 | 0 | const bool reject_tx_invs{RejectIncomingTxs(pfrom)}; |
3896 | |
3897 | 0 | LOCK2(cs_main, m_tx_download_mutex); |
3898 | |
3899 | 0 | const auto current_time{GetTime<std::chrono::microseconds>()}; |
3900 | 0 | uint256* best_block{nullptr}; |
3901 | |
3902 | 0 | for (CInv& inv : vInv) { |
3903 | 0 | if (interruptMsgProc) return; |
3904 | | |
3905 | | // Ignore INVs that don't match wtxidrelay setting. |
3906 | | // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. |
3907 | | // This is fine as no INV messages are involved in that process. |
3908 | 0 | if (peer->m_wtxid_relay) { |
3909 | 0 | if (inv.IsMsgTx()) continue; |
3910 | 0 | } else { |
3911 | 0 | if (inv.IsMsgWtx()) continue; |
3912 | 0 | } |
3913 | | |
3914 | 0 | if (inv.IsMsgBlk()) { |
3915 | 0 | const bool fAlreadyHave = AlreadyHaveBlock(inv.hash); |
3916 | 0 | LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); |
3917 | |
3918 | 0 | UpdateBlockAvailability(pfrom.GetId(), inv.hash); |
3919 | 0 | if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() && !IsBlockRequested(inv.hash)) { |
3920 | | // Headers-first is the primary method of announcement on |
3921 | | // the network. If a node fell back to sending blocks by |
3922 | | // inv, it may be for a re-org, or because we haven't |
3923 | | // completed initial headers sync. The final block hash |
3924 | | // provided should be the highest, so send a getheaders and |
3925 | | // then fetch the blocks we need to catch up. |
3926 | 0 | best_block = &inv.hash; |
3927 | 0 | } |
3928 | 0 | } else if (inv.IsGenTxMsg()) { |
3929 | 0 | if (reject_tx_invs) { |
3930 | 0 | LogDebug(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId()); |
3931 | 0 | pfrom.fDisconnect = true; |
3932 | 0 | return; |
3933 | 0 | } |
3934 | 0 | const GenTxid gtxid = ToGenTxid(inv); |
3935 | 0 | AddKnownTx(*peer, inv.hash); |
3936 | |
3937 | 0 | if (!m_chainman.IsInitialBlockDownload()) { |
3938 | 0 | const bool fAlreadyHave{m_txdownloadman.AddTxAnnouncement(pfrom.GetId(), gtxid, current_time, /*p2p_inv=*/true)}; |
3939 | 0 | LogDebug(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); |
3940 | 0 | } |
3941 | 0 | } else { |
3942 | 0 | LogDebug(BCLog::NET, "Unknown inv type \"%s\" received from peer=%d\n", inv.ToString(), pfrom.GetId()); |
3943 | 0 | } |
3944 | 0 | } |
3945 | | |
3946 | 0 | if (best_block != nullptr) { |
3947 | | // If we haven't started initial headers-sync with this peer, then |
3948 | | // consider sending a getheaders now. On initial startup, there's a |
3949 | | // reliability vs bandwidth tradeoff, where we are only trying to do |
3950 | | // initial headers sync with one peer at a time, with a long |
3951 | | // timeout (at which point, if the sync hasn't completed, we will |
3952 | | // disconnect the peer and then choose another). In the meantime, |
3953 | | // as new blocks are found, we are willing to add one new peer per |
3954 | | // block to sync with as well, to sync quicker in the case where |
3955 | | // our initial peer is unresponsive (but less bandwidth than we'd |
3956 | | // use if we turned on sync with all peers). |
3957 | 0 | CNodeState& state{*Assert(State(pfrom.GetId()))}; |
3958 | 0 | if (state.fSyncStarted || (!peer->m_inv_triggered_getheaders_before_sync && *best_block != m_last_block_inv_triggering_headers_sync)) { |
3959 | 0 | if (MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer)) { |
3960 | 0 | LogDebug(BCLog::NET, "getheaders (%d) %s to peer=%d\n", |
3961 | 0 | m_chainman.m_best_header->nHeight, best_block->ToString(), |
3962 | 0 | pfrom.GetId()); |
3963 | 0 | } |
3964 | 0 | if (!state.fSyncStarted) { |
3965 | 0 | peer->m_inv_triggered_getheaders_before_sync = true; |
3966 | | // Update the last block hash that triggered a new headers |
3967 | | // sync, so that we don't turn on headers sync with more |
3968 | | // than 1 new peer every new block. |
3969 | 0 | m_last_block_inv_triggering_headers_sync = *best_block; |
3970 | 0 | } |
3971 | 0 | } |
3972 | 0 | } |
3973 | |
3974 | 0 | return; |
3975 | 0 | } |
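
[Editor's note] The wtxidrelay filtering above, in isolation: once wtxid relay is negotiated, txid-based announcements are ignored, and wtxid-based ones are ignored when it is not, so each transaction is tracked under exactly one id type. A sketch:

    #include <cassert>

    bool ShouldProcessTxInv(bool wtxid_relay_negotiated, bool inv_uses_wtxid)
    {
        return wtxid_relay_negotiated == inv_uses_wtxid;
    }

    int main()
    {
        assert(ShouldProcessTxInv(true, true));   // MSG_WTX with wtxidrelay: process
        assert(!ShouldProcessTxInv(true, false)); // MSG_TX with wtxidrelay: ignore
        assert(!ShouldProcessTxInv(false, true)); // MSG_WTX without wtxidrelay: ignore
        return 0;
    }
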
3976 | | |
3977 | 0 | if (msg_type == NetMsgType::GETDATA) { |
3978 | 0 | std::vector<CInv> vInv; |
3979 | 0 | vRecv >> vInv; |
3980 | 0 | if (vInv.size() > MAX_INV_SZ) |
3981 | 0 | { |
3982 | 0 | Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size())); |
3983 | 0 | return; |
3984 | 0 | } |
3985 | | |
3986 | 0 | LogDebug(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); |
3987 | |
3988 | 0 | if (vInv.size() > 0) { |
3989 | 0 | LogDebug(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); |
3990 | 0 | } |
3991 | |
3992 | 0 | { |
3993 | 0 | LOCK(peer->m_getdata_requests_mutex); |
3994 | 0 | peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); |
3995 | 0 | ProcessGetData(pfrom, *peer, interruptMsgProc); |
3996 | 0 | } |
3997 | |
3998 | 0 | return; |
3999 | 0 | } |
4000 | | |
4001 | 0 | if (msg_type == NetMsgType::GETBLOCKS) { |
4002 | 0 | CBlockLocator locator; |
4003 | 0 | uint256 hashStop; |
4004 | 0 | vRecv >> locator >> hashStop; |
4005 | |
4006 | 0 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { |
4007 | 0 | LogDebug(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); |
4008 | 0 | pfrom.fDisconnect = true; |
4009 | 0 | return; |
4010 | 0 | } |
4011 | | |
4012 | | // We might have announced the currently-being-connected tip using a |
4013 | | // compact block, which resulted in the peer sending a getblocks |
4014 | | // request, which we would otherwise respond to without the new block. |
4015 | | // To avoid this situation we simply verify that we are on our best |
4016 | | // known chain now. This is super overkill, but we handle it better |
4017 | | // for getheaders requests, and there are no known nodes which support |
4018 | | // compact blocks but still use getblocks to request blocks. |
4019 | 0 | { |
4020 | 0 | std::shared_ptr<const CBlock> a_recent_block; |
4021 | 0 | { |
4022 | 0 | LOCK(m_most_recent_block_mutex); |
4023 | 0 | a_recent_block = m_most_recent_block; |
4024 | 0 | } |
4025 | 0 | BlockValidationState state; |
4026 | 0 | if (!m_chainman.ActiveChainstate().ActivateBestChain(state, a_recent_block)) { |
4027 | 0 | LogDebug(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); |
4028 | 0 | } |
4029 | 0 | } |
4030 | |
4031 | 0 | LOCK(cs_main); |
4032 | | |
4033 | | // Find the last block the caller has in the main chain |
4034 | 0 | const CBlockIndex* pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4035 | | |
4036 | | // Send the rest of the chain |
4037 | 0 | if (pindex) |
4038 | 0 | pindex = m_chainman.ActiveChain().Next(pindex); |
4039 | 0 | int nLimit = 500; |
4040 | 0 | LogDebug(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); |
4041 | 0 | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) |
4042 | 0 | { |
4043 | 0 | if (pindex->GetBlockHash() == hashStop) |
4044 | 0 | { |
4045 | 0 | LogDebug(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4046 | 0 | break; |
4047 | 0 | } |
4048 | | // If pruning, don't inv blocks unless we have them on disk and are likely to
4049 | | // still have them for some reasonable time window (1 hour) that block relay might require.
4050 | 0 | const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; |
4051 | 0 | if (m_chainman.m_blockman.IsPruneMode() && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { |
4052 | 0 | LogDebug(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4053 | 0 | break; |
4054 | 0 | } |
4055 | 0 | WITH_LOCK(peer->m_block_inv_mutex, peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash())); |
4056 | 0 | if (--nLimit <= 0) { |
4057 | | // When this block is requested, we'll send an inv that'll |
4058 | | // trigger the peer to getblocks the next batch of inventory. |
4059 | 0 | LogDebug(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); |
4060 | 0 | WITH_LOCK(peer->m_block_inv_mutex, {peer->m_continuation_block = pindex->GetBlockHash();}); |
4061 | 0 | break; |
4062 | 0 | } |
4063 | 0 | } |
4064 | 0 | return; |
4065 | 0 | } |
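
[Editor's note] The arithmetic behind nPrunedBlocksLikelyToHave above, worked with assumed values for constants not visible in this section (MIN_BLOCKS_TO_KEEP = 288, nPowTargetSpacing = 600s):

    #include <cassert>

    int main()
    {
        const int min_blocks_to_keep{288}; // assumed
        const int pow_target_spacing{600}; // seconds per block, assumed
        // One hour's worth of blocks is kept as margin below the retention floor.
        const int pruned_blocks_likely_to_have{min_blocks_to_keep - 3600 / pow_target_spacing};
        assert(pruned_blocks_likely_to_have == 282);
        return 0;
    }
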
4066 | | |
4067 | 0 | if (msg_type == NetMsgType::GETBLOCKTXN) { |
4068 | 0 | BlockTransactionsRequest req; |
4069 | 0 | vRecv >> req; |
4070 | |
4071 | 0 | std::shared_ptr<const CBlock> recent_block; |
4072 | 0 | { |
4073 | 0 | LOCK(m_most_recent_block_mutex); |
4074 | 0 | if (m_most_recent_block_hash == req.blockhash) |
4075 | 0 | recent_block = m_most_recent_block; |
4076 | | // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion |
4077 | 0 | } |
4078 | 0 | if (recent_block) { |
4079 | 0 | SendBlockTransactions(pfrom, *peer, *recent_block, req); |
4080 | 0 | return; |
4081 | 0 | } |
4082 | | |
4083 | 0 | FlatFilePos block_pos{}; |
4084 | 0 | { |
4085 | 0 | LOCK(cs_main); |
4086 | |
4087 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(req.blockhash); |
4088 | 0 | if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { |
4089 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); |
4090 | 0 | return; |
4091 | 0 | } |
4092 | | |
4093 | 0 | if (pindex->nHeight >= m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) { |
4094 | 0 | block_pos = pindex->GetBlockPos(); |
4095 | 0 | } |
4096 | 0 | } |
4097 | | |
4098 | 0 | if (!block_pos.IsNull()) { |
4099 | 0 | CBlock block; |
4100 | 0 | const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)}; |
4101 | | // Because the requested block is within MAX_BLOCKTXN_DEPTH of the tip, it cannot
4102 | | // get pruned after we release cs_main above, so this read should never fail.
4103 | 0 | assert(ret); |
4104 | | |
4105 | 0 | SendBlockTransactions(pfrom, *peer, block, req); |
4106 | 0 | return; |
4107 | 0 | } |
4108 | | |
4109 | | // If an older block is requested (should never happen in practice, |
4110 | | // but can happen in tests) send a block response instead of a |
4111 | | // blocktxn response. Sending a full block response instead of a |
4112 | | // small blocktxn response is preferable in the case where a peer |
4113 | | // might maliciously send lots of getblocktxn requests to trigger |
4114 | | // expensive disk reads, because it will require the peer to |
4115 | | // actually receive all the data read from disk over the network. |
4116 | 0 | LogDebug(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); |
4117 | 0 | CInv inv{MSG_WITNESS_BLOCK, req.blockhash}; |
4118 | 0 | WITH_LOCK(peer->m_getdata_requests_mutex, peer->m_getdata_requests.push_back(inv)); |
4119 | | // The message processing loop will go around again (without pausing) and we'll respond then |
4120 | 0 | return; |
4121 | 0 | } |
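
[Editor's note] The depth cutoff above, in isolation: recent blocks get a small blocktxn reply, while deeper requests fall back to a full block so a peer probing for expensive disk reads must receive all the data it triggers. The depth limit below is an illustrative stand-in for MAX_BLOCKTXN_DEPTH:

    #include <cassert>

    bool ServeBlockTxn(int block_height, int tip_height, int max_depth)
    {
        return block_height >= tip_height - max_depth;
    }

    int main()
    {
        assert(ServeBlockTxn(/*block=*/100, /*tip=*/105, /*max_depth=*/10));  // recent
        assert(!ServeBlockTxn(/*block=*/100, /*tip=*/120, /*max_depth=*/10)); // too deep
        return 0;
    }
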
4122 | | |
4123 | 0 | if (msg_type == NetMsgType::GETHEADERS) { |
4124 | 0 | CBlockLocator locator; |
4125 | 0 | uint256 hashStop; |
4126 | 0 | vRecv >> locator >> hashStop; |
4127 | |
4128 | 0 | if (locator.vHave.size() > MAX_LOCATOR_SZ) { |
4129 | 0 | LogDebug(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); |
4130 | 0 | pfrom.fDisconnect = true; |
4131 | 0 | return; |
4132 | 0 | } |
4133 | | |
4134 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4135 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d while importing/reindexing\n", pfrom.GetId()); |
4136 | 0 | return; |
4137 | 0 | } |
4138 | | |
4139 | 0 | LOCK(cs_main); |
4140 | | |
4141 | | // Note that if we were to be on a chain that forks from the checkpointed |
4142 | | // chain, then serving those headers to a peer that has seen the |
4143 | | // checkpointed chain would cause that peer to disconnect us. Requiring |
4144 | | // that our chainwork exceed the minimum chain work is a protection against |
4145 | | // being fed a bogus chain when we started up for the first time and |
4146 | | // getting partitioned off the honest network for serving that chain to |
4147 | | // others. |
4148 | 0 | if (m_chainman.ActiveTip() == nullptr || |
4149 | 0 | (m_chainman.ActiveTip()->nChainWork < m_chainman.MinimumChainWork() && !pfrom.HasPermission(NetPermissionFlags::Download))) { |
4150 | 0 | LogDebug(BCLog::NET, "Ignoring getheaders from peer=%d because active chain has too little work; sending empty response\n", pfrom.GetId()); |
4151 | | // Just respond with an empty headers message, to tell the peer to |
4152 | | // go away but not treat us as unresponsive. |
4153 | 0 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, std::vector<CBlockHeader>()); |
4154 | 0 | return; |
4155 | 0 | } |
4156 | | |
4157 | 0 | CNodeState *nodestate = State(pfrom.GetId()); |
4158 | 0 | const CBlockIndex* pindex = nullptr; |
4159 | 0 | if (locator.IsNull()) |
4160 | 0 | { |
4161 | | // If locator is null, return the hashStop block |
4162 | 0 | pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop); |
4163 | 0 | if (!pindex) { |
4164 | 0 | return; |
4165 | 0 | } |
4166 | | |
4167 | 0 | if (!BlockRequestAllowed(pindex)) { |
4168 | 0 | LogDebug(BCLog::NET, "%s: ignoring request from peer=%i for old block header that isn't in the main chain\n", __func__, pfrom.GetId()); |
4169 | 0 | return; |
4170 | 0 | } |
4171 | 0 | } |
4172 | 0 | else |
4173 | 0 | { |
4174 | | // Find the last block the caller has in the main chain |
4175 | 0 | pindex = m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator); |
4176 | 0 | if (pindex) |
4177 | 0 | pindex = m_chainman.ActiveChain().Next(pindex); |
4178 | 0 | } |
4179 | | |
4180 | | // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end |
4181 | 0 | std::vector<CBlock> vHeaders; |
4182 | 0 | int nLimit = m_opts.max_headers_result; |
4183 | 0 | LogDebug(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); |
4184 | 0 | for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) |
4185 | 0 | { |
4186 | 0 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
4187 | 0 | if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) |
4188 | 0 | break; |
4189 | 0 | } |
4190 | | // pindex can be nullptr either if we sent m_chainman.ActiveChain().Tip() OR |
4191 | | // if our peer has m_chainman.ActiveChain().Tip() (and thus we are sending an empty |
4192 | | // headers message). In both cases it's safe to update |
4193 | | // pindexBestHeaderSent to be our tip. |
4194 | | // |
4195 | | // It is important that we simply reset the BestHeaderSent value here, |
4196 | | // and not max(BestHeaderSent, newHeaderSent). We might have announced |
4197 | | // the currently-being-connected tip using a compact block, which |
4198 | | // resulted in the peer sending a headers request, which we respond to |
4199 | | // without the new block. By resetting the BestHeaderSent, we ensure we |
4200 | | // will re-announce the new block via headers (or compact blocks again) |
4201 | | // in the SendMessages logic. |
4202 | 0 | nodestate->pindexBestHeaderSent = pindex ? pindex : m_chainman.ActiveChain().Tip(); |
4203 | 0 | MakeAndPushMessage(pfrom, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
4204 | 0 | return; |
4205 | 0 | } |
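
[Editor's note] The headers walk above, in isolation: starting just past the fork point, headers are appended until the result limit or the stop hash is reached. A standalone sketch; strings stand in for headers/hashes and the limit mirrors m_opts.max_headers_result:

    #include <cassert>
    #include <string>
    #include <vector>

    std::vector<std::string> CollectHeaders(const std::vector<std::string>& chain,
                                            size_t start, const std::string& stop,
                                            int limit)
    {
        std::vector<std::string> out;
        for (size_t i = start; i < chain.size(); ++i) {
            out.push_back(chain[i]);
            if (--limit <= 0 || chain[i] == stop) break;
        }
        return out;
    }

    int main()
    {
        const std::vector<std::string> chain{"a", "b", "c", "d"};
        assert((CollectHeaders(chain, 1, "c", 100) == std::vector<std::string>{"b", "c"}));
        assert(CollectHeaders(chain, 0, /*stop=*/"", 2).size() == 2); // limit binds first
        return 0;
    }
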
4206 | | |
4207 | 0 | if (msg_type == NetMsgType::TX) { |
4208 | 0 | if (RejectIncomingTxs(pfrom)) { |
4209 | 0 | LogDebug(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); |
4210 | 0 | pfrom.fDisconnect = true; |
4211 | 0 | return; |
4212 | 0 | } |
4213 | | |
4214 | | // Stop processing the transaction early if we are still in IBD since we don't |
4215 | | // have enough information to validate it yet. Sending unsolicited transactions |
4216 | | // is not considered a protocol violation, so don't punish the peer. |
4217 | 0 | if (m_chainman.IsInitialBlockDownload()) return; |
4218 | | |
4219 | 0 | CTransactionRef ptx; |
4220 | 0 | vRecv >> TX_WITH_WITNESS(ptx); |
4221 | 0 | const CTransaction& tx = *ptx; |
4222 | |
4223 | 0 | const uint256& txid = ptx->GetHash(); |
4224 | 0 | const uint256& wtxid = ptx->GetWitnessHash(); |
4225 | |
4226 | 0 | const uint256& hash = peer->m_wtxid_relay ? wtxid : txid; |
4227 | 0 | AddKnownTx(*peer, hash); |
4228 | |
4229 | 0 | LOCK2(cs_main, m_tx_download_mutex); |
4230 | |
|
4231 | 0 | const auto& [should_validate, package_to_validate] = m_txdownloadman.ReceivedTx(pfrom.GetId(), ptx); |
4232 | 0 | if (!should_validate) { |
4233 | 0 | if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) { |
4234 | | // Always relay transactions received from peers with forcerelay |
4235 | | // permission, even if they were already in the mempool, allowing |
4236 | | // the node to function as a gateway for nodes hidden behind it. |
4237 | 0 | if (!m_mempool.exists(GenTxid::Txid(tx.GetHash()))) { |
4238 | 0 | LogPrintf("Not relaying non-mempool transaction %s (wtxid=%s) from forcerelay peer=%d\n", |
4239 | 0 | tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId()); |
4240 | 0 | } else { |
4241 | 0 | LogPrintf("Force relaying tx %s (wtxid=%s) from peer=%d\n", |
4242 | 0 | tx.GetHash().ToString(), tx.GetWitnessHash().ToString(), pfrom.GetId()); |
4243 | 0 | RelayTransaction(tx.GetHash(), tx.GetWitnessHash()); |
4244 | 0 | } |
4245 | 0 | } |
4246 | |
4247 | 0 | if (package_to_validate) { |
4248 | 0 | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4249 | 0 | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), |
4250 | 0 | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4251 | 0 | ProcessPackageResult(package_to_validate.value(), package_result); |
4252 | 0 | } |
4253 | 0 | return; |
4254 | 0 | } |
4255 | | |
4256 | | // ReceivedTx should not be telling us to validate the tx and a package. |
4257 | 0 | Assume(!package_to_validate.has_value()); |
4258 | |
4259 | 0 | const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx); |
4260 | 0 | const TxValidationState& state = result.m_state; |
4261 | |
4262 | 0 | if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { |
4263 | 0 | ProcessValidTx(pfrom.GetId(), ptx, result.m_replaced_transactions); |
4264 | 0 | pfrom.m_last_tx_time = GetTime<std::chrono::seconds>(); |
4265 | 0 | } |
4266 | 0 | if (state.IsInvalid()) { |
4267 | 0 | if (auto package_to_validate{ProcessInvalidTx(pfrom.GetId(), ptx, state, /*first_time_failure=*/true)}) { |
4268 | 0 | const auto package_result{ProcessNewPackage(m_chainman.ActiveChainstate(), m_mempool, package_to_validate->m_txns, /*test_accept=*/false, /*client_maxfeerate=*/std::nullopt)}; |
4269 | 0 | LogDebug(BCLog::TXPACKAGES, "package evaluation for %s: %s\n", package_to_validate->ToString(), |
4270 | 0 | package_result.m_state.IsValid() ? "package accepted" : "package rejected"); |
4271 | 0 | ProcessPackageResult(package_to_validate.value(), package_result); |
4272 | 0 | } |
4273 | 0 | } |
4274 | |
4275 | 0 | return; |
4276 | 0 | } |
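The handler above tracks each announcement under a single hash: the wtxid for peers that negotiated BIP 339 wtxid relay, the txid otherwise. A minimal standalone sketch of that selection; TxIds and RelayHash are illustrative names, not Bitcoin Core API:

    #include <string>

    struct TxIds {
        std::string txid;  // hash committing to the transaction without witness data
        std::string wtxid; // hash committing to the witness as well
    };

    // Exactly one hash identifies each announcement for a given peer.
    const std::string& RelayHash(const TxIds& tx, bool peer_wtxid_relay)
    {
        return peer_wtxid_relay ? tx.wtxid : tx.txid;
    }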
4277 | | |
4278 | 0 | if (msg_type == NetMsgType::CMPCTBLOCK) |
4279 | 0 | { |
4280 | | // Ignore cmpctblock received while importing |
4281 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4282 | 0 | LogDebug(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); |
4283 | 0 | return; |
4284 | 0 | } |
4285 | | |
4286 | 0 | CBlockHeaderAndShortTxIDs cmpctblock; |
4287 | 0 | vRecv >> cmpctblock; |
4288 | |
4289 | 0 | bool received_new_header = false; |
4290 | 0 | const auto blockhash = cmpctblock.header.GetHash(); |
4291 | |
4292 | 0 | { |
4293 | 0 | LOCK(cs_main); |
4294 | |
4295 | 0 | const CBlockIndex* prev_block = m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock); |
4296 | 0 | if (!prev_block) { |
4297 | | // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers |
4298 | 0 | if (!m_chainman.IsInitialBlockDownload()) { |
4299 | 0 | MaybeSendGetHeaders(pfrom, GetLocator(m_chainman.m_best_header), *peer); |
4300 | 0 | } |
4301 | 0 | return; |
4302 | 0 | } else if (prev_block->nChainWork + CalculateClaimedHeadersWork({{cmpctblock.header}}) < GetAntiDoSWorkThreshold()) { |
4303 | | // If we get a low-work header in a compact block, we can ignore it. |
4304 | 0 | LogDebug(BCLog::NET, "Ignoring low-work compact block from peer %d\n", pfrom.GetId()); |
4305 | 0 | return; |
4306 | 0 | } |
4307 | | |
4308 | 0 | if (!m_chainman.m_blockman.LookupBlockIndex(blockhash)) { |
4309 | 0 | received_new_header = true; |
4310 | 0 | } |
4311 | 0 | } |
4312 | | |
4313 | 0 | const CBlockIndex *pindex = nullptr; |
4314 | 0 | BlockValidationState state; |
4315 | 0 | if (!m_chainman.ProcessNewBlockHeaders({{cmpctblock.header}}, /*min_pow_checked=*/true, state, &pindex)) { |
4316 | 0 | if (state.IsInvalid()) { |
4317 | 0 | MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block=*/true, "invalid header via cmpctblock"); |
4318 | 0 | return; |
4319 | 0 | } |
4320 | 0 | } |
4321 | | |
4322 | 0 | if (received_new_header) { |
4323 | 0 | LogInfo("Saw new cmpctblock header hash=%s peer=%d\n", |
4324 | 0 | blockhash.ToString(), pfrom.GetId()); |
4325 | 0 | } |
4326 | |
4327 | 0 | bool fProcessBLOCKTXN = false; |
4328 | | |
4329 | | // If we end up treating this as a plain headers message, call that as well |
4330 | | // without cs_main. |
4331 | 0 | bool fRevertToHeaderProcessing = false; |
4332 | | |
4333 | | // Keep a CBlock for "optimistic" compactblock reconstructions (see |
4334 | | // below) |
4335 | 0 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4336 | 0 | bool fBlockReconstructed = false; |
4337 | |
4338 | 0 | { |
4339 | 0 | LOCK(cs_main); |
4340 | | // If AcceptBlockHeader returned true, it set pindex |
4341 | 0 | assert(pindex); |
4342 | 0 | UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); |
4343 | |
4344 | 0 | CNodeState *nodestate = State(pfrom.GetId()); |
4345 | | |
4346 | | // If this was a new header with more work than our tip, update the |
4347 | | // peer's last block announcement time |
4348 | 0 | if (received_new_header && pindex->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) { |
4349 | 0 | nodestate->m_last_block_announcement = GetTime(); |
4350 | 0 | } |
4351 | |
4352 | 0 | if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here |
4353 | 0 | return; |
4354 | | |
4355 | 0 | auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash()); |
4356 | 0 | size_t already_in_flight = std::distance(range_flight.first, range_flight.second); |
4357 | 0 | bool requested_block_from_this_peer{false}; |
4358 | | |
4359 | | // The multimap preserves the order of outstanding requests: this peer is first in line if there are none, or if its own request heads the range. |
4360 | 0 | bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId()); |
4361 | |
4362 | 0 | while (range_flight.first != range_flight.second) { |
4363 | 0 | if (range_flight.first->second.first == pfrom.GetId()) { |
4364 | 0 | requested_block_from_this_peer = true; |
4365 | 0 | break; |
4366 | 0 | } |
4367 | 0 | range_flight.first++; |
4368 | 0 | } |
4369 | |
4370 | 0 | if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better |
4371 | 0 | pindex->nTx != 0) { // We had this block at some point, but pruned it |
4372 | 0 | if (requested_block_from_this_peer) { |
4373 | | // We requested this block for some reason, but our mempool will probably be useless |
4374 | | // so we just grab the block via normal getdata |
4375 | 0 | std::vector<CInv> vInv(1); |
4376 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4377 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4378 | 0 | } |
4379 | 0 | return; |
4380 | 0 | } |
4381 | | |
4382 | | // If we're not close to tip yet, give up and let parallel block fetch work its magic |
4383 | 0 | if (!already_in_flight && !CanDirectFetch()) { |
4384 | 0 | return; |
4385 | 0 | } |
4386 | | |
4387 | | // We want to be a bit conservative just to be extra careful about DoS |
4388 | | // possibilities in compact block processing... |
4389 | 0 | if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) { |
4390 | 0 | if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || |
4391 | 0 | requested_block_from_this_peer) { |
4392 | 0 | std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; |
4393 | 0 | if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { |
4394 | 0 | if (!(*queuedBlockIt)->partialBlock) |
4395 | 0 | (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); |
4396 | 0 | else { |
4397 | | // The block was already in flight using compact blocks from the same peer |
4398 | 0 | LogDebug(BCLog::NET, "Peer sent us compact block we were already syncing!\n"); |
4399 | 0 | return; |
4400 | 0 | } |
4401 | 0 | } |
4402 | | |
4403 | 0 | PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; |
4404 | 0 | ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4405 | 0 | if (status == READ_STATUS_INVALID) { |
4406 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect |
4407 | 0 | Misbehaving(*peer, "invalid compact block"); |
4408 | 0 | return; |
4409 | 0 | } else if (status == READ_STATUS_FAILED) { |
4410 | 0 | if (first_in_flight) { |
4411 | | // Duplicate txindexes, the block is now in-flight, so just request it |
4412 | 0 | std::vector<CInv> vInv(1); |
4413 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4414 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4415 | 0 | } else { |
4416 | | // Give up for this peer and wait for other peer(s) |
4417 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4418 | 0 | } |
4419 | 0 | return; |
4420 | 0 | } |
4421 | | |
4422 | 0 | BlockTransactionsRequest req; |
4423 | 0 | for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { |
4424 | 0 | if (!partialBlock.IsTxAvailable(i)) |
4425 | 0 | req.indexes.push_back(i); |
4426 | 0 | } |
4427 | 0 | if (req.indexes.empty()) { |
4428 | 0 | fProcessBLOCKTXN = true; |
4429 | 0 | } else if (first_in_flight) { |
4430 | | // We will try to round-trip any compact blocks we get on failure, |
4431 | | // as long as it's first... |
4432 | 0 | req.blockhash = pindex->GetBlockHash(); |
4433 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4434 | 0 | } else if (pfrom.m_bip152_highbandwidth_to && |
4435 | 0 | (!pfrom.IsInboundConn() || |
4436 | 0 | IsBlockRequestedFromOutbound(blockhash) || |
4437 | 0 | already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) { |
4438 | | // ... or it's a hb relay peer and: |
4439 | | // - peer is outbound, or |
4440 | | // - we already have an outbound attempt in flight (so we'll take what we can get), or |
4441 | | // - it's not the final parallel download slot (which we may reserve for first outbound) |
4442 | 0 | req.blockhash = pindex->GetBlockHash(); |
4443 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETBLOCKTXN, req); |
4444 | 0 | } else { |
4445 | | // Give up for this peer and wait for other peer(s) |
4446 | 0 | RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); |
4447 | 0 | } |
4448 | 0 | } else { |
4449 | | // This block is either already in flight from a different |
4450 | | // peer, or this peer has too many blocks outstanding to |
4451 | | // download from. |
4452 | | // Optimistically try to reconstruct anyway since we might be |
4453 | | // able to without any round trips. |
4454 | 0 | PartiallyDownloadedBlock tempBlock(&m_mempool); |
4455 | 0 | ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); |
4456 | 0 | if (status != READ_STATUS_OK) { |
4457 | | // TODO: don't ignore failures |
4458 | 0 | return; |
4459 | 0 | } |
4460 | 0 | std::vector<CTransactionRef> dummy; |
4461 | 0 | status = tempBlock.FillBlock(*pblock, dummy); |
4462 | 0 | if (status == READ_STATUS_OK) { |
4463 | 0 | fBlockReconstructed = true; |
4464 | 0 | } |
4465 | 0 | } |
4466 | 0 | } else { |
4467 | 0 | if (requested_block_from_this_peer) { |
4468 | | // We requested this block, but it's far into the future, so our |
4469 | | // mempool will probably be useless - request the block normally |
4470 | 0 | std::vector<CInv> vInv(1); |
4471 | 0 | vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash); |
4472 | 0 | MakeAndPushMessage(pfrom, NetMsgType::GETDATA, vInv); |
4473 | 0 | return; |
4474 | 0 | } else { |
4475 | | // If this was an announce-cmpctblock, we want the same treatment as a header message |
4476 | 0 | fRevertToHeaderProcessing = true; |
4477 | 0 | } |
4478 | 0 | } |
4479 | 0 | } // cs_main |
4480 | | |
4481 | 0 | if (fProcessBLOCKTXN) { |
4482 | 0 | BlockTransactions txn; |
4483 | 0 | txn.blockhash = blockhash; |
4484 | 0 | return ProcessCompactBlockTxns(pfrom, *peer, txn); |
4485 | 0 | } |
4486 | | |
4487 | 0 | if (fRevertToHeaderProcessing) { |
4488 | | // Headers received from HB compact block peers are permitted to be |
4489 | | // relayed before full validation (see BIP 152), so we don't want to disconnect |
4490 | | // the peer if the header turns out to be for an invalid block. |
4491 | | // Note that if a peer tries to build on an invalid chain, that |
4492 | | // will be detected and the peer will be disconnected/discouraged. |
4493 | 0 | return ProcessHeadersMessage(pfrom, *peer, {cmpctblock.header}, /*via_compact_block=*/true); |
4494 | 0 | } |
4495 | | |
4496 | 0 | if (fBlockReconstructed) { |
4497 | | // If we got here, we were able to optimistically reconstruct a |
4498 | | // block that is in flight from some other peer. |
4499 | 0 | { |
4500 | 0 | LOCK(cs_main); |
4501 | 0 | mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); |
4502 | 0 | } |
4503 | | // Setting force_processing to true means that we bypass some of |
4504 | | // our anti-DoS protections in AcceptBlock, which filters |
4505 | | // unrequested blocks that might be trying to waste our resources |
4506 | | // (eg disk space). Because we only try to reconstruct blocks when |
4507 | | // we're close to caught up (via the CanDirectFetch() requirement |
4508 | | // above, combined with the behavior of not requesting blocks until |
4509 | | // we have a chain with at least the minimum chain work), and we ignore |
4510 | | // compact blocks with less work than our tip, it is safe to treat |
4511 | | // reconstructed compact blocks as having been requested. |
4512 | 0 | ProcessBlock(pfrom, pblock, /*force_processing=*/true, /*min_pow_checked=*/true); |
4513 | 0 | LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid() |
4514 | 0 | if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { |
4515 | | // Clear download state for this block, which is in |
4516 | | // process from some other peer. We do this after calling |
4517 | | // ProcessNewBlock so that a malleated cmpctblock announcement |
4518 | | // can't be used to interfere with block relay. |
4519 | 0 | RemoveBlockRequest(pblock->GetHash(), std::nullopt); |
4520 | 0 | } |
4521 | 0 | } |
4522 | 0 | return; |
4523 | 0 | } |
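The round trip above is the core of BIP 152: after seeding the partial block from the mempool, only the transactions that are still missing are requested, by index. A simplified sketch of that request construction, with std::vector<bool> standing in for PartiallyDownloadedBlock::IsTxAvailable:

    #include <cstddef>
    #include <vector>

    struct MissingTxRequest {
        std::vector<std::size_t> indexes; // positions of unavailable transactions in the block
    };

    MissingTxRequest BuildRequest(const std::vector<bool>& tx_available)
    {
        MissingTxRequest req;
        for (std::size_t i = 0; i < tx_available.size(); ++i) {
            if (!tx_available[i]) req.indexes.push_back(i);
        }
        // An empty request means the block reconstructed fully from the mempool
        // and can be processed immediately (the fProcessBLOCKTXN path above).
        return req;
    }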
4524 | | |
4525 | 0 | if (msg_type == NetMsgType::BLOCKTXN) |
4526 | 0 | { |
4527 | | // Ignore blocktxn received while importing |
4528 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4529 | 0 | LogDebug(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); |
4530 | 0 | return; |
4531 | 0 | } |
4532 | | |
4533 | 0 | BlockTransactions resp; |
4534 | 0 | vRecv >> resp; |
4535 | |
4536 | 0 | return ProcessCompactBlockTxns(pfrom, *peer, resp); |
4537 | 0 | } |
4538 | | |
4539 | 0 | if (msg_type == NetMsgType::HEADERS) |
4540 | 0 | { |
4541 | | // Ignore headers received while importing |
4542 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4543 | 0 | LogDebug(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); |
4544 | 0 | return; |
4545 | 0 | } |
4546 | | |
4547 | 0 | std::vector<CBlockHeader> headers; |
4548 | | |
4549 | | // Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks. |
4550 | 0 | unsigned int nCount = ReadCompactSize(vRecv); |
4551 | 0 | if (nCount > m_opts.max_headers_result) { |
4552 | 0 | Misbehaving(*peer, strprintf("headers message size = %u", nCount)); |
4553 | 0 | return; |
4554 | 0 | } |
4555 | 0 | headers.resize(nCount); |
4556 | 0 | for (unsigned int n = 0; n < nCount; n++) { |
4557 | 0 | vRecv >> headers[n]; |
4558 | 0 | ReadCompactSize(vRecv); // ignore tx count; assume it is 0. |
4559 | 0 | } |
4560 | |
4561 | 0 | ProcessHeadersMessage(pfrom, *peer, std::move(headers), /*via_compact_block=*/false); |
4562 | | |
4563 | | // Check if the headers presync progress needs to be reported to validation. |
4564 | | // This needs to be done without holding the m_headers_presync_mutex lock. |
4565 | 0 | if (m_headers_presync_should_signal.exchange(false)) { |
4566 | 0 | HeadersPresyncStats stats; |
4567 | 0 | { |
4568 | 0 | LOCK(m_headers_presync_mutex); |
4569 | 0 | auto it = m_headers_presync_stats.find(m_headers_presync_bestpeer); |
4570 | 0 | if (it != m_headers_presync_stats.end()) stats = it->second; |
4571 | 0 | } |
4572 | 0 | if (stats.second) { |
4573 | 0 | m_chainman.ReportHeadersPresync(stats.first, stats.second->first, stats.second->second); |
4574 | 0 | } |
4575 | 0 | } |
4576 | |
4577 | 0 | return; |
4578 | 0 | } |
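The deserialization loop above leans on the wire format: each entry of a headers message is an 80-byte header followed by a compact-size transaction count that announcers set to zero. A sketch of the same pattern over a raw buffer, assuming for simplicity that each tx count is a single zero byte:

    #include <algorithm>
    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    struct RawHeader { unsigned char bytes[80]; };

    std::vector<RawHeader> ReadHeaders(const unsigned char* p, std::size_t len,
                                       std::size_t count, std::size_t max_count)
    {
        if (count > max_count) throw std::runtime_error("oversized headers message");
        std::vector<RawHeader> headers(count);
        std::size_t off = 0;
        for (std::size_t n = 0; n < count; ++n) {
            if (off + 81 > len) throw std::runtime_error("short payload");
            std::copy(p + off, p + off + 80, headers[n].bytes);
            off += 81; // 80 header bytes plus the ignored one-byte tx count
        }
        return headers;
    }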
4579 | | |
4580 | 0 | if (msg_type == NetMsgType::BLOCK) |
4581 | 0 | { |
4582 | | // Ignore block received while importing |
4583 | 0 | if (m_chainman.m_blockman.LoadingBlocks()) { |
4584 | 0 | LogDebug(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); |
4585 | 0 | return; |
4586 | 0 | } |
4587 | | |
4588 | 0 | std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>(); |
4589 | 0 | vRecv >> TX_WITH_WITNESS(*pblock); |
4590 | |
4591 | 0 | LogDebug(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); |
4592 | |
4593 | 0 | const CBlockIndex* prev_block{WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock))}; |
4594 | | |
4595 | | // Check for possible mutation if it connects to something we know, so we can check whether DEPLOYMENT_SEGWIT is active |
4596 | 0 | if (prev_block && IsBlockMutated(/*block=*/*pblock, |
4597 | 0 | /*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) { |
4598 | 0 | LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id); |
4599 | 0 | Misbehaving(*peer, "mutated block"); |
4600 | 0 | WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id)); |
4601 | 0 | return; |
4602 | 0 | } |
4603 | | |
4604 | 0 | bool forceProcessing = false; |
4605 | 0 | const uint256 hash(pblock->GetHash()); |
4606 | 0 | bool min_pow_checked = false; |
4607 | 0 | { |
4608 | 0 | LOCK(cs_main); |
4609 | | // Always process the block if we requested it, since we may |
4610 | | // need it even when it's not a candidate for a new best tip. |
4611 | 0 | forceProcessing = IsBlockRequested(hash); |
4612 | 0 | RemoveBlockRequest(hash, pfrom.GetId()); |
4613 | | // mapBlockSource is only used for punishing peers and setting |
4614 | | // which peers send us compact blocks, so the race between here and |
4615 | | // cs_main in ProcessNewBlock is fine. |
4616 | 0 | mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); |
4617 | | |
4618 | | // Check claimed work on this block against our anti-dos thresholds. |
4619 | 0 | if (prev_block && prev_block->nChainWork + CalculateClaimedHeadersWork({{pblock->GetBlockHeader()}}) >= GetAntiDoSWorkThreshold()) { |
4620 | 0 | min_pow_checked = true; |
4621 | 0 | } |
4622 | 0 | } |
4623 | 0 | ProcessBlock(pfrom, pblock, forceProcessing, min_pow_checked); |
4624 | 0 | return; |
4625 | 0 | } |
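The min_pow_checked decision above reduces to a threshold comparison on claimed chain work. A toy version with arith_uint256 replaced by a plain integer; all names are illustrative:

    #include <cstdint>

    // A block is only given the expensive validation path if the chain it claims
    // to extend, plus its own claimed header work, clears the anti-DoS threshold.
    bool MinPowChecked(uint64_t prev_chain_work, uint64_t claimed_header_work,
                       uint64_t anti_dos_threshold)
    {
        return prev_chain_work + claimed_header_work >= anti_dos_threshold;
    }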
4626 | | |
4627 | 0 | if (msg_type == NetMsgType::GETADDR) { |
4628 | | // This asymmetric behavior for inbound and outbound connections was introduced |
4629 | | // to prevent a fingerprinting attack: an attacker can send specific fake addresses |
4630 | | // to users' AddrMan and later request them by sending getaddr messages. |
4631 | | // Making nodes which are behind NAT and can only make outgoing connections ignore |
4632 | | // the getaddr message mitigates the attack. |
4633 | 0 | if (!pfrom.IsInboundConn()) { |
4634 | 0 | LogDebug(BCLog::NET, "Ignoring \"getaddr\" from %s connection. peer=%d\n", pfrom.ConnectionTypeAsString(), pfrom.GetId()); |
4635 | 0 | return; |
4636 | 0 | } |
4637 | | |
4638 | | // Since this must be an inbound connection, SetupAddressRelay will |
4639 | | // never fail. |
4640 | 0 | Assume(SetupAddressRelay(pfrom, *peer)); |
4641 | | |
4642 | | // Only send one GetAddr response per connection to reduce resource waste |
4643 | | // and discourage addr stamping of INV announcements. |
4644 | 0 | if (peer->m_getaddr_recvd) { |
4645 | 0 | LogDebug(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); |
4646 | 0 | return; |
4647 | 0 | } |
4648 | 0 | peer->m_getaddr_recvd = true; |
4649 | |
4650 | 0 | peer->m_addrs_to_send.clear(); |
4651 | 0 | std::vector<CAddress> vAddr; |
4652 | 0 | if (pfrom.HasPermission(NetPermissionFlags::Addr)) { |
4653 | 0 | vAddr = m_connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND, /*network=*/std::nullopt); |
4654 | 0 | } else { |
4655 | 0 | vAddr = m_connman.GetAddresses(pfrom, MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); |
4656 | 0 | } |
4657 | 0 | for (const CAddress &addr : vAddr) { |
4658 | 0 | PushAddress(*peer, addr); |
4659 | 0 | } |
4660 | 0 | return; |
4661 | 0 | } |
4662 | | |
4663 | 0 | if (msg_type == NetMsgType::MEMPOOL) { |
4664 | | // Only process received mempool messages if we advertise NODE_BLOOM |
4665 | | // or if the peer has mempool permissions. |
4666 | 0 | if (!(peer->m_our_services & NODE_BLOOM) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) |
4667 | 0 | { |
4668 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) |
4669 | 0 | { |
4670 | 0 | LogDebug(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom.GetId()); |
4671 | 0 | pfrom.fDisconnect = true; |
4672 | 0 | } |
4673 | 0 | return; |
4674 | 0 | } |
4675 | | |
4676 | 0 | if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(NetPermissionFlags::Mempool)) |
4677 | 0 | { |
4678 | 0 | if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) |
4679 | 0 | { |
4680 | 0 | LogDebug(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom.GetId()); |
4681 | 0 | pfrom.fDisconnect = true; |
4682 | 0 | } |
4683 | 0 | return; |
4684 | 0 | } |
4685 | | |
4686 | 0 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
4687 | 0 | LOCK(tx_relay->m_tx_inventory_mutex); |
4688 | 0 | tx_relay->m_send_mempool = true; |
4689 | 0 | } |
4690 | 0 | return; |
4691 | 0 | } |
4692 | | |
4693 | 0 | if (msg_type == NetMsgType::PING) { |
4694 | 0 | if (pfrom.GetCommonVersion() > BIP0031_VERSION) { |
4695 | 0 | uint64_t nonce = 0; |
4696 | 0 | vRecv >> nonce; |
4697 | | // Echo the message back with the nonce. This allows for two useful features: |
4698 | | // |
4699 | | // 1) A remote node can quickly check if the connection is operational |
4700 | | // 2) Remote nodes can measure the latency of the network thread. If this node |
4701 | | // is overloaded it won't respond to pings quickly and the remote node can |
4702 | | // avoid sending us more work, like chain download requests. |
4703 | | // |
4704 | | // The nonce stops the remote getting confused between different pings: without |
4705 | | // it, if the remote node sends a ping once per second and this node takes 5 |
4706 | | // seconds to respond to each, the 5th ping the remote sends would appear to |
4707 | | // return very quickly. |
4708 | 0 | MakeAndPushMessage(pfrom, NetMsgType::PONG, nonce); |
4709 | 0 | } |
4710 | 0 | return; |
4711 | 0 | } |
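A small model of the nonce protocol described in the comments above; PingState and both handlers are hypothetical stand-ins for the Peer fields and message plumbing:

    #include <cstdint>

    struct PingState {
        uint64_t nonce_sent{0}; // 0 means no ping is outstanding
    };

    // A ping is answered by echoing its nonce back unchanged.
    uint64_t HandlePing(uint64_t received_nonce) { return received_nonce; }

    // A pong settles the outstanding ping only when the nonces match, so
    // overlapping pings cannot be confused with one another.
    bool HandlePong(PingState& st, uint64_t received_nonce)
    {
        if (st.nonce_sent != 0 && received_nonce == st.nonce_sent) {
            st.nonce_sent = 0;
            return true;
        }
        return false;
    }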
4712 | | |
4713 | 0 | if (msg_type == NetMsgType::PONG) { |
4714 | 0 | const auto ping_end = time_received; |
4715 | 0 | uint64_t nonce = 0; |
4716 | 0 | size_t nAvail = vRecv.in_avail(); |
4717 | 0 | bool bPingFinished = false; |
4718 | 0 | std::string sProblem; |
4719 | |
4720 | 0 | if (nAvail >= sizeof(nonce)) { |
4721 | 0 | vRecv >> nonce; |
4722 | | |
4723 | | // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) |
4724 | 0 | if (peer->m_ping_nonce_sent != 0) { |
4725 | 0 | if (nonce == peer->m_ping_nonce_sent) { |
4726 | | // Matching pong received, this ping is no longer outstanding |
4727 | 0 | bPingFinished = true; |
4728 | 0 | const auto ping_time = ping_end - peer->m_ping_start.load(); |
4729 | 0 | if (ping_time.count() >= 0) { |
4730 | | // Let connman know about this successful ping-pong |
4731 | 0 | pfrom.PongReceived(ping_time); |
4732 | 0 | } else { |
4733 | | // This should never happen |
4734 | 0 | sProblem = "Timing mishap"; |
4735 | 0 | } |
4736 | 0 | } else { |
4737 | | // Nonce mismatches are normal when pings are overlapping |
4738 | 0 | sProblem = "Nonce mismatch"; |
4739 | 0 | if (nonce == 0) { |
4740 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4741 | 0 | bPingFinished = true; |
4742 | 0 | sProblem = "Nonce zero"; |
4743 | 0 | } |
4744 | 0 | } |
4745 | 0 | } else { |
4746 | 0 | sProblem = "Unsolicited pong without ping"; |
4747 | 0 | } |
4748 | 0 | } else { |
4749 | | // This is most likely a bug in another implementation somewhere; cancel this ping |
4750 | 0 | bPingFinished = true; |
4751 | 0 | sProblem = "Short payload"; |
4752 | 0 | } |
4753 | |
4754 | 0 | if (!(sProblem.empty())) { |
4755 | 0 | LogDebug(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", |
4756 | 0 | pfrom.GetId(), |
4757 | 0 | sProblem, |
4758 | 0 | peer->m_ping_nonce_sent, |
4759 | 0 | nonce, |
4760 | 0 | nAvail); |
4761 | 0 | } |
4762 | 0 | if (bPingFinished) { |
4763 | 0 | peer->m_ping_nonce_sent = 0; |
4764 | 0 | } |
4765 | 0 | return; |
4766 | 0 | } |
4767 | | |
4768 | 0 | if (msg_type == NetMsgType::FILTERLOAD) { |
4769 | 0 | if (!(peer->m_our_services & NODE_BLOOM)) { |
4770 | 0 | LogDebug(BCLog::NET, "filterload received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); |
4771 | 0 | pfrom.fDisconnect = true; |
4772 | 0 | return; |
4773 | 0 | } |
4774 | 0 | CBloomFilter filter; |
4775 | 0 | vRecv >> filter; |
4776 | |
4777 | 0 | if (!filter.IsWithinSizeConstraints()) |
4778 | 0 | { |
4779 | | // There is no excuse for sending a too-large filter |
4780 | 0 | Misbehaving(*peer, "too-large bloom filter"); |
4781 | 0 | } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
4782 | 0 | { |
4783 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
4784 | 0 | tx_relay->m_bloom_filter.reset(new CBloomFilter(filter)); |
4785 | 0 | tx_relay->m_relay_txs = true; |
4786 | 0 | } |
4787 | 0 | pfrom.m_bloom_filter_loaded = true; |
4788 | 0 | pfrom.m_relays_txs = true; |
4789 | 0 | } |
4790 | 0 | return; |
4791 | 0 | } |
4792 | | |
4793 | 0 | if (msg_type == NetMsgType::FILTERADD) { |
4794 | 0 | if (!(peer->m_our_services & NODE_BLOOM)) { |
4795 | 0 | LogDebug(BCLog::NET, "filteradd received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); |
4796 | 0 | pfrom.fDisconnect = true; |
4797 | 0 | return; |
4798 | 0 | } |
4799 | 0 | std::vector<unsigned char> vData; |
4800 | 0 | vRecv >> vData; |
4801 | | |
4802 | | // Nodes must NEVER send a data item > MAX_SCRIPT_ELEMENT_SIZE bytes (the max size for a script data object, |
4803 | | // and thus, the maximum size any matched object can have) in a filteradd message |
4804 | 0 | bool bad = false; |
4805 | 0 | if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { |
4806 | 0 | bad = true; |
4807 | 0 | } else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
4808 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
4809 | 0 | if (tx_relay->m_bloom_filter) { |
4810 | 0 | tx_relay->m_bloom_filter->insert(vData); |
4811 | 0 | } else { |
4812 | 0 | bad = true; |
4813 | 0 | } |
4814 | 0 | } |
4815 | 0 | if (bad) { |
4816 | 0 | Misbehaving(*peer, "bad filteradd message"); |
4817 | 0 | } |
4818 | 0 | return; |
4819 | 0 | } |
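The size check above works because a filter element longer than the largest script data push can never match anything, so an oversized insert only signals a broken or hostile sender. A sketch of the acceptance rule; the 520-byte constant mirrors MAX_SCRIPT_ELEMENT_SIZE and Filter is a placeholder type:

    #include <cstddef>
    #include <vector>

    constexpr std::size_t MAX_ELEMENT_SIZE{520}; // mirrors MAX_SCRIPT_ELEMENT_SIZE

    enum class FilterAddResult { Inserted, NoFilterLoaded, Oversized };

    template <typename Filter>
    FilterAddResult FilterAdd(Filter* filter, const std::vector<unsigned char>& data)
    {
        if (data.size() > MAX_ELEMENT_SIZE) return FilterAddResult::Oversized; // misbehavior
        if (filter == nullptr) return FilterAddResult::NoFilterLoaded;         // also misbehavior
        filter->insert(data);
        return FilterAddResult::Inserted;
    }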
4820 | | |
4821 | 0 | if (msg_type == NetMsgType::FILTERCLEAR) { |
4822 | 0 | if (!(peer->m_our_services & NODE_BLOOM)) { |
4823 | 0 | LogDebug(BCLog::NET, "filterclear received despite not offering bloom services from peer=%d; disconnecting\n", pfrom.GetId()); |
4824 | 0 | pfrom.fDisconnect = true; |
4825 | 0 | return; |
4826 | 0 | } |
4827 | 0 | auto tx_relay = peer->GetTxRelay(); |
4828 | 0 | if (!tx_relay) return; |
4829 | | |
4830 | 0 | { |
4831 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
4832 | 0 | tx_relay->m_bloom_filter = nullptr; |
4833 | 0 | tx_relay->m_relay_txs = true; |
4834 | 0 | } |
4835 | 0 | pfrom.m_bloom_filter_loaded = false; |
4836 | 0 | pfrom.m_relays_txs = true; |
4837 | 0 | return; |
4838 | 0 | } |
4839 | | |
4840 | 0 | if (msg_type == NetMsgType::FEEFILTER) { |
4841 | 0 | CAmount newFeeFilter = 0; |
4842 | 0 | vRecv >> newFeeFilter; |
4843 | 0 | if (MoneyRange(newFeeFilter)) { |
4844 | 0 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
4845 | 0 | tx_relay->m_fee_filter_received = newFeeFilter; |
4846 | 0 | } |
4847 | 0 | LogDebug(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); |
4848 | 0 | } |
4849 | 0 | return; |
4850 | 0 | } |
4851 | | |
4852 | 0 | if (msg_type == NetMsgType::GETCFILTERS) { |
4853 | 0 | ProcessGetCFilters(pfrom, *peer, vRecv); |
4854 | 0 | return; |
4855 | 0 | } |
4856 | | |
4857 | 0 | if (msg_type == NetMsgType::GETCFHEADERS) { |
4858 | 0 | ProcessGetCFHeaders(pfrom, *peer, vRecv); |
4859 | 0 | return; |
4860 | 0 | } |
4861 | | |
4862 | 0 | if (msg_type == NetMsgType::GETCFCHECKPT) { |
4863 | 0 | ProcessGetCFCheckPt(pfrom, *peer, vRecv); |
4864 | 0 | return; |
4865 | 0 | } |
4866 | | |
4867 | 0 | if (msg_type == NetMsgType::NOTFOUND) { |
4868 | 0 | std::vector<CInv> vInv; |
4869 | 0 | vRecv >> vInv; |
4870 | 0 | std::vector<uint256> tx_invs; |
4871 | 0 | if (vInv.size() <= node::MAX_PEER_TX_ANNOUNCEMENTS + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
4872 | 0 | for (CInv &inv : vInv) { |
4873 | 0 | if (inv.IsGenTxMsg()) { |
4874 | 0 | tx_invs.emplace_back(inv.hash); |
4875 | 0 | } |
4876 | 0 | } |
4877 | 0 | } |
4878 | 0 | LOCK(m_tx_download_mutex); |
4879 | 0 | m_txdownloadman.ReceivedNotFound(pfrom.GetId(), tx_invs); |
4880 | 0 | return; |
4881 | 0 | } |
4882 | | |
4883 | | // Ignore unknown commands for extensibility |
4884 | 0 | LogDebug(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); |
4885 | 0 | return; |
4886 | 0 | } |
4887 | | |
4888 | | bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) |
4889 | 0 | { |
4890 | 0 | { |
4891 | 0 | LOCK(peer.m_misbehavior_mutex); |
4892 | | |
4893 | | // There's nothing to do if the m_should_discourage flag isn't set |
4894 | 0 | if (!peer.m_should_discourage) return false; |
4895 | | |
4896 | 0 | peer.m_should_discourage = false; |
4897 | 0 | } // peer.m_misbehavior_mutex |
4898 | | |
4899 | 0 | if (pnode.HasPermission(NetPermissionFlags::NoBan)) { |
4900 | | // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission |
4901 | 0 | LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); |
4902 | 0 | return false; |
4903 | 0 | } |
4904 | | |
4905 | 0 | if (pnode.IsManualConn()) { |
4906 | | // We never disconnect or discourage manual peers for bad behavior |
4907 | 0 | LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); |
4908 | 0 | return false; |
4909 | 0 | } |
4910 | | |
4911 | 0 | if (pnode.addr.IsLocal()) { |
4912 | | // We disconnect local peers for bad behavior but don't discourage (since that would discourage |
4913 | | // all peers on the same local address) |
4914 | 0 | LogDebug(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n", |
4915 | 0 | pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id); |
4916 | 0 | pnode.fDisconnect = true; |
4917 | 0 | return true; |
4918 | 0 | } |
4919 | | |
4920 | | // Normal case: Disconnect the peer and discourage all nodes sharing the address |
4921 | 0 | LogDebug(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); |
4922 | 0 | if (m_banman) m_banman->Discourage(pnode.addr); |
4923 | 0 | m_connman.DisconnectNode(pnode.addr); |
4924 | 0 | return true; |
4925 | 0 | } |
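The function above is a strict decision ladder. A condensed sketch of the same ordering, with plain booleans standing in for the permission flags and address classification:

    enum class Punishment { None, DisconnectOnly, DiscourageAndDisconnect };

    Punishment Decide(bool should_discourage, bool noban, bool manual, bool is_local)
    {
        if (!should_discourage) return Punishment::None;
        if (noban || manual) return Punishment::None;    // trusted peers: warn in the log only
        if (is_local) return Punishment::DisconnectOnly; // don't discourage a shared local address
        return Punishment::DiscourageAndDisconnect;
    }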
4926 | | |
4927 | | bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc) |
4928 | 0 | { |
4929 | 0 | AssertLockNotHeld(m_tx_download_mutex); |
4930 | 0 | AssertLockHeld(g_msgproc_mutex); |
4931 | |
4932 | 0 | PeerRef peer = GetPeerRef(pfrom->GetId()); |
4933 | 0 | if (peer == nullptr) return false; |
4934 | | |
4935 | | // For outbound connections, ensure that the initial VERSION message |
4936 | | // has been sent first before processing any incoming messages |
4937 | 0 | if (!pfrom->IsInboundConn() && !peer->m_outbound_version_message_sent) return false; |
4938 | | |
4939 | 0 | { |
4940 | 0 | LOCK(peer->m_getdata_requests_mutex); |
4941 | 0 | if (!peer->m_getdata_requests.empty()) { |
4942 | 0 | ProcessGetData(*pfrom, *peer, interruptMsgProc); |
4943 | 0 | } |
4944 | 0 | } |
4945 | |
4946 | 0 | const bool processed_orphan = ProcessOrphanTx(*peer); |
4947 | |
4948 | 0 | if (pfrom->fDisconnect) |
4949 | 0 | return false; |
4950 | | |
4951 | 0 | if (processed_orphan) return true; |
4952 | | |
4953 | | // this maintains the order of responses |
4954 | | // and prevents m_getdata_requests from growing unbounded |
4955 | 0 | { |
4956 | 0 | LOCK(peer->m_getdata_requests_mutex); |
4957 | 0 | if (!peer->m_getdata_requests.empty()) return true; |
4958 | 0 | } |
4959 | | |
4960 | | // Don't bother if send buffer is too full to respond anyway |
4961 | 0 | if (pfrom->fPauseSend) return false; |
4962 | | |
4963 | 0 | auto poll_result{pfrom->PollMessage()}; |
4964 | 0 | if (!poll_result) { |
4965 | | // No message to process |
4966 | 0 | return false; |
4967 | 0 | } |
4968 | | |
4969 | 0 | CNetMessage& msg{poll_result->first}; |
4970 | 0 | bool fMoreWork = poll_result->second; |
4971 | |
4972 | 0 | TRACE6(net, inbound_message, |
4973 | 0 | pfrom->GetId(), |
4974 | 0 | pfrom->m_addr_name.c_str(), |
4975 | 0 | pfrom->ConnectionTypeAsString().c_str(), |
4976 | 0 | msg.m_type.c_str(), |
4977 | 0 | msg.m_recv.size(), |
4978 | 0 | msg.m_recv.data() |
4979 | 0 | ); |
4980 | |
4981 | 0 | if (m_opts.capture_messages) { |
4982 | 0 | CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); |
4983 | 0 | } |
4984 | |
4985 | 0 | try { |
4986 | 0 | ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc); |
4987 | 0 | if (interruptMsgProc) return false; |
4988 | 0 | { |
4989 | 0 | LOCK(peer->m_getdata_requests_mutex); |
4990 | 0 | if (!peer->m_getdata_requests.empty()) fMoreWork = true; |
4991 | 0 | } |
4992 | | // Does this peer have an orphan ready to reconsider? |
4993 | | // (Note: we may have provided a parent for an orphan provided |
4994 | | // by another peer that was already processed; in that case, |
4995 | | // the extra work may not be noticed, possibly resulting in an |
4996 | | // unnecessary 100ms delay) |
4997 | 0 | LOCK(m_tx_download_mutex); |
4998 | 0 | if (m_txdownloadman.HaveMoreWork(peer->m_id)) fMoreWork = true; |
4999 | 0 | } catch (const std::exception& e) { |
5000 | 0 | LogDebug(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); |
5001 | 0 | } catch (...) { |
5002 | 0 | LogDebug(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size); |
5003 | 0 | } |
5004 | | |
5005 | 0 | return fMoreWork; |
5006 | 0 | } |
5007 | | |
5008 | | void PeerManagerImpl::ConsiderEviction(CNode& pto, Peer& peer, std::chrono::seconds time_in_seconds) |
5009 | 0 | { |
5010 | 0 | AssertLockHeld(cs_main); |
5011 | |
5012 | 0 | CNodeState &state = *State(pto.GetId()); |
5013 | |
5014 | 0 | if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { |
5015 | | // This is an outbound peer subject to disconnection if they don't |
5016 | | // announce a block with as much work as the current tip within |
5017 | | // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if |
5018 | | // their chain has more work than ours, we should sync to it, |
5019 | | // unless it's invalid, in which case we should find that out and |
5020 | | // disconnect from them elsewhere). |
5021 | 0 | if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) { |
5022 | | // The outbound peer has sent us a block with at least as much work as our current tip, so reset the timeout if it was set |
5023 | 0 | if (state.m_chain_sync.m_timeout != 0s) { |
5024 | 0 | state.m_chain_sync.m_timeout = 0s; |
5025 | 0 | state.m_chain_sync.m_work_header = nullptr; |
5026 | 0 | state.m_chain_sync.m_sent_getheaders = false; |
5027 | 0 | } |
5028 | 0 | } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { |
5029 | | // At this point we know that the outbound peer has either never sent us a block/header or they have, but their tip is behind ours |
5030 | | // AND |
5031 | | // we are noticing this for the first time (m_timeout is 0) |
5032 | | // OR we noticed this at some point within the last CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds and set a timeout |
5033 | | // for them; they have since caught up to the tip we benchmarked when arming the timer, but not to our current one (we've advanced). |
5034 | | // Either way, set a new timeout based on our current tip. |
5035 | 0 | state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; |
5036 | 0 | state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip(); |
5037 | 0 | state.m_chain_sync.m_sent_getheaders = false; |
5038 | 0 | } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) { |
5039 | | // No evidence yet that our peer has synced to a chain with work equal to that |
5040 | | // of our tip, when we first detected it was behind. Send a single getheaders |
5041 | | // message to give the peer a chance to update us. |
5042 | 0 | if (state.m_chain_sync.m_sent_getheaders) { |
5043 | | // They've run out of time to catch up! |
5044 | 0 | LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>"); |
5045 | 0 | pto.fDisconnect = true; |
5046 | 0 | } else { |
5047 | 0 | assert(state.m_chain_sync.m_work_header); |
5048 | | // Here, we assume that the getheaders message goes out, |
5049 | | // because it'll either go out or be skipped because of a |
5050 | | // getheaders in-flight already, in which case the peer should |
5051 | | // still respond to us with a sufficiently high work chain tip. |
5052 | 0 | MaybeSendGetHeaders(pto, |
5053 | 0 | GetLocator(state.m_chain_sync.m_work_header->pprev), |
5054 | 0 | peer); |
5055 | 0 | LogDebug(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString()); |
5056 | 0 | state.m_chain_sync.m_sent_getheaders = true; |
5057 | | // Bump the timeout to allow a response, which could clear the timeout |
5058 | | // (if the response shows the peer has synced), reset the timeout (if |
5059 | | // the peer syncs to the required work but not to our tip), or result |
5060 | | // in disconnect (if we advance to the timeout and pindexBestKnownBlock |
5061 | | // has not sufficiently progressed) |
5062 | 0 | state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; |
5063 | 0 | } |
5064 | 0 | } |
5065 | 0 | } |
5066 | 0 | } |
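The three branches above form a small state machine: clear the timer once the peer proves it has caught up, (re)arm it against the current tip when the peer is first seen to be behind, and on expiry send one getheaders probe before disconnecting. A sketch with scalar work values in place of nChainWork; ChainSyncState and Evaluate are illustrative:

    #include <chrono>
    #include <cstdint>

    struct ChainSyncState {
        std::chrono::seconds timeout{0};
        uint64_t work_benchmark{0}; // tip work recorded when the timer was armed
        bool sent_getheaders{false};
    };

    enum class Action { None, ArmTimer, SendGetHeaders, Disconnect };

    Action Evaluate(ChainSyncState& s, uint64_t peer_best_work, uint64_t tip_work,
                    std::chrono::seconds now, std::chrono::seconds chain_sync_timeout,
                    std::chrono::seconds headers_response_time)
    {
        using namespace std::chrono_literals;
        if (peer_best_work >= tip_work) {
            s = ChainSyncState{}; // peer has caught up: clear any pending timeout
            return Action::None;
        }
        if (s.timeout == 0s || peer_best_work >= s.work_benchmark) {
            s.timeout = now + chain_sync_timeout; // (re)arm against the current tip
            s.work_benchmark = tip_work;
            s.sent_getheaders = false;
            return Action::ArmTimer;
        }
        if (now > s.timeout) {
            if (s.sent_getheaders) return Action::Disconnect; // probe sent, time is up
            s.sent_getheaders = true;
            s.timeout = now + headers_response_time; // leave time for a response
            return Action::SendGetHeaders;
        }
        return Action::None;
    }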
5067 | | |
5068 | | void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) |
5069 | 0 | { |
5070 | | // If we have any extra block-relay-only peers, disconnect the youngest unless |
5071 | | // it's given us a block -- in which case, compare with the second-youngest, and |
5072 | | // out of those two, disconnect the peer who least recently gave us a block. |
5073 | | // The youngest block-relay-only peer would be the extra peer we connected |
5074 | | // to temporarily in order to sync our tip; see net.cpp. |
5075 | | // Note that we use higher nodeid as a measure for most recent connection. |
5076 | 0 | if (m_connman.GetExtraBlockRelayCount() > 0) { |
5077 | 0 | std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0}, next_youngest_peer{-1, 0}; |
5078 | |
5079 | 0 | m_connman.ForEachNode([&](CNode* pnode) { |
5080 | 0 | if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return; |
5081 | 0 | if (pnode->GetId() > youngest_peer.first) { |
5082 | 0 | next_youngest_peer = youngest_peer; |
5083 | 0 | youngest_peer.first = pnode->GetId(); |
5084 | 0 | youngest_peer.second = pnode->m_last_block_time; |
5085 | 0 | } |
5086 | 0 | }); |
5087 | 0 | NodeId to_disconnect = youngest_peer.first; |
5088 | 0 | if (youngest_peer.second > next_youngest_peer.second) { |
5089 | | // Our newest block-relay-only peer gave us a block more recently; |
5090 | | // disconnect our second youngest. |
5091 | 0 | to_disconnect = next_youngest_peer.first; |
5092 | 0 | } |
5093 | 0 | m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5094 | 0 | AssertLockHeld(::cs_main); |
5095 | | // Make sure we're not getting a block right now, and that |
5096 | | // we've been connected long enough for this eviction to happen |
5097 | | // at all. |
5098 | | // Note that we only request blocks from a peer if we learn of a |
5099 | | // valid headers chain with at least as much work as our tip. |
5100 | 0 | CNodeState *node_state = State(pnode->GetId()); |
5101 | 0 | if (node_state == nullptr || |
5102 | 0 | (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) { |
5103 | 0 | pnode->fDisconnect = true; |
5104 | 0 | LogDebug(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", |
5105 | 0 | pnode->GetId(), count_seconds(pnode->m_last_block_time)); |
5106 | 0 | return true; |
5107 | 0 | } else { |
5108 | 0 | LogDebug(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", |
5109 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size()); |
5110 | 0 | } |
5111 | 0 | return false; |
5112 | 0 | }); |
5113 | 0 | } |
5114 | | |
5115 | | // Check whether we have too many outbound-full-relay peers |
5116 | 0 | if (m_connman.GetExtraFullOutboundCount() > 0) { |
5117 | | // If we have more outbound-full-relay peers than we target, disconnect one. |
5118 | | // Pick the outbound-full-relay peer that least recently announced |
5119 | | // us a new block, with ties broken by choosing the more recent |
5120 | | // connection (higher node id) |
5121 | | // Protect peers from eviction if we don't have another connection |
5122 | | // to their network, counting both outbound-full-relay and manual peers. |
5123 | 0 | NodeId worst_peer = -1; |
5124 | 0 | int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max(); |
5125 | |
5126 | 0 | m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_connman.GetNodesMutex()) { |
5127 | 0 | AssertLockHeld(::cs_main); |
5128 | | |
5129 | | // Only consider outbound-full-relay peers that are not already |
5130 | | // marked for disconnection |
5131 | 0 | if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return; |
5132 | 0 | CNodeState *state = State(pnode->GetId()); |
5133 | 0 | if (state == nullptr) return; // shouldn't be possible, but just in case |
5134 | | // Don't evict our protected peers |
5135 | 0 | if (state->m_chain_sync.m_protect) return; |
5136 | | // If this is the only connection on a particular network that is |
5137 | | // OUTBOUND_FULL_RELAY or MANUAL, protect it. |
5138 | 0 | if (!m_connman.MultipleManualOrFullOutboundConns(pnode->addr.GetNetwork())) return; |
5139 | 0 | if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { |
5140 | 0 | worst_peer = pnode->GetId(); |
5141 | 0 | oldest_block_announcement = state->m_last_block_announcement; |
5142 | 0 | } |
5143 | 0 | }); |
5144 | 0 | if (worst_peer != -1) { |
5145 | 0 | bool disconnected = m_connman.ForNode(worst_peer, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { |
5146 | 0 | AssertLockHeld(::cs_main); |
5147 | | |
5148 | | // Only disconnect a peer that has been connected to us for |
5149 | | // some reasonable fraction of our check-frequency, to give |
5150 | | // it time for new information to have arrived. |
5151 | | // Also don't disconnect any peer we're trying to download a |
5152 | | // block from. |
5153 | 0 | CNodeState &state = *State(pnode->GetId()); |
5154 | 0 | if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) { |
5155 | 0 | LogDebug(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); |
5156 | 0 | pnode->fDisconnect = true; |
5157 | 0 | return true; |
5158 | 0 | } else { |
5159 | 0 | LogDebug(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", |
5160 | 0 | pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size()); |
5161 | 0 | return false; |
5162 | 0 | } |
5163 | 0 | }); |
5164 | 0 | if (disconnected) { |
5165 | | // If we disconnected an extra peer, that means we successfully |
5166 | | // connected to at least one peer after the last time we |
5167 | | // detected a stale tip. Don't try any more extra peers until |
5168 | | // we next detect a stale tip, to limit the load we put on the |
5169 | | // network from these extra connections. |
5170 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5171 | 0 | } |
5172 | 0 | } |
5173 | 0 | } |
5174 | 0 | } |
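The full-outbound pass above picks a deterministic "worst" candidate: the peer with the oldest last block announcement, with ties broken toward the newer connection (higher node id). The same scan in isolation, over plain structs rather than CNode/CNodeState:

    #include <cstdint>
    #include <limits>
    #include <vector>

    struct Candidate {
        int64_t node_id;
        int64_t last_block_announcement;
    };

    int64_t PickWorstPeer(const std::vector<Candidate>& candidates)
    {
        int64_t worst_peer{-1};
        int64_t oldest_announcement{std::numeric_limits<int64_t>::max()};
        for (const auto& c : candidates) {
            if (c.last_block_announcement < oldest_announcement ||
                (c.last_block_announcement == oldest_announcement && c.node_id > worst_peer)) {
                worst_peer = c.node_id;
                oldest_announcement = c.last_block_announcement;
            }
        }
        return worst_peer; // -1 if there were no candidates
    }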
5175 | | |
5176 | | void PeerManagerImpl::CheckForStaleTipAndEvictPeers() |
5177 | 0 | { |
5178 | 0 | LOCK(cs_main); |
5179 | |
5180 | 0 | auto now{GetTime<std::chrono::seconds>()}; |
5181 | |
5182 | 0 | EvictExtraOutboundPeers(now); |
5183 | |
5184 | 0 | if (now > m_stale_tip_check_time) { |
5185 | | // Check whether our tip is stale, and if so, allow using an extra |
5186 | | // outbound peer |
5187 | 0 | if (!m_chainman.m_blockman.LoadingBlocks() && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { |
5188 | 0 | LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", |
5189 | 0 | count_seconds(now - m_last_tip_update.load())); |
5190 | 0 | m_connman.SetTryNewOutboundPeer(true); |
5191 | 0 | } else if (m_connman.GetTryNewOutboundPeer()) { |
5192 | 0 | m_connman.SetTryNewOutboundPeer(false); |
5193 | 0 | } |
5194 | 0 | m_stale_tip_check_time = now + STALE_CHECK_INTERVAL; |
5195 | 0 | } |
5196 | |
5197 | 0 | if (!m_initial_sync_finished && CanDirectFetch()) { |
5198 | 0 | m_connman.StartExtraBlockRelayPeers(); |
5199 | 0 | m_initial_sync_finished = true; |
5200 | 0 | } |
5201 | 0 | } |
5202 | | |
5203 | | void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer, std::chrono::microseconds now) |
5204 | 0 | { |
5205 | 0 | if (m_connman.ShouldRunInactivityChecks(node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) && |
5206 | 0 | peer.m_ping_nonce_sent && |
5207 | 0 | now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) |
5208 | 0 | { |
5209 | | // The ping timeout is using mocktime. To disable the check during |
5210 | | // testing, increase -peertimeout. |
5211 | 0 | LogDebug(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id); |
5212 | 0 | node_to.fDisconnect = true; |
5213 | 0 | return; |
5214 | 0 | } |
5215 | | |
5216 | 0 | bool pingSend = false; |
5217 | |
5218 | 0 | if (peer.m_ping_queued) { |
5219 | | // RPC ping request by user |
5220 | 0 | pingSend = true; |
5221 | 0 | } |
5222 | |
5223 | 0 | if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) { |
5224 | | // Ping automatically sent as a latency probe & keepalive. |
5225 | 0 | pingSend = true; |
5226 | 0 | } |
5227 | |
5228 | 0 | if (pingSend) { |
5229 | 0 | uint64_t nonce; |
5230 | 0 | do { |
5231 | 0 | nonce = FastRandomContext().rand64(); |
5232 | 0 | } while (nonce == 0); |
5233 | 0 | peer.m_ping_queued = false; |
5234 | 0 | peer.m_ping_start = now; |
5235 | 0 | if (node_to.GetCommonVersion() > BIP0031_VERSION) { |
5236 | 0 | peer.m_ping_nonce_sent = nonce; |
5237 | 0 | MakeAndPushMessage(node_to, NetMsgType::PING, nonce); |
5238 | 0 | } else { |
5239 | | // Peer is too old to support the ping command with a nonce; a pong will never arrive. |
5240 | 0 | peer.m_ping_nonce_sent = 0; |
5241 | 0 | MakeAndPushMessage(node_to, NetMsgType::PING); |
5242 | 0 | } |
5243 | 0 | } |
5244 | 0 | } |
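The nonce draw above rejects zero because a zero m_ping_nonce_sent means "no ping outstanding". A standalone version of the loop, with std::mt19937_64 standing in for FastRandomContext:

    #include <cstdint>
    #include <random>

    uint64_t DrawPingNonce()
    {
        static std::mt19937_64 rng{std::random_device{}()};
        uint64_t nonce;
        do {
            nonce = rng();
        } while (nonce == 0); // zero is reserved to mean "no ping outstanding"
        return nonce;
    }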
5245 | | |
5246 | | void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::microseconds current_time) |
5247 | 0 | { |
5248 | | // Nothing to do for non-address-relay peers |
5249 | 0 | if (!peer.m_addr_relay_enabled) return; |
5250 | | |
5251 | 0 | LOCK(peer.m_addr_send_times_mutex); |
5252 | | // Periodically advertise our local address to the peer. |
5253 | 0 | if (fListen && !m_chainman.IsInitialBlockDownload() && |
5254 | 0 | peer.m_next_local_addr_send < current_time) { |
5255 | | // If we've sent before, clear the bloom filter for the peer, so that our |
5256 | | // self-announcement will actually go out. |
5257 | | // This might be unnecessary if the bloom filter has already rolled |
5258 | | // over since our last self-announcement, but there is only a small |
5259 | | // bandwidth cost that we can incur by doing this (which happens |
5260 | | // once a day on average). |
5261 | 0 | if (peer.m_next_local_addr_send != 0us) { |
5262 | 0 | peer.m_addr_known->reset(); |
5263 | 0 | } |
5264 | 0 | if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) { |
5265 | 0 | CAddress local_addr{*local_service, peer.m_our_services, Now<NodeSeconds>()}; |
5266 | 0 | PushAddress(peer, local_addr); |
5267 | 0 | } |
5268 | 0 | peer.m_next_local_addr_send = current_time + m_rng.rand_exp_duration(AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); |
5269 | 0 | } |
5270 | | |
5271 | | // We sent an `addr` message to this peer recently. Nothing more to do. |
5272 | 0 | if (current_time <= peer.m_next_addr_send) return; |
5273 | | |
5274 | 0 | peer.m_next_addr_send = current_time + m_rng.rand_exp_duration(AVG_ADDRESS_BROADCAST_INTERVAL); |
5275 | |
5276 | 0 | if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { |
5277 | | // Should be impossible since we always check size before adding to |
5278 | | // m_addrs_to_send. Recover by trimming the vector. |
5279 | 0 | peer.m_addrs_to_send.resize(MAX_ADDR_TO_SEND); |
5280 | 0 | } |
5281 | | |
5282 | | // Remove addr records that the peer already knows about, and add new |
5283 | | // addrs to the m_addr_known filter on the same pass. |
5284 | 0 | auto addr_already_known = [&peer](const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) { |
5285 | 0 | bool ret = peer.m_addr_known->contains(addr.GetKey()); |
5286 | 0 | if (!ret) peer.m_addr_known->insert(addr.GetKey()); |
5287 | 0 | return ret; |
5288 | 0 | }; |
5289 | 0 | peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(), peer.m_addrs_to_send.end(), addr_already_known), |
5290 | 0 | peer.m_addrs_to_send.end()); |
5291 | | |
5292 | | // No addr messages to send |
5293 | 0 | if (peer.m_addrs_to_send.empty()) return; |
5294 | | |
5295 | 0 | if (peer.m_wants_addrv2) { |
5296 | 0 | MakeAndPushMessage(node, NetMsgType::ADDRV2, CAddress::V2_NETWORK(peer.m_addrs_to_send)); |
5297 | 0 | } else { |
5298 | 0 | MakeAndPushMessage(node, NetMsgType::ADDR, CAddress::V1_NETWORK(peer.m_addrs_to_send)); |
5299 | 0 | } |
5300 | 0 | peer.m_addrs_to_send.clear(); |
5301 | | |
5302 | | // we only send the big addr message once |
5303 | 0 | if (peer.m_addrs_to_send.capacity() > 40) { |
5304 | 0 | peer.m_addrs_to_send.shrink_to_fit(); |
5305 | 0 | } |
5306 | 0 | } |
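The erase/remove_if pass above deduplicates and populates the known-address filter in a single sweep. The same idiom with std::unordered_set standing in for CRollingBloomFilter (the real filter can report false positives but never grows without bound):

    #include <algorithm>
    #include <string>
    #include <unordered_set>
    #include <vector>

    void FilterKnownAddrs(std::vector<std::string>& to_send,
                          std::unordered_set<std::string>& known)
    {
        auto addr_already_known = [&known](const std::string& addr) {
            // insert() reports in one lookup whether the element was already present
            return !known.insert(addr).second;
        };
        to_send.erase(std::remove_if(to_send.begin(), to_send.end(), addr_already_known),
                      to_send.end());
    }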
5307 | | |
5308 | | void PeerManagerImpl::MaybeSendSendHeaders(CNode& node, Peer& peer) |
5309 | 0 | { |
5310 | | // Delay sending SENDHEADERS (BIP 130) until we're done with an |
5311 | | // initial-headers-sync with this peer. Receiving headers announcements for |
5312 | | // new blocks while trying to sync their headers chain is problematic, |
5313 | | // because of the state tracking done. |
5314 | 0 | if (!peer.m_sent_sendheaders && node.GetCommonVersion() >= SENDHEADERS_VERSION) { |
5315 | 0 | LOCK(cs_main); |
5316 | 0 | CNodeState &state = *State(node.GetId()); |
5317 | 0 | if (state.pindexBestKnownBlock != nullptr && |
5318 | 0 | state.pindexBestKnownBlock->nChainWork > m_chainman.MinimumChainWork()) { |
5319 | | // Tell our peer we prefer to receive headers rather than inv's |
5320 | | // We send this to non-NODE NETWORK peers as well, because even |
5321 | | // non-NODE NETWORK peers can announce blocks (such as pruning |
5322 | | // nodes) |
5323 | 0 | MakeAndPushMessage(node, NetMsgType::SENDHEADERS); |
5324 | 0 | peer.m_sent_sendheaders = true; |
5325 | 0 | } |
5326 | 0 | } |
5327 | 0 | } |
5328 | | |
5329 | | void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, Peer& peer, std::chrono::microseconds current_time) |
5330 | 0 | { |
5331 | 0 | if (m_opts.ignore_incoming_txs) return; |
5332 | 0 | if (pto.GetCommonVersion() < FEEFILTER_VERSION) return; |
5333 | | // peers with the forcerelay permission should not filter txs to us |
5334 | 0 | if (pto.HasPermission(NetPermissionFlags::ForceRelay)) return; |
5335 | | // Don't send feefilter messages to outbound block-relay-only peers since they should never announce |
5336 | | // transactions to us, regardless of feefilter state. |
5337 | 0 | if (pto.IsBlockOnlyConn()) return; |
5338 | | |
5339 | 0 | CAmount currentFilter = m_mempool.GetMinFee().GetFeePerK(); |
5340 | |
5341 | 0 | if (m_chainman.IsInitialBlockDownload()) { |
5342 | | // Received tx-inv messages are discarded when the active |
5343 | | // chainstate is in IBD, so tell the peer to not send them. |
5344 | 0 | currentFilter = MAX_MONEY; |
5345 | 0 | } else { |
5346 | 0 | static const CAmount MAX_FILTER{m_fee_filter_rounder.round(MAX_MONEY)}; |
5347 | 0 | if (peer.m_fee_filter_sent == MAX_FILTER) { |
5348 | | // Send the current filter if we sent MAX_FILTER previously |
5349 | | // and made it out of IBD. |
5350 | 0 | peer.m_next_send_feefilter = 0us; |
5351 | 0 | } |
5352 | 0 | } |
5353 | 0 | if (current_time > peer.m_next_send_feefilter) { |
5354 | 0 | CAmount filterToSend = m_fee_filter_rounder.round(currentFilter); |
5355 | | // We always have a fee filter of at least the min relay fee |
5356 | 0 | filterToSend = std::max(filterToSend, m_mempool.m_opts.min_relay_feerate.GetFeePerK()); |
5357 | 0 | if (filterToSend != peer.m_fee_filter_sent) { |
5358 | 0 | MakeAndPushMessage(pto, NetMsgType::FEEFILTER, filterToSend); |
5359 | 0 | peer.m_fee_filter_sent = filterToSend; |
5360 | 0 | } |
5361 | 0 | peer.m_next_send_feefilter = current_time + m_rng.rand_exp_duration(AVG_FEEFILTER_BROADCAST_INTERVAL); |
5362 | 0 | } |
5363 | | // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY |
5364 | | // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. |
5365 | 0 | else if (current_time + MAX_FEEFILTER_CHANGE_DELAY < peer.m_next_send_feefilter && |
5366 | 0 | (currentFilter < 3 * peer.m_fee_filter_sent / 4 || currentFilter > 4 * peer.m_fee_filter_sent / 3)) { |
5367 | 0 | peer.m_next_send_feefilter = current_time + m_rng.randrange<std::chrono::microseconds>(MAX_FEEFILTER_CHANGE_DELAY); |
5368 | 0 | } |
5369 | 0 | } |
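The early-rebroadcast rule above fires when the filter has moved by more than roughly a third in either direction while the next scheduled send is still far off. A sketch of just that timing decision; the RNG and parameters are stand-ins for the real ones:

    #include <chrono>
    #include <cstdint>
    #include <random>

    using std::chrono::microseconds;

    microseconds NextFeefilterSend(microseconds now, microseconds scheduled,
                                   int64_t current_filter, int64_t last_sent,
                                   microseconds max_change_delay)
    {
        const bool big_change = current_filter < 3 * last_sent / 4 ||
                                current_filter > 4 * last_sent / 3;
        if (big_change && now + max_change_delay < scheduled) {
            static std::mt19937_64 rng{std::random_device{}()};
            std::uniform_int_distribution<int64_t> dist(0, max_change_delay.count() - 1);
            return now + microseconds{dist(rng)}; // pull the broadcast forward
        }
        return scheduled;
    }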
5370 | | |
5371 | | namespace { |
5372 | | class CompareInvMempoolOrder |
5373 | | { |
5374 | | CTxMemPool* mp; |
5375 | | bool m_wtxid_relay; |
5376 | | public: |
5377 | | explicit CompareInvMempoolOrder(CTxMemPool *_mempool, bool use_wtxid) |
5378 | 0 | { |
5379 | 0 | mp = _mempool; |
5380 | 0 | m_wtxid_relay = use_wtxid; |
5381 | 0 | } |
5382 | | |
5383 | | bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b) |
5384 | 0 | { |
5385 | | /* As std::make_heap produces a max-heap, we want the entries with the |
5386 | | * fewest ancestors/highest fee to sort later. */ |
5387 | 0 | return mp->CompareDepthAndScore(*b, *a, m_wtxid_relay); |
5388 | 0 | } |
5389 | | }; |
5390 | | } // namespace |
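Because std::make_heap builds a max-heap, a comparator like the one above is deliberately inverted so that the entries to announce first sort last and surface via pop_heap. A toy demonstration with integer scores in place of mempool depth/fee ordering:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<int> scores{3, 9, 1, 7};
        auto worse = [](int a, int b) { return a < b; }; // lower score sorts earlier
        std::make_heap(scores.begin(), scores.end(), worse);
        std::pop_heap(scores.begin(), scores.end(), worse); // best entry moves to the back
        assert(scores.back() == 9);
        return 0;
    }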
5391 | | |
5392 | | bool PeerManagerImpl::RejectIncomingTxs(const CNode& peer) const |
5393 | 0 | { |
5394 | | // block-relay-only peers may never send txs to us |
5395 | 0 | if (peer.IsBlockOnlyConn()) return true; |
5396 | 0 | if (peer.IsFeelerConn()) return true; |
5397 | | // In -blocksonly mode, peers need the 'relay' permission to send txs to us |
5398 | 0 | if (m_opts.ignore_incoming_txs && !peer.HasPermission(NetPermissionFlags::Relay)) return true; |
5399 | 0 | return false; |
5400 | 0 | } |
5401 | | |
5402 | | bool PeerManagerImpl::SetupAddressRelay(const CNode& node, Peer& peer) |
5403 | 0 | { |
5404 | | // We don't participate in addr relay with outbound block-relay-only |
5405 | | // connections to prevent providing adversaries with the additional |
5406 | | // information of addr traffic to infer the link. |
5407 | 0 | if (node.IsBlockOnlyConn()) return false; |
5408 | | |
5409 | 0 | if (!peer.m_addr_relay_enabled.exchange(true)) { |
5410 | | // During version message processing (non-block-relay-only outbound peers) |
5411 | | // or on first addr-related message we have received (inbound peers), initialize |
5412 | | // m_addr_known. |
5413 | 0 | peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001); |
5414 | 0 | } |
5415 | |
5416 | 0 | return true; |
5417 | 0 | } |
5418 | | |
5419 | | bool PeerManagerImpl::SendMessages(CNode* pto) |
5420 | 0 | { |
5421 | 0 | AssertLockNotHeld(m_tx_download_mutex); |
5422 | 0 | AssertLockHeld(g_msgproc_mutex); |
5423 | |
|
5424 | 0 | PeerRef peer = GetPeerRef(pto->GetId()); |
5425 | 0 | if (!peer) return false; |
5426 | 0 | const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); |
5427 | | |
5428 | | // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll |
5429 | | // disconnect misbehaving peers even before the version handshake is complete. |
5430 | 0 | if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; |
5431 | | |
5432 | | // Initiate version handshake for outbound connections |
5433 | 0 | if (!pto->IsInboundConn() && !peer->m_outbound_version_message_sent) { |
5434 | 0 | PushNodeVersion(*pto, *peer); |
5435 | 0 | peer->m_outbound_version_message_sent = true; |
5436 | 0 | } |
5437 | | |
5438 | | // Don't send anything until the version handshake is complete |
5439 | 0 | if (!pto->fSuccessfullyConnected || pto->fDisconnect) |
5440 | 0 | return true; |
5441 | | |
5442 | 0 | const auto current_time{GetTime<std::chrono::microseconds>()}; |
5443 | | 
5444 | 0 | if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) { |
5445 | 0 | LogDebug(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId()); |
5446 | 0 | pto->fDisconnect = true; |
5447 | 0 | return true; |
5448 | 0 | } |
5449 | | |
5450 | 0 | MaybeSendPing(*pto, *peer, current_time); |
5451 | | |
5452 | | // MaybeSendPing may have marked peer for disconnection |
5453 | 0 | if (pto->fDisconnect) return true; |
5454 | | |
5455 | 0 | MaybeSendAddr(*pto, *peer, current_time); |
5456 | | 
5457 | 0 | MaybeSendSendHeaders(*pto, *peer); |
5458 | | 
5459 | 0 | { |
5460 | 0 | LOCK(cs_main); |
5461 | | 
5462 | 0 | CNodeState &state = *State(pto->GetId()); |
5463 | | |
5464 | | // Start block sync |
5465 | 0 | if (m_chainman.m_best_header == nullptr) { |
5466 | 0 | m_chainman.m_best_header = m_chainman.ActiveChain().Tip(); |
5467 | 0 | } |
5468 | | |
5469 | | // Determine whether we might try initial headers sync or parallel |
5470 | | // block download from this peer -- this mostly affects behavior while |
5471 | | // in IBD (once out of IBD, we sync from all peers). |
5472 | 0 | bool sync_blocks_and_headers_from_peer = false; |
5473 | 0 | if (state.fPreferredDownload) { |
5474 | 0 | sync_blocks_and_headers_from_peer = true; |
5475 | 0 | } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) { |
5476 | | // Typically this is an inbound peer. If we don't have any outbound |
5477 | | // peers, or if we aren't downloading any blocks from such peers, |
5478 | | // then allow block downloads from this peer, too. |
5479 | | // We prefer downloading blocks from outbound peers to avoid |
5480 | | // putting undue load on (say) some home user who is just making |
5481 | | // outbound connections to the network, but if our only source of |
5482 | | // the latest blocks is from an inbound peer, we have to be sure to |
5483 | | // eventually download it (and not just wait indefinitely for an |
5484 | | // outbound peer to have it). |
5485 | 0 | if (m_num_preferred_download_peers == 0 || mapBlocksInFlight.empty()) { |
5486 | 0 | sync_blocks_and_headers_from_peer = true; |
5487 | 0 | } |
5488 | 0 | } |
5489 | | 
5490 | 0 | if (!state.fSyncStarted && CanServeBlocks(*peer) && !m_chainman.m_blockman.LoadingBlocks()) { |
5491 | | // Only actively request headers from a single peer, unless we're close to today. |
5492 | 0 | if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) || m_chainman.m_best_header->Time() > NodeClock::now() - 24h) { |
5493 | 0 | const CBlockIndex* pindexStart = m_chainman.m_best_header; |
5494 | | /* If possible, start at the block preceding the currently |
5495 | | best known header. This ensures that we always get a |
5496 | | non-empty list of headers back as long as the peer |
5497 | | is up-to-date. With a non-empty response, we can initialise |
5498 | | the peer's known best block. This wouldn't be possible |
5499 | | if we requested starting at m_chainman.m_best_header and |
5500 | | got back an empty response. */ |
5501 | 0 | if (pindexStart->pprev) |
5502 | 0 | pindexStart = pindexStart->pprev; |
5503 | 0 | if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) { |
5504 | 0 | LogDebug(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), peer->m_starting_height); |
5505 | | 
5506 | 0 | state.fSyncStarted = true; |
5507 | 0 | peer->m_headers_sync_timeout = current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE + |
5508 | 0 | ( |
5509 | | // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to microseconds before scaling |
5510 | | // to maintain precision |
5511 | 0 | std::chrono::microseconds{HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} * |
5512 | 0 | Ticks<std::chrono::seconds>(NodeClock::now() - m_chainman.m_best_header->Time()) / consensusParams.nPowTargetSpacing |
5513 | 0 | ); |
5514 | 0 | nSyncStarted++; |
5515 | 0 | } |
5516 | 0 | } |
5517 | 0 | } |
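 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // timeout arithmetic above, using the constants from the top of the
 | | // file (15min base, 1ms per expected header) and mainnet's 600s
 | | // target spacing. A best header one year behind implies ~52,560
 | | // expected headers, i.e. roughly 53s on top of the 900s base.
 | | #include <chrono>
 | | 
 | | std::chrono::microseconds HeadersTimeoutExample()
 | | {
 | |     using namespace std::chrono;
 | |     const auto base{15min};
 | |     const auto per_header{1ms};
 | |     const seconds behind{365 * 24h}; // one year behind the tip
 | |     const seconds spacing{600s};     // mainnet block interval
 | |     return base + per_header * (behind.count() / spacing.count()); // ~952s
 | | }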
5518 | | |
5519 | | // |
5520 | | // Try sending block announcements via headers |
5521 | | // |
5522 | 0 | { |
5523 | | // If we have no more than MAX_BLOCKS_TO_ANNOUNCE in our |
5524 | | // list of block hashes we're relaying, and our peer wants |
5525 | | // headers announcements, then find the first header |
5526 | | // not yet known to our peer but would connect, and send. |
5527 | | // If no header would connect, or if we have too many |
5528 | | // blocks, or if the peer doesn't want headers, just |
5529 | | // add all to the inv queue. |
5530 | 0 | LOCK(peer->m_block_inv_mutex); |
5531 | 0 | std::vector<CBlock> vHeaders; |
5532 | 0 | bool fRevertToInv = ((!peer->m_prefers_headers && |
5533 | 0 | (!state.m_requested_hb_cmpctblocks || peer->m_blocks_for_headers_relay.size() > 1)) || |
5534 | 0 | peer->m_blocks_for_headers_relay.size() > MAX_BLOCKS_TO_ANNOUNCE); |
5535 | 0 | const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery |
5536 | 0 | ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date |
5537 | | 
5538 | 0 | if (!fRevertToInv) { |
5539 | 0 | bool fFoundStartingHeader = false; |
5540 | | // Try to find first header that our peer doesn't have, and |
5541 | | // then send all headers past that one. If we come across any |
5542 | | // headers that aren't on m_chainman.ActiveChain(), give up. |
5543 | 0 | for (const uint256& hash : peer->m_blocks_for_headers_relay) { |
5544 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hash); |
5545 | 0 | assert(pindex); |
5546 | 0 | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { |
5547 | | // Bail out if we reorged away from this block |
5548 | 0 | fRevertToInv = true; |
5549 | 0 | break; |
5550 | 0 | } |
5551 | 0 | if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { |
5552 | | // This means that the list of blocks to announce doesn't
5553 | | // connect to each other. |
5554 | | // This shouldn't really be possible to hit during |
5555 | | // regular operation (because reorgs should take us to |
5556 | | // a chain that has some block not on the prior chain, |
5557 | | // which should be caught by the prior check), but one |
5558 | | // way this could happen is by using invalidateblock / |
5559 | | // reconsiderblock repeatedly on the tip, causing it to |
5560 | | // be added multiple times to m_blocks_for_headers_relay. |
5561 | | // Robustly deal with this rare situation by reverting |
5562 | | // to an inv. |
5563 | 0 | fRevertToInv = true; |
5564 | 0 | break; |
5565 | 0 | } |
5566 | 0 | pBestIndex = pindex; |
5567 | 0 | if (fFoundStartingHeader) { |
5568 | | // add this to the headers message |
5569 | 0 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5570 | 0 | } else if (PeerHasHeader(&state, pindex)) { |
5571 | 0 | continue; // keep looking for the first new block |
5572 | 0 | } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { |
5573 | | // Peer doesn't have this header but they do have the prior one. |
5574 | | // Start sending headers. |
5575 | 0 | fFoundStartingHeader = true; |
5576 | 0 | vHeaders.emplace_back(pindex->GetBlockHeader()); |
5577 | 0 | } else { |
5578 | | // Peer doesn't have this header or the prior one -- nothing will |
5579 | | // connect, so bail out. |
5580 | 0 | fRevertToInv = true; |
5581 | 0 | break; |
5582 | 0 | } |
5583 | 0 | } |
5584 | 0 | } |
5585 | 0 | if (!fRevertToInv && !vHeaders.empty()) { |
5586 | 0 | if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) { |
5587 | | // We only send up to 1 block as header-and-ids, as sending more
5588 | | // probably means we're doing an initial-ish sync or they're slow.
5589 | 0 | LogDebug(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, |
5590 | 0 | vHeaders.front().GetHash().ToString(), pto->GetId()); |
5591 | | 
5592 | 0 | std::optional<CSerializedNetMsg> cached_cmpctblock_msg; |
5593 | 0 | { |
5594 | 0 | LOCK(m_most_recent_block_mutex); |
5595 | 0 | if (m_most_recent_block_hash == pBestIndex->GetBlockHash()) { |
5596 | 0 | cached_cmpctblock_msg = NetMsg::Make(NetMsgType::CMPCTBLOCK, *m_most_recent_compact_block); |
5597 | 0 | } |
5598 | 0 | } |
5599 | 0 | if (cached_cmpctblock_msg.has_value()) { |
5600 | 0 | PushMessage(*pto, std::move(cached_cmpctblock_msg.value())); |
5601 | 0 | } else { |
5602 | 0 | CBlock block; |
5603 | 0 | const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)}; |
5604 | 0 | assert(ret); |
5605 | 0 | CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; |
5606 | 0 | MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); |
5607 | 0 | } |
5608 | 0 | state.pindexBestHeaderSent = pBestIndex; |
5609 | 0 | } else if (peer->m_prefers_headers) { |
5610 | 0 | if (vHeaders.size() > 1) { |
5611 | 0 | LogDebug(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, |
5612 | 0 | vHeaders.size(), |
5613 | 0 | vHeaders.front().GetHash().ToString(), |
5614 | 0 | vHeaders.back().GetHash().ToString(), pto->GetId()); |
5615 | 0 | } else { |
5616 | 0 | LogDebug(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, |
5617 | 0 | vHeaders.front().GetHash().ToString(), pto->GetId()); |
5618 | 0 | } |
5619 | 0 | MakeAndPushMessage(*pto, NetMsgType::HEADERS, TX_WITH_WITNESS(vHeaders)); |
5620 | 0 | state.pindexBestHeaderSent = pBestIndex; |
5621 | 0 | } else |
5622 | 0 | fRevertToInv = true; |
5623 | 0 | } |
5624 | 0 | if (fRevertToInv) { |
5625 | | // If falling back to using an inv, just try to inv the tip. |
5626 | | // The last entry in m_blocks_for_headers_relay was our tip at some point |
5627 | | // in the past. |
5628 | 0 | if (!peer->m_blocks_for_headers_relay.empty()) { |
5629 | 0 | const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back(); |
5630 | 0 | const CBlockIndex* pindex = m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce); |
5631 | 0 | assert(pindex); |
5632 | | |
5633 | | // Warn if we're announcing a block that is not on the main chain. |
5634 | | // This should be very rare and could be optimized out. |
5635 | | // Just log for now. |
5636 | 0 | if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) { |
5637 | 0 | LogDebug(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", |
5638 | 0 | hashToAnnounce.ToString(), m_chainman.ActiveChain().Tip()->GetBlockHash().ToString()); |
5639 | 0 | } |
5640 | | |
5641 | | // If the peer's chain has this block, don't inv it back. |
5642 | 0 | if (!PeerHasHeader(&state, pindex)) { |
5643 | 0 | peer->m_blocks_for_inv_relay.push_back(hashToAnnounce); |
5644 | 0 | LogDebug(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, |
5645 | 0 | pto->GetId(), hashToAnnounce.ToString()); |
5646 | 0 | } |
5647 | 0 | } |
5648 | 0 | } |
5649 | 0 | peer->m_blocks_for_headers_relay.clear(); |
5650 | 0 | } |
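 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // announce-format decision above as a hypothetical pure predicate.
 | | // We fall back to inv when the backlog is too long, and otherwise
 | | // use headers only if the peer prefers them, or asked for
 | | // high-bandwidth compact blocks and we have at most one block.
 | | #include <cstddef>
 | | 
 | | static bool RevertToInvExample(bool prefers_headers, bool wants_hb_cmpct,
 | |                                size_t n_blocks, size_t max_announce)
 | | {
 | |     if (n_blocks > max_announce) return true;
 | |     if (prefers_headers) return false;
 | |     return !(wants_hb_cmpct && n_blocks <= 1);
 | | }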
5651 | | |
5652 | | // |
5653 | | // Message: inventory |
5654 | | // |
5655 | 0 | std::vector<CInv> vInv; |
5656 | 0 | { |
5657 | 0 | LOCK(peer->m_block_inv_mutex); |
5658 | 0 | vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(), INVENTORY_BROADCAST_TARGET)); |
5659 | | |
5660 | | // Add blocks |
5661 | 0 | for (const uint256& hash : peer->m_blocks_for_inv_relay) { |
5662 | 0 | vInv.emplace_back(MSG_BLOCK, hash); |
5663 | 0 | if (vInv.size() == MAX_INV_SZ) { |
5664 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5665 | 0 | vInv.clear(); |
5666 | 0 | } |
5667 | 0 | } |
5668 | 0 | peer->m_blocks_for_inv_relay.clear(); |
5669 | 0 | } |
5670 | | 
5671 | 0 | if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) { |
5672 | 0 | LOCK(tx_relay->m_tx_inventory_mutex); |
5673 | | // Check whether periodic sends should happen |
5674 | 0 | bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan); |
5675 | 0 | if (tx_relay->m_next_inv_send_time < current_time) { |
5676 | 0 | fSendTrickle = true; |
5677 | 0 | if (pto->IsInboundConn()) { |
5678 | 0 | tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); |
5679 | 0 | } else { |
5680 | 0 | tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); |
5681 | 0 | } |
5682 | 0 | } |
5683 | | |
5684 | | // If it's time to send but the peer has asked us not to relay transactions, drop the queued announcements.
5685 | 0 | if (fSendTrickle) { |
5686 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
5687 | 0 | if (!tx_relay->m_relay_txs) tx_relay->m_tx_inventory_to_send.clear(); |
5688 | 0 | } |
5689 | | |
5690 | | // Respond to BIP35 mempool requests |
5691 | 0 | if (fSendTrickle && tx_relay->m_send_mempool) { |
5692 | 0 | auto vtxinfo = m_mempool.infoAll(); |
5693 | 0 | tx_relay->m_send_mempool = false; |
5694 | 0 | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
5695 | | 
5696 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
5697 | | 
5698 | 0 | for (const auto& txinfo : vtxinfo) { |
5699 | 0 | CInv inv{ |
5700 | 0 | peer->m_wtxid_relay ? MSG_WTX : MSG_TX, |
5701 | 0 | peer->m_wtxid_relay ? |
5702 | 0 | txinfo.tx->GetWitnessHash().ToUint256() : |
5703 | 0 | txinfo.tx->GetHash().ToUint256(), |
5704 | 0 | }; |
5705 | 0 | tx_relay->m_tx_inventory_to_send.erase(inv.hash); |
5706 | | |
5707 | | // Don't send transactions that peers will not put into their mempool |
5708 | 0 | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { |
5709 | 0 | continue; |
5710 | 0 | } |
5711 | 0 | if (tx_relay->m_bloom_filter) { |
5712 | 0 | if (!tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; |
5713 | 0 | } |
5714 | 0 | tx_relay->m_tx_inventory_known_filter.insert(inv.hash); |
5715 | 0 | vInv.push_back(inv); |
5716 | 0 | if (vInv.size() == MAX_INV_SZ) { |
5717 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5718 | 0 | vInv.clear(); |
5719 | 0 | } |
5720 | 0 | } |
5721 | 0 | } |
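 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // feerate filter arithmetic applied above, assuming CFeeRate's
 | | // satoshis-per-1000-vbytes convention. A 1000 sat/kvB filter makes a
 | | // 250-vbyte transaction need a fee of at least 250 sat to be
 | | // announced. (The real CFeeRate::GetFee also special-cases rounding
 | | // to zero.)
 | | #include <cstdint>
 | | 
 | | static int64_t FilterMinFeeExample(int64_t sat_per_kvb, int32_t vsize)
 | | {
 | |     return sat_per_kvb * vsize / 1000; // e.g. 1000 * 250 / 1000 == 250
 | | }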
5722 | | |
5723 | | // Determine transactions to relay |
5724 | 0 | if (fSendTrickle) { |
5725 | | // Produce a vector with all candidates for sending |
5726 | 0 | std::vector<std::set<uint256>::iterator> vInvTx; |
5727 | 0 | vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size()); |
5728 | 0 | for (std::set<uint256>::iterator it = tx_relay->m_tx_inventory_to_send.begin(); it != tx_relay->m_tx_inventory_to_send.end(); it++) { |
5729 | 0 | vInvTx.push_back(it); |
5730 | 0 | } |
5731 | 0 | const CFeeRate filterrate{tx_relay->m_fee_filter_received.load()}; |
5732 | | // Topologically and fee-rate sort the inventory we send for privacy and priority reasons. |
5733 | | // A heap is used so that not all items need sorting if only a few are being sent. |
5734 | 0 | CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool, peer->m_wtxid_relay); |
5735 | 0 | std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
5736 | | // No reason to drain out at many times the network's capacity, |
5737 | | // especially since we have many peers and some will draw much shorter delays. |
5738 | 0 | unsigned int nRelayedTransactions = 0; |
5739 | 0 | LOCK(tx_relay->m_bloom_filter_mutex); |
5740 | 0 | size_t broadcast_max{INVENTORY_BROADCAST_TARGET + (tx_relay->m_tx_inventory_to_send.size()/1000)*5}; |
5741 | 0 | broadcast_max = std::min<size_t>(INVENTORY_BROADCAST_MAX, broadcast_max); |
5742 | 0 | while (!vInvTx.empty() && nRelayedTransactions < broadcast_max) { |
5743 | | // Fetch the top element from the heap |
5744 | 0 | std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); |
5745 | 0 | std::set<uint256>::iterator it = vInvTx.back(); |
5746 | 0 | vInvTx.pop_back(); |
5747 | 0 | uint256 hash = *it; |
5748 | 0 | CInv inv(peer->m_wtxid_relay ? MSG_WTX : MSG_TX, hash); |
5749 | | // Remove it from the to-be-sent set |
5750 | 0 | tx_relay->m_tx_inventory_to_send.erase(it); |
5751 | | // Check if not in the filter already |
5752 | 0 | if (tx_relay->m_tx_inventory_known_filter.contains(hash)) { |
5753 | 0 | continue; |
5754 | 0 | } |
5755 | | // Not in the mempool anymore? Don't bother sending it.
5756 | 0 | auto txinfo = m_mempool.info(ToGenTxid(inv)); |
5757 | 0 | if (!txinfo.tx) { |
5758 | 0 | continue; |
5759 | 0 | } |
5760 | | // Peer asked us not to send transactions at that feerate? Don't bother sending it.
5761 | 0 | if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { |
5762 | 0 | continue; |
5763 | 0 | } |
5764 | 0 | if (tx_relay->m_bloom_filter && !tx_relay->m_bloom_filter->IsRelevantAndUpdate(*txinfo.tx)) continue; |
5765 | | // Send |
5766 | 0 | vInv.push_back(inv); |
5767 | 0 | nRelayedTransactions++; |
5768 | 0 | if (vInv.size() == MAX_INV_SZ) { |
5769 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5770 | 0 | vInv.clear(); |
5771 | 0 | } |
5772 | 0 | tx_relay->m_tx_inventory_known_filter.insert(hash); |
5773 | 0 | } |
5774 | | |
5775 | | // Ensure we'll respond to GETDATA requests for anything we've just announced |
5776 | 0 | LOCK(m_mempool.cs); |
5777 | 0 | tx_relay->m_last_inv_sequence = m_mempool.GetSequence(); |
5778 | 0 | } |
5779 | 0 | } |
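 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // broadcast budget computed above. The budget grows by 5 for every
 | | // 1000 queued entries on top of INVENTORY_BROADCAST_TARGET and is
 | | // capped at INVENTORY_BROADCAST_MAX (both defined elsewhere); e.g. a
 | | // 10,000-entry backlog adds 50 to the target.
 | | #include <algorithm>
 | | #include <cstddef>
 | | 
 | | static size_t BroadcastBudgetExample(size_t target, size_t hard_max, size_t backlog)
 | | {
 | |     return std::min(hard_max, target + (backlog / 1000) * 5);
 | | }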
5780 | 0 | if (!vInv.empty()) |
5781 | 0 | MakeAndPushMessage(*pto, NetMsgType::INV, vInv); |
5782 | | |
5783 | | // Detect whether we're stalling |
5784 | 0 | auto stalling_timeout = m_block_stalling_timeout.load(); |
5785 | 0 | if (state.m_stalling_since.count() && state.m_stalling_since < current_time - stalling_timeout) { |
5786 | | // Stalling only triggers when the block download window cannot move. During normal steady state, |
5787 | | // the download window should be much larger than the to-be-downloaded set of blocks, so disconnection |
5788 | | // should only happen during initial block download. |
5789 | 0 | LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : ""); |
5790 | 0 | pto->fDisconnect = true; |
5791 | | // Increase timeout for the next peer so that we don't disconnect multiple peers if our own |
5792 | | // bandwidth is insufficient. |
5793 | 0 | const auto new_timeout = std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX); |
5794 | 0 | if (stalling_timeout != new_timeout && m_block_stalling_timeout.compare_exchange_strong(stalling_timeout, new_timeout)) { |
5795 | 0 | LogDebug(BCLog::NET, "Increased stalling timeout temporarily to %d seconds\n", count_seconds(new_timeout)); |
5796 | 0 | } |
5797 | 0 | return true; |
5798 | 0 | } |
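 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // adaptive stalling timeout above. Each stall disconnect doubles the
 | | // shared timeout up to a cap, and compare_exchange_strong keeps
 | | // concurrent callers from doubling twice for the same observed
 | | // value. The 2s starting point is an assumption for illustration.
 | | #include <algorithm>
 | | #include <atomic>
 | | #include <chrono>
 | | 
 | | std::atomic<std::chrono::seconds> g_stall_timeout_example{std::chrono::seconds{2}};
 | | 
 | | void OnStallDisconnectExample(std::chrono::seconds cap)
 | | {
 | |     auto cur{g_stall_timeout_example.load()};
 | |     const auto next{std::min(2 * cur, cap)};
 | |     if (cur != next) g_stall_timeout_example.compare_exchange_strong(cur, next);
 | | }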
5799 | | // In case there is a block that has been in flight from this peer for block_interval * (1 + 0.5 * N) |
5800 | | // (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout. |
5801 | | // We compensate for other peers to prevent killing off peers due to our own downstream link |
5802 | | // being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes |
5803 | | // to unreasonably increase our timeout. |
5804 | 0 | if (state.vBlocksInFlight.size() > 0) { |
5805 | 0 | QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); |
5806 | 0 | int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; |
5807 | 0 | if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { |
5808 | 0 | LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : ""); |
5809 | 0 | pto->fDisconnect = true; |
5810 | 0 | return true; |
5811 | 0 | } |
5812 | 0 | } |
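 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // deadline shape documented above, block_interval * (1 + 0.5 * N).
 | | // With mainnet's 600s spacing and two other peers delivering
 | | // validated blocks, the deadline is 600 * (1 + 0.5 * 2) = 1200s.
 | | #include <chrono>
 | | 
 | | std::chrono::seconds BlockDownloadDeadlineExample(std::chrono::seconds spacing,
 | |                                                   int other_validated_peers)
 | | {
 | |     // spacing * (1 + 0.5 * N), kept in integer arithmetic.
 | |     return spacing + spacing * other_validated_peers / 2;
 | | }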
5813 | | // Check for headers sync timeouts |
5814 | 0 | if (state.fSyncStarted && peer->m_headers_sync_timeout < std::chrono::microseconds::max()) { |
5815 | | // Detect whether this is a stalling initial-headers-sync peer |
5816 | 0 | if (m_chainman.m_best_header->Time() <= NodeClock::now() - 24h) { |
5817 | 0 | if (current_time > peer->m_headers_sync_timeout && nSyncStarted == 1 && (m_num_preferred_download_peers - state.fPreferredDownload >= 1)) { |
5818 | | // Disconnect a peer (without NetPermissionFlags::NoBan permission) if it is our only sync peer, |
5819 | | // and we have others we could be using instead. |
5820 | | // Note: If all our peers are inbound, then we won't |
5821 | | // disconnect our sync peer for stalling; we have bigger |
5822 | | // problems if we can't get any outbound peers. |
5823 | 0 | if (!pto->HasPermission(NetPermissionFlags::NoBan)) { |
5824 | 0 | LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : ""); |
5825 | 0 | pto->fDisconnect = true; |
5826 | 0 | return true; |
5827 | 0 | } else { |
5828 | 0 | LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : ""); |
5829 | | // Reset the headers sync state so that we have a |
5830 | | // chance to try downloading from a different peer. |
5831 | | // Note: this will also result in at least one more |
5832 | | // getheaders message to be sent to |
5833 | | // this peer (eventually). |
5834 | 0 | state.fSyncStarted = false; |
5835 | 0 | nSyncStarted--; |
5836 | 0 | peer->m_headers_sync_timeout = 0us; |
5837 | 0 | } |
5838 | 0 | } |
5839 | 0 | } else { |
5840 | | // After we've caught up once, reset the timeout so we can't trigger |
5841 | | // disconnect later. |
5842 | 0 | peer->m_headers_sync_timeout = std::chrono::microseconds::max(); |
5843 | 0 | } |
5844 | 0 | } |
5845 | | |
5846 | | // Check that outbound peers have reasonable chains |
5847 | | // GetTime() is used by this anti-DoS logic so we can test this using mocktime |
5848 | 0 | ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>()); |
5849 | | |
5850 | | // |
5851 | | // Message: getdata (blocks) |
5852 | | // |
5853 | 0 | std::vector<CInv> vGetData; |
5854 | 0 | if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { |
5855 | 0 | std::vector<const CBlockIndex*> vToDownload; |
5856 | 0 | NodeId staller = -1; |
5857 | 0 | auto get_inflight_budget = [&state]() { |
5858 | 0 | return std::max(0, MAX_BLOCKS_IN_TRANSIT_PER_PEER - static_cast<int>(state.vBlocksInFlight.size())); |
5859 | 0 | }; |
5860 | | |
5861 | | // If a snapshot chainstate is in use, we want to find its next blocks |
5862 | | // before the background chainstate to prioritize getting to network tip. |
5863 | 0 | FindNextBlocksToDownload(*peer, get_inflight_budget(), vToDownload, staller); |
5864 | 0 | if (m_chainman.BackgroundSyncInProgress() && !IsLimitedPeer(*peer)) { |
5865 | | // If the background tip is not an ancestor of the snapshot block, |
5866 | | // we need to start requesting blocks from their last common ancestor. |
5867 | 0 | const CBlockIndex *from_tip = LastCommonAncestor(m_chainman.GetBackgroundSyncTip(), m_chainman.GetSnapshotBaseBlock()); |
5868 | 0 | TryDownloadingHistoricalBlocks( |
5869 | 0 | *peer, |
5870 | 0 | get_inflight_budget(), |
5871 | 0 | vToDownload, from_tip, |
5872 | 0 | Assert(m_chainman.GetSnapshotBaseBlock())); |
5873 | 0 | } |
5874 | 0 | for (const CBlockIndex *pindex : vToDownload) { |
5875 | 0 | uint32_t nFetchFlags = GetFetchFlags(*peer); |
5876 | 0 | vGetData.emplace_back(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()); |
5877 | 0 | BlockRequested(pto->GetId(), *pindex); |
5878 | 0 | LogDebug(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), |
5879 | 0 | pindex->nHeight, pto->GetId()); |
5880 | 0 | } |
5881 | 0 | if (state.vBlocksInFlight.empty() && staller != -1) { |
5882 | 0 | if (State(staller)->m_stalling_since == 0us) { |
5883 | 0 | State(staller)->m_stalling_since = current_time; |
5884 | 0 | LogDebug(BCLog::NET, "Stall started peer=%d\n", staller); |
5885 | 0 | } |
5886 | 0 | } |
5887 | 0 | } |
5888 | | |
5889 | | // |
5890 | | // Message: getdata (transactions) |
5891 | | // |
5892 | 0 | { |
5893 | 0 | LOCK(m_tx_download_mutex); |
5894 | 0 | for (const GenTxid& gtxid : m_txdownloadman.GetRequestsToSend(pto->GetId(), current_time)) { |
5895 | 0 | vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*peer)), gtxid.GetHash()); |
5896 | 0 | if (vGetData.size() >= MAX_GETDATA_SZ) { |
5897 | 0 | MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); |
5898 | 0 | vGetData.clear(); |
5899 | 0 | } |
5900 | 0 | } |
5901 | 0 | } |
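 | | 
 | | // [Editor's note] Standalone sketch, not part of this file: the
 | | // flush-at-capacity pattern used for the INV and GETDATA batches
 | | // above, in generic form with a hypothetical sender callback.
 | | #include <cstddef>
 | | #include <functional>
 | | #include <utility>
 | | #include <vector>
 | | 
 | | template <typename T>
 | | void PushChunkedExample(std::vector<T>& batch, T item, size_t max_sz,
 | |                         const std::function<void(const std::vector<T>&)>& send)
 | | {
 | |     batch.push_back(std::move(item));
 | |     if (batch.size() >= max_sz) { // flush a full batch, then keep filling
 | |         send(batch);
 | |         batch.clear();
 | |     }
 | | }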
5902 | | 
5903 | 0 | if (!vGetData.empty()) |
5904 | 0 | MakeAndPushMessage(*pto, NetMsgType::GETDATA, vGetData); |
5905 | 0 | } // release cs_main |
5906 | 0 | MaybeSendFeefilter(*pto, *peer, current_time); |
5907 | 0 | return true; |
5908 | 0 | } |