/root/bitcoin/src/node/txorphanage.cpp
Line | Count | Source |
1 | | // Copyright (c) 2021-2022 The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #include <node/txorphanage.h> |
6 | | |
7 | | #include <consensus/validation.h> |
8 | | #include <logging.h> |
9 | | #include <policy/policy.h> |
10 | | #include <primitives/transaction.h> |
11 | | #include <util/feefrac.h> |
12 | | #include <util/time.h> |
13 | | #include <util/hasher.h> |
14 | | |
15 | | #include <boost/multi_index/indexed_by.hpp> |
16 | | #include <boost/multi_index/ordered_index.hpp> |
17 | | #include <boost/multi_index/tag.hpp> |
18 | | #include <boost/multi_index_container.hpp> |
19 | | |
20 | | #include <cassert> |
21 | | #include <cmath> |
22 | | #include <unordered_map> |
23 | | |
24 | | namespace node { |
25 | | /** Minimum NodeId for lower_bound lookups (in practice, NodeIds start at 0). */ |
26 | | static constexpr NodeId MIN_PEER{std::numeric_limits<NodeId>::min()}; |
27 | | /** Maximum NodeId for upper_bound lookups. */ |
28 | | static constexpr NodeId MAX_PEER{std::numeric_limits<NodeId>::max()}; |
29 | | class TxOrphanageImpl final : public TxOrphanage { |
30 | | // Type alias for sequence numbers |
31 | | using SequenceNumber = uint64_t; |
32 | | /** Global sequence number, increment each time an announcement is added. */ |
33 | | SequenceNumber m_current_sequence{0}; |
34 | | |
35 | | /** One orphan announcement. Each announcement (i.e. combination of wtxid, nodeid) is unique. There may be multiple |
36 | | * announcements for the same tx, and multiple transactions with the same txid but different wtxid are possible. */ |
37 | | struct Announcement |
38 | | { |
39 | | const CTransactionRef m_tx; |
40 | | /** Which peer announced this tx */ |
41 | | const NodeId m_announcer; |
42 | | /** What order this transaction entered the orphanage. */ |
43 | | const SequenceNumber m_entry_sequence; |
44 | | /** Whether this tx should be reconsidered. Always starts out false. A peer's workset is the collection of all |
45 | | * announcements with m_reconsider=true. */ |
46 | | bool m_reconsider{false}; |
47 | | |
48 | | Announcement(const CTransactionRef& tx, NodeId peer, SequenceNumber seq) : |
49 | 18.7k | m_tx{tx}, m_announcer{peer}, m_entry_sequence{seq} |
50 | 18.7k | { } |
51 | | |
52 | | /** Get an approximation for "memory usage". The total memory is a function of the memory used to store the |
53 | | * transaction itself, each entry in m_orphans, and each entry in m_outpoint_to_orphan_wtxids. We use weight because |
54 | | * it is often higher than the actual memory usage of the transaction. This metric conveniently encompasses |
55 | | * m_outpoint_to_orphan_wtxids usage since input data does not get the witness discount, and makes it easier to |
56 | | * reason about each peer's limits using well-understood transaction attributes. */ |
57 | 95.8k | TxOrphanage::Usage GetMemUsage() const { |
58 | 95.8k | return GetTransactionWeight(*m_tx); |
59 | 95.8k | } |
60 | | |
61 | | /** Get an approximation of how much this transaction contributes to latency in EraseForBlock and EraseForPeer. |
62 | | * The computation time is a function of the number of entries in m_orphans (thus 1 per announcement) and the |
63 | | * number of entries in m_outpoint_to_orphan_wtxids (thus an additional 1 for every 10 inputs). Transactions with a |
64 | | * small number of inputs (9 or fewer) are counted as 1 to make it easier to reason about each peer's limits in |
65 | | * terms of "normal" transactions. */ |
66 | 95.8k | TxOrphanage::Count GetLatencyScore() const { |
67 | 95.8k | return 1 + (m_tx->vin.size() / 10); |
68 | 95.8k | } |
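 | | // Editor's note: a hedged worked example of the two scores above, with
 | | // hypothetical sizes. For a 1-input segwit tx with base size 110 and
 | | // total size 210, GetMemUsage() returns the weight 110 * 3 + 210 = 540.
 | | // For latency, a tx with 9 or fewer inputs scores 1 + 9 / 10 = 1, while
 | | // one with 25 inputs scores 1 + 25 / 10 = 3 (integer division).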
69 | | }; |
70 | | |
71 | | // Index by wtxid, then peer |
72 | | struct ByWtxid {}; |
73 | | using ByWtxidView = std::tuple<Wtxid, NodeId>; |
74 | | struct WtxidExtractor |
75 | | { |
76 | | using result_type = ByWtxidView; |
77 | | result_type operator()(const Announcement& ann) const |
78 | 812k | { |
79 | 812k | return ByWtxidView{ann.m_tx->GetWitnessHash(), ann.m_announcer}; |
80 | 812k | } |
81 | | }; |
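 | | // Editor's sketch (hedged): because the ByWtxid index orders announcements
 | | // by (wtxid, announcer), all announcements for one wtxid form a contiguous
 | | // range bounded by the sentinel peer ids, as used by EraseTxInternal below:
 | | // auto it = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER});
 | | // auto end = m_orphans.get<ByWtxid>().upper_bound(ByWtxidView{wtxid, MAX_PEER});
 | | // while (it != end) { /* ... visit each announcer of wtxid ... */ ++it; }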
82 | | |
83 | | // Sort by peer, then by whether it is ready to reconsider, then by recency. |
84 | | struct ByPeer {}; |
85 | | using ByPeerView = std::tuple<NodeId, bool, SequenceNumber>; |
86 | | struct ByPeerViewExtractor { |
87 | | using result_type = ByPeerView; |
88 | | result_type operator()(const Announcement& ann) const |
89 | 219k | { |
90 | 219k | return ByPeerView{ann.m_announcer, ann.m_reconsider, ann.m_entry_sequence}; |
91 | 219k | } |
92 | | }; |
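 | | // Editor's sketch (hedged): ordering by (peer, m_reconsider, sequence)
 | | // groups one peer's announcements together, with its workset
 | | // (m_reconsider == true) sorted after the rest. The oldest workset entry
 | | // is therefore found as in HaveTxToReconsider below:
 | | // auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0});
 | | // bool found = it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider;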
93 | | |
94 | | struct OrphanIndices final : boost::multi_index::indexed_by< |
95 | | boost::multi_index::ordered_unique<boost::multi_index::tag<ByWtxid>, WtxidExtractor>, |
96 | | boost::multi_index::ordered_unique<boost::multi_index::tag<ByPeer>, ByPeerViewExtractor> |
97 | | >{}; |
98 | | |
99 | | using AnnouncementMap = boost::multi_index::multi_index_container<Announcement, OrphanIndices>; |
100 | | template<typename Tag> |
101 | | using Iter = typename AnnouncementMap::index<Tag>::type::iterator; |
102 | | AnnouncementMap m_orphans; |
103 | | |
104 | | const TxOrphanage::Count m_max_global_latency_score{DEFAULT_MAX_ORPHANAGE_LATENCY_SCORE}; |
105 | | const TxOrphanage::Usage m_reserved_usage_per_peer{DEFAULT_RESERVED_ORPHAN_WEIGHT_PER_PEER}; |
106 | | |
107 | | /** Number of unique orphans by wtxid. Less than or equal to the number of entries in m_orphans. */ |
108 | | TxOrphanage::Count m_unique_orphans{0}; |
109 | | |
110 | | /** Memory used by orphans (see Announcement::GetMemUsage()), deduplicated by wtxid. */ |
111 | | TxOrphanage::Usage m_unique_orphan_usage{0}; |
112 | | |
113 | | /** The sum, over all unique transactions, of the input-based portion of the latency score (i.e.
114 | | * Announcement::GetLatencyScore() minus 1 to exclude the announcement itself). The orphanage's total
115 | | * latency score is this value plus the number of entries in m_orphans. */
116 | | TxOrphanage::Count m_unique_rounded_input_scores{0}; |
117 | | |
118 | | /** Index from the parents' outputs to wtxids that exist in m_orphans. Used to find children of |
119 | | * a transaction that can be reconsidered and to remove entries that conflict with a block.*/ |
120 | | std::unordered_map<COutPoint, std::set<Wtxid>, SaltedOutpointHasher> m_outpoint_to_orphan_wtxids; |
121 | | |
122 | | /** Set of Wtxids for which (exactly) one announcement with m_reconsider=true exists. */ |
123 | | std::set<Wtxid> m_reconsiderable_wtxids; |
124 | | |
125 | | struct PeerDoSInfo { |
126 | | TxOrphanage::Usage m_total_usage{0}; |
127 | | TxOrphanage::Count m_count_announcements{0}; |
128 | | TxOrphanage::Count m_total_latency_score{0}; |
129 | | bool operator==(const PeerDoSInfo& other) const |
130 | 865 | { |
131 | 865 | return m_total_usage == other.m_total_usage && |
132 | 865 | m_count_announcements == other.m_count_announcements && |
133 | 865 | m_total_latency_score == other.m_total_latency_score; |
134 | 865 | } |
135 | | void Add(const Announcement& ann) |
136 | 18.7k | { |
137 | 18.7k | m_total_usage += ann.GetMemUsage(); |
138 | 18.7k | m_total_latency_score += ann.GetLatencyScore(); |
139 | 18.7k | m_count_announcements += 1; |
140 | 18.7k | } |
141 | | bool Subtract(const Announcement& ann) |
142 | 18.7k | { |
143 | 18.7k | Assume(m_total_usage >= ann.GetMemUsage()); |
144 | 18.7k | Assume(m_total_latency_score >= ann.GetLatencyScore()); |
145 | 18.7k | Assume(m_count_announcements >= 1); |
146 | | |
147 | 18.7k | m_total_usage -= ann.GetMemUsage(); |
148 | 18.7k | m_total_latency_score -= ann.GetLatencyScore(); |
149 | 18.7k | m_count_announcements -= 1; |
150 | 18.7k | return m_count_announcements == 0; |
151 | 18.7k | } |
152 | | /** There are 2 DoS scores: |
153 | | * - Latency score (ratio of total latency score / max allowed latency score) |
154 | | * - Memory score (ratio of total memory usage / max allowed memory usage). |
155 | | * |
156 | | * If the peer is using more than the allowed for either resource, its DoS score is > 1. |
157 | | * A peer having a DoS score > 1 does not necessarily mean that something is wrong, since we |
158 | | * do not trim unless the orphanage exceeds global limits, but it means that this peer will |
159 | | * be selected for trimming sooner. If the global latency score or global memory usage |
160 | | * limits are exceeded, it must be that there is a peer whose DoS score > 1. */ |
161 | | FeeFrac GetDosScore(TxOrphanage::Count max_peer_latency_score, TxOrphanage::Usage max_peer_memory) const |
162 | 4.77k | { |
163 | 4.77k | assert(max_peer_latency_score > 0); |
164 | 4.77k | assert(max_peer_memory > 0); |
165 | 4.77k | const FeeFrac latency_score(m_total_latency_score, max_peer_latency_score); |
166 | 4.77k | const FeeFrac mem_score(m_total_usage, max_peer_memory); |
167 | 4.77k | return std::max<FeeFrac>(latency_score, mem_score); |
168 | 4.77k | } |
169 | | }; |
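 | | // Editor's worked example for GetDosScore (hedged, hypothetical numbers):
 | | // with max_peer_latency_score = 50 and max_peer_memory = 400'000, a peer
 | | // holding latency score 30 and usage 360'000 has ratios 30/50 = 0.6 and
 | | // 360'000/400'000 = 0.9. GetDosScore returns the larger FeeFrac, 0.9, so
 | | // memory is this peer's binding resource; FeeFrac compares the ratios
 | | // exactly, without floating point, and a ratio above 1 marks the peer as
 | | // a trimming candidate in LimitOrphans.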
170 | | /** Store per-peer statistics. Used to determine each peer's DoS score. The size of this map is used to determine the |
171 | | * number of peers and thus global {latency score, memory} limits. */ |
172 | | std::unordered_map<NodeId, PeerDoSInfo> m_peer_orphanage_info; |
173 | | |
174 | | /** Erase from m_orphans and update m_peer_orphanage_info. */ |
175 | | template<typename Tag> |
176 | | void Erase(Iter<Tag> it); |
177 | | |
178 | | /** Erase by wtxid. */ |
179 | | bool EraseTxInternal(const Wtxid& wtxid); |
180 | | |
181 | | /** Check whether it points to the only announcement with its wtxid. */
182 | | bool IsUnique(Iter<ByWtxid> it) const; |
183 | | |
184 | | /** Check if the orphanage needs trimming. */ |
185 | | bool NeedsTrim() const; |
186 | | |
187 | | /** Limit the orphanage to MaxGlobalLatencyScore and MaxGlobalUsage. */ |
188 | | void LimitOrphans(); |
189 | | |
190 | | public: |
191 | 618 | TxOrphanageImpl() = default; |
192 | | TxOrphanageImpl(Count max_global_latency_score, Usage reserved_peer_usage) : |
193 | 0 | m_max_global_latency_score{max_global_latency_score}, |
194 | 0 | m_reserved_usage_per_peer{reserved_peer_usage} |
195 | 0 | {} |
196 | 619 | ~TxOrphanageImpl() noexcept override = default; |
197 | | |
198 | | TxOrphanage::Count CountAnnouncements() const override; |
199 | | TxOrphanage::Count CountUniqueOrphans() const override; |
200 | | TxOrphanage::Count AnnouncementsFromPeer(NodeId peer) const override; |
201 | | TxOrphanage::Count LatencyScoreFromPeer(NodeId peer) const override; |
202 | | TxOrphanage::Usage UsageByPeer(NodeId peer) const override; |
203 | | |
204 | | TxOrphanage::Count MaxGlobalLatencyScore() const override; |
205 | | TxOrphanage::Count TotalLatencyScore() const override; |
206 | | TxOrphanage::Usage ReservedPeerUsage() const override; |
207 | | |
208 | | /** Maximum allowed (deduplicated) latency score for all transactions (see Announcement::GetLatencyScore()). Dynamic |
209 | | * based on number of peers. Each peer has an equal amount, but the global maximum latency score stays constant. The |
210 | | * number of peers times MaxPeerLatencyScore() (rounded) adds up to MaxGlobalLatencyScore(). As long as every peer's |
211 | | * m_total_latency_score / MaxPeerLatencyScore() < 1, MaxGlobalLatencyScore() is not exceeded. */ |
212 | | TxOrphanage::Count MaxPeerLatencyScore() const override; |
213 | | |
214 | | /** Maximum allowed (deduplicated) memory usage for all transactions (see Announcement::GetMemUsage()). Dynamic based |
215 | | * on number of peers. More peers means more allowed memory usage. The number of peers times ReservedPeerUsage() |
216 | | * adds up to MaxGlobalUsage(). As long as every peer's m_total_usage / ReservedPeerUsage() < 1, MaxGlobalUsage() is |
217 | | * not exceeded. */ |
218 | | TxOrphanage::Usage MaxGlobalUsage() const override; |
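 | | // Editor's worked example (hedged, hypothetical values): with a global
 | | // latency budget of 3,000 and 10 peers in m_peer_orphanage_info,
 | | // MaxPeerLatencyScore() = 3,000 / 10 = 300, so the per-peer share shrinks
 | | // as peers are added, while MaxGlobalUsage() = ReservedPeerUsage() * 10,
 | | // so the global memory ceiling grows with the peer count.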
219 | | |
220 | | bool AddTx(const CTransactionRef& tx, NodeId peer) override; |
221 | | bool AddAnnouncer(const Wtxid& wtxid, NodeId peer) override; |
222 | | CTransactionRef GetTx(const Wtxid& wtxid) const override; |
223 | | bool HaveTx(const Wtxid& wtxid) const override; |
224 | | bool HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const override; |
225 | | CTransactionRef GetTxToReconsider(NodeId peer) override; |
226 | | bool EraseTx(const Wtxid& wtxid) override; |
227 | | void EraseForPeer(NodeId peer) override; |
228 | | void EraseForBlock(const CBlock& block) override; |
229 | | std::vector<std::pair<Wtxid, NodeId>> AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) override; |
230 | | bool HaveTxToReconsider(NodeId peer) override; |
231 | | std::vector<CTransactionRef> GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId nodeid) const override; |
232 | | std::vector<OrphanInfo> GetOrphanTransactions() const override; |
233 | | TxOrphanage::Usage TotalOrphanUsage() const override; |
234 | | void SanityCheck() const override; |
235 | | }; |
236 | | |
237 | | template<typename Tag> |
238 | | void TxOrphanageImpl::Erase(Iter<Tag> it) |
239 | 18.7k | { |
240 | | // Update m_peer_orphanage_info and clean up entries if they point to an empty struct. |
241 | | // This means peers that are not storing any orphans do not have an entry in |
242 | | // m_peer_orphanage_info (they can be added back later if they announce another orphan) and |
243 | | // ensures disconnected peers are not tracked forever. |
244 | 18.7k | auto peer_it = m_peer_orphanage_info.find(it->m_announcer); |
245 | 18.7k | Assume(peer_it != m_peer_orphanage_info.end()); |
246 | 18.7k | if (peer_it->second.Subtract(*it)) m_peer_orphanage_info.erase(peer_it); |
247 | | |
248 | 18.7k | if (IsUnique(m_orphans.project<ByWtxid>(it))) { |
249 | 17.3k | m_unique_orphans -= 1; |
250 | 17.3k | m_unique_rounded_input_scores -= it->GetLatencyScore() - 1; |
251 | 17.3k | m_unique_orphan_usage -= it->GetMemUsage(); |
252 | | |
253 | | // Remove references in m_outpoint_to_orphan_wtxids |
254 | 17.3k | const auto& wtxid{it->m_tx->GetWitnessHash()}; |
255 | 91.3k | for (const auto& input : it->m_tx->vin) { |
256 | 91.3k | auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout); |
257 | 91.3k | if (it_prev != m_outpoint_to_orphan_wtxids.end()) { |
258 | 83.3k | it_prev->second.erase(wtxid); |
259 | | // Clean up keys if they point to an empty set. |
260 | 83.3k | if (it_prev->second.empty()) { |
261 | 29.3k | m_outpoint_to_orphan_wtxids.erase(it_prev); |
262 | 29.3k | } |
263 | 83.3k | } |
264 | 91.3k | } |
265 | 17.3k | } |
266 | | |
267 | | // If this was the (unique) reconsiderable announcement for its wtxid, then the wtxid won't |
268 | | // have any reconsiderable announcements left after erasing. |
269 | 18.7k | if (it->m_reconsider) m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash()); |
270 | | |
271 | 18.7k | m_orphans.get<Tag>().erase(it); |
272 | 18.7k | }
273 | | |
274 | | bool TxOrphanageImpl::IsUnique(Iter<ByWtxid> it) const |
275 | 37.4k | { |
276 | | // Iterators ByWtxid are sorted by wtxid, so check if neighboring elements have the same wtxid. |
277 | 37.4k | auto& index = m_orphans.get<ByWtxid>(); |
278 | 37.4k | if (it == index.end()) return false; |
279 | 37.4k | if (std::next(it) != index.end() && std::next(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false; |
280 | 35.3k | if (it != index.begin() && std::prev(it)->m_tx->GetWitnessHash() == it->m_tx->GetWitnessHash()) return false; |
281 | 34.7k | return true; |
282 | 35.3k | } |
283 | | |
284 | | TxOrphanage::Usage TxOrphanageImpl::UsageByPeer(NodeId peer) const |
285 | 22.2k | { |
286 | 22.2k | auto it = m_peer_orphanage_info.find(peer); |
287 | 22.2k | return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_usage; |
288 | 22.2k | } |
289 | | |
290 | 0 | TxOrphanage::Count TxOrphanageImpl::CountAnnouncements() const { return m_orphans.size(); } |
291 | | |
292 | 52.9k | TxOrphanage::Usage TxOrphanageImpl::TotalOrphanUsage() const { return m_unique_orphan_usage; } |
293 | | |
294 | 3.38k | TxOrphanage::Count TxOrphanageImpl::CountUniqueOrphans() const { return m_unique_orphans; } |
295 | | |
296 | 0 | TxOrphanage::Count TxOrphanageImpl::AnnouncementsFromPeer(NodeId peer) const { |
297 | 0 | auto it = m_peer_orphanage_info.find(peer); |
298 | 0 | return it == m_peer_orphanage_info.end() ? 0 : it->second.m_count_announcements; |
299 | 0 | } |
300 | | |
301 | 0 | TxOrphanage::Count TxOrphanageImpl::LatencyScoreFromPeer(NodeId peer) const { |
302 | 0 | auto it = m_peer_orphanage_info.find(peer); |
303 | 0 | return it == m_peer_orphanage_info.end() ? 0 : it->second.m_total_latency_score; |
304 | 0 | } |
305 | | |
306 | | bool TxOrphanageImpl::AddTx(const CTransactionRef& tx, NodeId peer) |
307 | 18.4k | { |
308 | 18.4k | const auto& wtxid{tx->GetWitnessHash()}; |
309 | 18.4k | const auto& txid{tx->GetHash()}; |
310 | | |
311 | | // Ignore transactions above max standard size to avoid a send-big-orphans memory exhaustion attack. |
312 | 18.4k | TxOrphanage::Usage sz = GetTransactionWeight(*tx); |
313 | 18.4k | if (sz > MAX_STANDARD_TX_WEIGHT) { |
314 | 0 | LogDebug(BCLog::TXPACKAGES, "ignoring large orphan tx (size: %u, txid: %s, wtxid: %s)\n", sz, txid.ToString(), wtxid.ToString()); |
315 | 0 | return false; |
316 | 0 | } |
317 | | |
318 | | // We will return false if the tx already exists under a different peer. |
319 | 18.4k | const bool brand_new{!HaveTx(wtxid)}; |
320 | | |
321 | 18.4k | auto [iter, inserted] = m_orphans.get<ByWtxid>().emplace(tx, peer, m_current_sequence); |
322 | | // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false. |
323 | 18.4k | if (!inserted) return false; |
324 | | |
325 | 18.4k | ++m_current_sequence; |
326 | 18.4k | auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second; |
327 | 18.4k | peer_info.Add(*iter); |
328 | | |
329 | | // Add links in m_outpoint_to_orphan_wtxids |
330 | 18.4k | if (brand_new) { |
331 | 91.3k | for (const auto& input : tx->vin) { |
332 | 91.3k | auto& wtxids_for_prevout = m_outpoint_to_orphan_wtxids.try_emplace(input.prevout).first->second; |
333 | 91.3k | wtxids_for_prevout.emplace(wtxid); |
334 | 91.3k | } |
335 | | |
336 | 17.3k | m_unique_orphans += 1; |
337 | 17.3k | m_unique_orphan_usage += iter->GetMemUsage(); |
338 | 17.3k | m_unique_rounded_input_scores += iter->GetLatencyScore() - 1; |
339 | | |
340 | 17.3k | LogDebug(BCLog::TXPACKAGES, "stored orphan tx %s (wtxid=%s), weight: %u (mapsz %u outsz %u)\n", |
341 | 17.3k | txid.ToString(), wtxid.ToString(), sz, m_orphans.size(), m_outpoint_to_orphan_wtxids.size()); |
342 | 17.3k | Assume(IsUnique(iter)); |
343 | 17.3k | } else { |
344 | 1.08k | LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n", |
345 | 1.08k | peer, txid.ToString(), wtxid.ToString()); |
346 | 1.08k | Assume(!IsUnique(iter)); |
347 | 1.08k | } |
348 | | |
349 | | // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) |
350 | 18.4k | LimitOrphans(); |
351 | 18.4k | return brand_new; |
352 | 18.4k | } |
353 | | |
354 | | bool TxOrphanageImpl::AddAnnouncer(const Wtxid& wtxid, NodeId peer) |
355 | 276 | { |
356 | 276 | auto& index_by_wtxid = m_orphans.get<ByWtxid>(); |
357 | 276 | auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER}); |
358 | | |
359 | | // Do nothing if this transaction isn't already present. We can't create an entry if we don't |
360 | | // have the tx data. |
361 | 276 | if (it == index_by_wtxid.end()) return false; |
362 | 276 | if (it->m_tx->GetWitnessHash() != wtxid) return false; |
363 | | |
364 | | // Add another announcement, copying the CTransactionRef from one that already exists. |
365 | 276 | const auto& ptx = it->m_tx; |
366 | 276 | auto [iter, inserted] = index_by_wtxid.emplace(ptx, peer, m_current_sequence); |
367 | | // If the announcement (same wtxid, same peer) already exists, emplacement fails. Return false. |
368 | 276 | if (!inserted) return false; |
369 | | |
370 | 276 | ++m_current_sequence; |
371 | 276 | auto& peer_info = m_peer_orphanage_info.try_emplace(peer).first->second; |
372 | 276 | peer_info.Add(*iter); |
373 | | |
374 | 276 | const auto& txid = ptx->GetHash(); |
375 | 276 | LogDebug(BCLog::TXPACKAGES, "added peer=%d as announcer of orphan tx %s (wtxid=%s)\n", |
376 | 276 | peer, txid.ToString(), wtxid.ToString()); |
377 | | |
378 | 276 | Assume(!IsUnique(iter)); |
379 | | |
380 | | // DoS prevention: do not allow m_orphanage to grow unbounded (see CVE-2012-3789) |
381 | 276 | LimitOrphans(); |
382 | 276 | return true; |
383 | 276 | } |
384 | | |
385 | | bool TxOrphanageImpl::EraseTxInternal(const Wtxid& wtxid) |
386 | 26.6k | { |
387 | 26.6k | auto& index_by_wtxid = m_orphans.get<ByWtxid>(); |
388 | | |
389 | 26.6k | auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER}); |
390 | 26.6k | if (it == index_by_wtxid.end() || it->m_tx->GetWitnessHash() != wtxid) return false; |
391 | | |
392 | 10.3k | auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER}); |
393 | 10.3k | unsigned int num_ann{0}; |
394 | 10.3k | const auto txid = it->m_tx->GetHash(); |
395 | 21.4k | while (it != it_end) { |
396 | 11.0k | Assume(it->m_tx->GetWitnessHash() == wtxid); |
397 | 11.0k | Erase<ByWtxid>(it++); |
398 | 11.0k | num_ann += 1; |
399 | 11.0k | } |
400 | 10.3k | LogDebug(BCLog::TXPACKAGES, "removed orphan tx %s (wtxid=%s) (%u announcements)\n", txid.ToString(), wtxid.ToString(), num_ann); |
401 | | |
402 | 10.3k | return true; |
403 | 26.6k | } |
404 | | |
405 | | bool TxOrphanageImpl::EraseTx(const Wtxid& wtxid) |
406 | 16.8k | { |
407 | 16.8k | const auto ret = EraseTxInternal(wtxid); |
408 | | |
409 | | // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here. |
410 | 16.8k | LimitOrphans(); |
411 | | |
412 | 16.8k | return ret; |
413 | 16.8k | } |
414 | | |
415 | | /** Erase all announcements from this peer. */
416 | | void TxOrphanageImpl::EraseForPeer(NodeId peer) |
417 | 22.2k | { |
418 | 22.2k | auto& index_by_peer = m_orphans.get<ByPeer>(); |
419 | 22.2k | auto it = index_by_peer.lower_bound(ByPeerView{peer, false, 0}); |
420 | 22.2k | if (it == index_by_peer.end() || it->m_announcer != peer) return; |
421 | | |
422 | 2.50k | unsigned int num_ann{0}; |
423 | 8.11k | while (it != index_by_peer.end() && it->m_announcer == peer) { |
424 | | // Delete item, cleaning up m_outpoint_to_orphan_wtxids iff this entry is unique by wtxid. |
425 | 5.61k | Erase<ByPeer>(it++); |
426 | 5.61k | num_ann += 1; |
427 | 5.61k | } |
428 | 2.50k | Assume(!m_peer_orphanage_info.contains(peer)); |
429 | | |
430 | 2.50k | if (num_ann > 0) LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) from peer=%d\n", num_ann, peer); |
431 | | |
432 | | // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here. |
433 | 2.50k | LimitOrphans(); |
434 | 2.50k | } |
435 | | |
436 | | /** If the data structure needs trimming, evicts announcements by selecting the DoSiest peer and evicting its oldest |
437 | | * announcement (sorting non-reconsiderable orphans first, to give reconsiderable orphans a greater chance of being |
438 | | * processed). Does nothing if no global limits are exceeded. This eviction strategy effectively "reserves" an |
439 | | * amount of announcements and space for each peer. The reserved amount is protected from eviction even if there |
440 | | * are peers spamming the orphanage. |
441 | | */ |
442 | | void TxOrphanageImpl::LimitOrphans() |
443 | 47.7k | { |
444 | 47.7k | if (!NeedsTrim()) return; |
445 | | |
446 | 1.38k | const auto original_unique_txns{CountUniqueOrphans()}; |
447 | | |
448 | | // Even though it's possible for MaxPeerLatencyScore to increase within this call to LimitOrphans |
449 | | // (e.g. if a peer's orphans are removed entirely, changing the number of peers), use consistent limits throughout. |
450 | 1.38k | const auto max_lat{MaxPeerLatencyScore()}; |
451 | 1.38k | const auto max_mem{ReservedPeerUsage()}; |
452 | | |
453 | | // We have exceeded the global limit(s). Now, identify who is using too much and evict their orphans. |
454 | | // Create a heap of pairs (NodeId, DoS score), sorted by descending DoS score. |
455 | 1.38k | std::vector<std::pair<NodeId, FeeFrac>> heap_peer_dos; |
456 | 1.38k | heap_peer_dos.reserve(m_peer_orphanage_info.size()); |
457 | 2.63k | for (const auto& [nodeid, entry] : m_peer_orphanage_info) { |
458 | | // Performance optimization: only consider peers with a DoS score > 1. |
459 | 2.63k | const auto dos_score = entry.GetDosScore(max_lat, max_mem); |
460 | 2.63k | if (dos_score >> FeeFrac{1, 1}) { |
461 | 1.86k | heap_peer_dos.emplace_back(nodeid, dos_score); |
462 | 1.86k | } |
463 | 2.63k | } |
464 | 1.38k | static constexpr auto compare_score = [](const auto& left, const auto& right) { |
465 | 643 | if (left.second != right.second) return left.second < right.second; |
466 | | // Tiebreak by considering the more recent peer (higher NodeId) to be worse. |
467 | 24 | return left.first < right.first; |
468 | 643 | }; |
469 | 1.38k | std::make_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score); |
470 | | |
471 | 1.38k | unsigned int num_erased{0}; |
472 | | // This outer loop finds the peer with the highest DoS score, which is a fraction of memory and latency scores |
473 | | // over the respective allowances. We continue until the orphanage is within global limits. That means some peers |
474 | | // might still have a DoS score > 1 at the end. |
475 | | // Note: if ratios are the same, FeeFrac tiebreaks by denominator. In practice, since the latency denominator (number of |
476 | | // announcements and inputs) is always lower, this means that a peer with only high latency scores will be targeted |
477 | | // before a peer using a lot of memory, even if they have the same ratios. |
478 | 1.48k | do { |
479 | 1.48k | Assume(!heap_peer_dos.empty()); |
480 | | // This is a max-heap, so the worst peer is at the front. pop_heap() |
481 | | // moves it to the back, and the next worst peer is moved to the front. |
482 | 1.48k | std::pop_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score); |
483 | 1.48k | const auto [worst_peer, dos_score] = std::move(heap_peer_dos.back()); |
484 | 1.48k | heap_peer_dos.pop_back(); |
485 | | |
486 | | // If needs trim, then at least one peer has a DoS score higher than 1. |
487 | 1.48k | Assume(dos_score >> (FeeFrac{1, 1})); |
488 | | |
489 | 1.48k | auto it_worst_peer = m_peer_orphanage_info.find(worst_peer); |
490 | | |
491 | | // This inner loop trims until this peer is no longer the DoSiest one or its score drops to 1 or below. The score 1 is
492 | | // just a conservative fallback: once the last peer goes below ratio 1, NeedsTrim() will return false anyway. |
493 | | // We evict the oldest announcement(s) from this peer, sorting non-reconsiderable before reconsiderable. |
494 | | // The number of inner loop iterations is bounded by the total number of announcements. |
495 | 1.48k | const auto& dos_threshold = heap_peer_dos.empty() ? FeeFrac{1, 1} : heap_peer_dos.front().second; |
496 | 1.48k | auto it_ann = m_orphans.get<ByPeer>().lower_bound(ByPeerView{worst_peer, false, 0}); |
497 | 1.48k | unsigned int num_erased_this_round{0}; |
498 | 1.48k | unsigned int starting_num_ann{it_worst_peer->second.m_count_announcements}; |
499 | 2.47k | while (NeedsTrim()) { |
500 | 2.05k | if (!Assume(it_ann != m_orphans.get<ByPeer>().end())) break; |
501 | 2.05k | if (!Assume(it_ann->m_announcer == worst_peer)) break; |
502 | | |
503 | 2.05k | Erase<ByPeer>(it_ann++); |
504 | 2.05k | num_erased += 1; |
505 | 2.05k | num_erased_this_round += 1; |
506 | | |
507 | | // If we erased the last orphan from this peer, it_worst_peer will be invalidated. |
508 | 2.05k | it_worst_peer = m_peer_orphanage_info.find(worst_peer); |
509 | 2.05k | if (it_worst_peer == m_peer_orphanage_info.end() || it_worst_peer->second.GetDosScore(max_lat, max_mem) <= dos_threshold) break; |
510 | 2.05k | } |
511 | 1.48k | LogDebug(BCLog::TXPACKAGES, "peer=%d orphanage overflow, removed %u of %u announcements\n", worst_peer, num_erased_this_round, starting_num_ann); |
512 | | |
513 | 1.48k | if (!NeedsTrim()) break; |
514 | | |
515 | | // Unless this peer is empty, put it back in the heap so we continue to consider evicting its orphans. |
516 | | // We may select this peer for evictions again if there are multiple DoSy peers. |
517 | 97 | if (it_worst_peer != m_peer_orphanage_info.end() && it_worst_peer->second.m_count_announcements > 0) { |
518 | 97 | heap_peer_dos.emplace_back(worst_peer, it_worst_peer->second.GetDosScore(max_lat, max_mem)); |
519 | 97 | std::push_heap(heap_peer_dos.begin(), heap_peer_dos.end(), compare_score); |
520 | 97 | } |
521 | 97 | } while (true); |
522 | | |
523 | 0 | const auto remaining_unique_orphans{CountUniqueOrphans()}; |
524 | 1.38k | LogDebug(BCLog::TXPACKAGES, "orphanage overflow, removed %u tx (%u announcements)\n", original_unique_txns - remaining_unique_orphans, num_erased); |
525 | 1.38k | } |
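 | | // Editor's note (hedged trace of the loop above): suppose peers A and B
 | | // have DoS scores 2.1 and 1.4. A is popped from the max-heap first, and
 | | // its oldest announcements are erased until either trimming is no longer
 | | // needed or A's score falls to the next threshold, max(1.4, 1). If the
 | | // global limits are still exceeded, A (if it still has announcements) is
 | | // pushed back and the new heap top is processed next.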
526 | | |
527 | | std::vector<std::pair<Wtxid, NodeId>> TxOrphanageImpl::AddChildrenToWorkSet(const CTransaction& tx, FastRandomContext& rng) |
528 | 7.33k | { |
529 | 7.33k | std::vector<std::pair<Wtxid, NodeId>> ret; |
530 | 7.33k | auto& index_by_wtxid = m_orphans.get<ByWtxid>(); |
531 | 519k | for (unsigned int i = 0; i < tx.vout.size(); i++) { |
532 | 512k | const auto it_by_prev = m_outpoint_to_orphan_wtxids.find(COutPoint(tx.GetHash(), i)); |
533 | 512k | if (it_by_prev != m_outpoint_to_orphan_wtxids.end()) { |
534 | 17.8k | for (const auto& wtxid : it_by_prev->second) { |
535 | | // If a reconsiderable announcement for this wtxid already exists, skip it. |
536 | 17.8k | if (m_reconsiderable_wtxids.contains(wtxid)) continue; |
537 | | |
538 | | // Belt and suspenders: each entry in m_outpoint_to_orphan_wtxids should always have at least 1 announcement.
539 | 4.47k | auto it = index_by_wtxid.lower_bound(ByWtxidView{wtxid, MIN_PEER}); |
540 | 4.47k | if (!Assume(it != index_by_wtxid.end() && it->m_tx->GetWitnessHash() == wtxid)) continue; |
541 | | |
542 | | // Select a random peer to assign orphan processing, reducing wasted work if the orphan is still missing |
543 | | // inputs. However, we don't want to create an issue in which the assigned peer can purposefully stop us |
544 | | // from processing the orphan by disconnecting. |
545 | 4.47k | auto it_end = index_by_wtxid.upper_bound(ByWtxidView{wtxid, MAX_PEER}); |
546 | 4.47k | const auto num_announcers{std::distance(it, it_end)}; |
547 | 4.47k | if (!Assume(num_announcers > 0)) continue; |
548 | 4.47k | std::advance(it, rng.randrange(num_announcers)); |
549 | | |
550 | 4.47k | if (!Assume(it->m_tx->GetWitnessHash() == wtxid)) break; |
551 | | |
552 | | // Mark this orphan as ready to be reconsidered. |
553 | 4.47k | static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = true; }; |
554 | 4.47k | Assume(!it->m_reconsider); |
555 | 4.47k | index_by_wtxid.modify(it, mark_reconsidered_modifier); |
556 | 4.47k | ret.emplace_back(wtxid, it->m_announcer); |
557 | 4.47k | m_reconsiderable_wtxids.insert(wtxid); |
558 | | |
559 | 4.47k | LogDebug(BCLog::TXPACKAGES, "added %s (wtxid=%s) to peer %d workset\n", |
560 | 4.47k | it->m_tx->GetHash().ToString(), it->m_tx->GetWitnessHash().ToString(), it->m_announcer); |
561 | 4.47k | } |
562 | 2.97k | } |
563 | 512k | } |
564 | 7.33k | return ret; |
565 | 7.33k | } |
566 | | |
567 | | bool TxOrphanageImpl::HaveTx(const Wtxid& wtxid) const |
568 | 278k | { |
569 | 278k | auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER}); |
570 | 278k | return it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid; |
571 | 278k | } |
572 | | |
573 | | CTransactionRef TxOrphanageImpl::GetTx(const Wtxid& wtxid) const |
574 | 5.09k | { |
575 | 5.09k | auto it_lower = m_orphans.get<ByWtxid>().lower_bound(ByWtxidView{wtxid, MIN_PEER}); |
576 | 5.09k | if (it_lower != m_orphans.get<ByWtxid>().end() && it_lower->m_tx->GetWitnessHash() == wtxid) return it_lower->m_tx; |
577 | 4.05k | return nullptr; |
578 | 5.09k | } |
579 | | |
580 | | bool TxOrphanageImpl::HaveTxFromPeer(const Wtxid& wtxid, NodeId peer) const |
581 | 26.3k | { |
582 | 26.3k | return m_orphans.get<ByWtxid>().count(ByWtxidView{wtxid, peer}) > 0; |
583 | 26.3k | } |
584 | | |
585 | | /** If there is a tx that can be reconsidered, return it and set it back to |
586 | | * non-reconsiderable. Otherwise, return nullptr. */
587 | | CTransactionRef TxOrphanageImpl::GetTxToReconsider(NodeId peer) |
588 | 9.05k | { |
589 | 9.05k | auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0}); |
590 | 9.05k | if (it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider) { |
591 | | // Flip m_reconsider. Even if this transaction stays in orphanage, it shouldn't be |
592 | | // reconsidered again until there is a new reason to do so. |
593 | 1.05k | static constexpr auto mark_reconsidered_modifier = [](auto& ann) { ann.m_reconsider = false; }; |
594 | 1.05k | m_orphans.get<ByPeer>().modify(it, mark_reconsidered_modifier); |
595 | | // As there is exactly one m_reconsider announcement per reconsiderable wtxid, flipping
596 | | // the m_reconsider flag means the wtxid is no longer reconsiderable. |
597 | 1.05k | m_reconsiderable_wtxids.erase(it->m_tx->GetWitnessHash()); |
598 | 1.05k | return it->m_tx; |
599 | 1.05k | } |
600 | 8.00k | return nullptr; |
601 | 9.05k | } |
602 | | |
603 | | /** Return whether there is a tx that can be reconsidered. */ |
604 | | bool TxOrphanageImpl::HaveTxToReconsider(NodeId peer) |
605 | 9.05k | { |
606 | 9.05k | auto it = m_orphans.get<ByPeer>().lower_bound(ByPeerView{peer, true, 0}); |
607 | 9.05k | return it != m_orphans.get<ByPeer>().end() && it->m_announcer == peer && it->m_reconsider; |
608 | 9.05k | } |
609 | | |
610 | | void TxOrphanageImpl::EraseForBlock(const CBlock& block) |
611 | 17.3k | { |
612 | 17.3k | if (m_orphans.empty()) return; |
613 | | |
614 | 9.69k | std::set<Wtxid> wtxids_to_erase; |
615 | 9.69k | for (const CTransactionRef& ptx : block.vtx) { |
616 | 9.69k | const CTransaction& block_tx = *ptx; |
617 | | |
618 | | // Which orphan pool entries must we evict? |
619 | 23.4k | for (const auto& input : block_tx.vin) { |
620 | 23.4k | auto it_prev = m_outpoint_to_orphan_wtxids.find(input.prevout); |
621 | 23.4k | if (it_prev != m_outpoint_to_orphan_wtxids.end()) { |
622 | | // Copy all wtxids to wtxids_to_erase. |
623 | 8.84k | std::copy(it_prev->second.cbegin(), it_prev->second.cend(), std::inserter(wtxids_to_erase, wtxids_to_erase.end())); |
624 | 8.84k | } |
625 | 23.4k | } |
626 | 9.69k | } |
627 | | |
628 | 9.69k | unsigned int num_erased{0}; |
629 | 9.82k | for (const auto& wtxid : wtxids_to_erase) { |
630 | | // Don't use EraseTx here because it calls LimitOrphans and announcements deleted in that call are not reflected |
631 | | // in its return result. Waiting until the end to do LimitOrphans helps save repeated computation and allows us |
632 | | // to check that num_erased is what we expect. |
633 | 9.82k | num_erased += EraseTxInternal(wtxid) ? 1 : 0; |
634 | 9.82k | } |
635 | | |
636 | 9.69k | if (num_erased != 0) { |
637 | 3.06k | LogDebug(BCLog::TXPACKAGES, "Erased %d orphan transaction(s) included or conflicted by block\n", num_erased); |
638 | 3.06k | } |
639 | 9.69k | Assume(wtxids_to_erase.size() == num_erased); |
640 | | |
641 | | // Deletions can cause the orphanage's MaxGlobalUsage to decrease, so we may need to trim here. |
642 | 9.69k | LimitOrphans(); |
643 | 9.69k | } |
644 | | |
645 | | std::vector<CTransactionRef> TxOrphanageImpl::GetChildrenFromSamePeer(const CTransactionRef& parent, NodeId peer) const |
646 | 8.61k | { |
647 | 8.61k | std::vector<CTransactionRef> children_found; |
648 | 8.61k | const auto& parent_txid{parent->GetHash()}; |
649 | | |
650 | | // Iterate through all orphans from this peer, in reverse order, so that more recent |
651 | | // transactions are added first. Doing so helps avoid work when one of the orphans replaced |
652 | | // an earlier one. Since we require the NodeId to match, one peer's announcement order does |
653 | | // not bias how we process other peers' orphans.
654 | 8.61k | auto& index_by_peer = m_orphans.get<ByPeer>(); |
655 | 8.61k | auto it_upper = index_by_peer.upper_bound(ByPeerView{peer, true, std::numeric_limits<uint64_t>::max()}); |
656 | 8.61k | auto it_lower = index_by_peer.lower_bound(ByPeerView{peer, false, 0}); |
657 | | |
658 | 58.5k | while (it_upper != it_lower) { |
659 | 49.9k | --it_upper; |
660 | 49.9k | if (!Assume(it_upper->m_announcer == peer)) break; |
661 | | // Check if this tx spends from parent. |
662 | 98.7k | for (const auto& input : it_upper->m_tx->vin) { |
663 | 98.7k | if (input.prevout.hash == parent_txid) { |
664 | 43.4k | children_found.emplace_back(it_upper->m_tx); |
665 | 43.4k | break; |
666 | 43.4k | } |
667 | 98.7k | } |
668 | 49.9k | } |
669 | 8.61k | return children_found; |
670 | 8.61k | } |
671 | | |
672 | | std::vector<TxOrphanage::OrphanInfo> TxOrphanageImpl::GetOrphanTransactions() const |
673 | 0 | { |
674 | 0 | std::vector<TxOrphanage::OrphanInfo> result; |
675 | 0 | result.reserve(m_unique_orphans); |
676 | |
677 | 0 | auto& index_by_wtxid = m_orphans.get<ByWtxid>(); |
678 | 0 | auto it = index_by_wtxid.begin(); |
679 | 0 | std::set<NodeId> this_orphan_announcers; |
680 | 0 | while (it != index_by_wtxid.end()) { |
681 | 0 | this_orphan_announcers.insert(it->m_announcer); |
682 | | // If this is the last entry, or the next entry has a different wtxid, build an OrphanInfo.
683 | 0 | if (std::next(it) == index_by_wtxid.end() || std::next(it)->m_tx->GetWitnessHash() != it->m_tx->GetWitnessHash()) { |
684 | 0 | result.emplace_back(it->m_tx, std::move(this_orphan_announcers)); |
685 | 0 | this_orphan_announcers.clear(); |
686 | 0 | } |
687 | |
688 | 0 | ++it; |
689 | 0 | } |
690 | 0 | Assume(m_unique_orphans == result.size()); |
691 | |
692 | 0 | return result; |
693 | 0 | } |
694 | | |
695 | | void TxOrphanageImpl::SanityCheck() const |
696 | 618 | { |
697 | 618 | std::unordered_map<NodeId, PeerDoSInfo> reconstructed_peer_info; |
698 | 618 | std::map<Wtxid, std::pair<TxOrphanage::Usage, TxOrphanage::Count>> unique_wtxids_to_scores; |
699 | 618 | std::set<COutPoint> all_outpoints; |
700 | 618 | std::set<Wtxid> reconstructed_reconsiderable_wtxids; |
701 | | |
702 | 3.06k | for (auto it = m_orphans.begin(); it != m_orphans.end(); ++it) { |
703 | 11.2k | for (const auto& input : it->m_tx->vin) { |
704 | 11.2k | all_outpoints.insert(input.prevout); |
705 | 11.2k | } |
706 | 2.44k | unique_wtxids_to_scores.emplace(it->m_tx->GetWitnessHash(), std::make_pair(it->GetMemUsage(), it->GetLatencyScore() - 1)); |
707 | | |
708 | 2.44k | auto& peer_info = reconstructed_peer_info[it->m_announcer]; |
709 | 2.44k | peer_info.m_total_usage += it->GetMemUsage(); |
710 | 2.44k | peer_info.m_count_announcements += 1; |
711 | 2.44k | peer_info.m_total_latency_score += it->GetLatencyScore(); |
712 | | |
713 | 2.44k | if (it->m_reconsider) { |
714 | 715 | auto [_, added] = reconstructed_reconsiderable_wtxids.insert(it->m_tx->GetWitnessHash()); |
715 | | // Check that there is only ever 1 announcement per wtxid with m_reconsider set. |
716 | 715 | assert(added); |
717 | 715 | } |
718 | 2.44k | } |
719 | 618 | assert(reconstructed_peer_info.size() == m_peer_orphanage_info.size()); |
720 | | |
721 | | // Recalculated per-peer stats are identical to m_peer_orphanage_info |
722 | 618 | assert(reconstructed_peer_info == m_peer_orphanage_info); |
723 | | |
724 | | // Recalculated set of reconsiderable wtxids must match. |
725 | 618 | assert(m_reconsiderable_wtxids == reconstructed_reconsiderable_wtxids); |
726 | | |
727 | | // All outpoints exist in m_outpoint_to_orphan_wtxids, all keys in m_outpoint_to_orphan_wtxids correspond to some |
728 | | // orphan, and all wtxids referenced in m_outpoint_to_orphan_wtxids are also in m_orphans. |
729 | | // This ensures m_outpoint_to_orphan_wtxids is cleaned up. |
730 | 618 | assert(all_outpoints.size() == m_outpoint_to_orphan_wtxids.size()); |
731 | 3.21k | for (const auto& [outpoint, wtxid_set] : m_outpoint_to_orphan_wtxids) { |
732 | 3.21k | assert(all_outpoints.contains(outpoint)); |
733 | 7.74k | for (const auto& wtxid : wtxid_set) { |
734 | 7.74k | assert(unique_wtxids_to_scores.contains(wtxid)); |
735 | 7.74k | } |
736 | 3.21k | } |
737 | | |
738 | | // Cached m_unique_orphans value is correct. |
739 | 618 | assert(m_orphans.size() >= m_unique_orphans); |
740 | 618 | assert(m_orphans.size() <= m_peer_orphanage_info.size() * m_unique_orphans); |
741 | 618 | assert(unique_wtxids_to_scores.size() == m_unique_orphans); |
742 | | |
743 | 618 | const auto calculated_dedup_usage = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(), |
744 | 2.26k | TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.first; }); |
745 | 618 | assert(calculated_dedup_usage == m_unique_orphan_usage); |
746 | | |
747 | | // Global usage is deduplicated, so it should be less than or equal to the sum of all per-peer usages.
748 | 618 | const auto summed_peer_usage = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(), |
749 | 865 | TxOrphanage::Usage{0}, [](TxOrphanage::Usage sum, const auto pair) { return sum + pair.second.m_total_usage; }); |
750 | 618 | assert(summed_peer_usage >= m_unique_orphan_usage); |
751 | | |
752 | | // Cached m_unique_rounded_input_scores value is correct. |
753 | 618 | const auto calculated_total_latency_score = std::accumulate(unique_wtxids_to_scores.begin(), unique_wtxids_to_scores.end(), |
754 | 2.26k | TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.second; }); |
755 | 618 | assert(calculated_total_latency_score == m_unique_rounded_input_scores); |
756 | | |
757 | | // Global latency score is deduplicated, so it should be less than or equal to the sum of all per-peer latency scores.
758 | 618 | const auto summed_peer_latency_score = std::accumulate(m_peer_orphanage_info.begin(), m_peer_orphanage_info.end(), |
759 | 865 | TxOrphanage::Count{0}, [](TxOrphanage::Count sum, const auto pair) { return sum + pair.second.m_total_latency_score; }); |
760 | 618 | assert(summed_peer_latency_score >= m_unique_rounded_input_scores + m_orphans.size()); |
761 | | |
762 | 618 | assert(!NeedsTrim()); |
763 | 618 | } |
764 | | |
765 | 52.3k | TxOrphanage::Count TxOrphanageImpl::MaxGlobalLatencyScore() const { return m_max_global_latency_score; } |
766 | 52.3k | TxOrphanage::Count TxOrphanageImpl::TotalLatencyScore() const { return m_unique_rounded_input_scores + m_orphans.size(); } |
767 | 1.38k | TxOrphanage::Usage TxOrphanageImpl::ReservedPeerUsage() const { return m_reserved_usage_per_peer; } |
768 | 1.38k | TxOrphanage::Count TxOrphanageImpl::MaxPeerLatencyScore() const { return m_max_global_latency_score / std::max<unsigned int>(m_peer_orphanage_info.size(), 1); } |
769 | 52.3k | TxOrphanage::Usage TxOrphanageImpl::MaxGlobalUsage() const { return m_reserved_usage_per_peer * std::max<int64_t>(m_peer_orphanage_info.size(), 1); } |
770 | | |
771 | | bool TxOrphanageImpl::NeedsTrim() const |
772 | 52.3k | { |
773 | 52.3k | return TotalLatencyScore() > MaxGlobalLatencyScore() || TotalOrphanUsage() > MaxGlobalUsage(); |
774 | 52.3k | } |
775 | | std::unique_ptr<TxOrphanage> MakeTxOrphanage() noexcept |
776 | 618 | { |
777 | 618 | return std::make_unique<TxOrphanageImpl>(); |
778 | 618 | } |
779 | | std::unique_ptr<TxOrphanage> MakeTxOrphanage(TxOrphanage::Count max_global_latency_score, TxOrphanage::Usage reserved_peer_usage) noexcept |
780 | 0 | { |
781 | 0 | return std::make_unique<TxOrphanageImpl>(max_global_latency_score, reserved_peer_usage); |
782 | 0 | } |
783 | | } // namespace node |