/root/bitcoin/src/checkqueue.h
Line | Count | Source |
1 | | // Copyright (c) 2012-2022 The Bitcoin Core developers |
2 | | // Distributed under the MIT software license, see the accompanying |
3 | | // file COPYING or http://www.opensource.org/licenses/mit-license.php. |
4 | | |
5 | | #ifndef BITCOIN_CHECKQUEUE_H |
6 | | #define BITCOIN_CHECKQUEUE_H |
7 | | |
8 | | #include <logging.h> |
9 | | #include <sync.h> |
10 | | #include <tinyformat.h> |
11 | | #include <util/threadnames.h> |
12 | | |
13 | | #include <algorithm> |
14 | | #include <iterator> |
15 | | #include <optional> |
16 | | #include <vector> |
17 | | |
18 | | /** |
19 | | * Queue for verifications that have to be performed. |
20 | | * The verifications are represented by a type T, which must provide an |
21 | | * operator(), returning an std::optional<R>. |
22 | | * |
23 | | * The overall result of the computation is std::nullopt if all invocations |
24 | | * return std::nullopt, or one of the other results otherwise. |
25 | | * |
26 | | * One thread (the master) is assumed to push batches of verifications |
27 | | * onto the queue, where they are processed by N-1 worker threads. When |
28 | | * the master is done adding work, it temporarily joins the worker pool |
29 | | * as an N'th worker, until all jobs are done. |
30 | | * |
31 | | */ |
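// --- Illustrative sketch (editorial addition, not part of upstream checkqueue.h) ---
// A minimal check type satisfying the T interface documented above: a callable whose
// operator() returns std::optional<R>, with std::nullopt meaning "this check passed".
// The name ToyCheck and its int error code are hypothetical, chosen only for exposition.
//
//     struct ToyCheck
//     {
//         int value{0};
//         std::optional<int> operator()() const
//         {
//             if (value >= 0) return std::nullopt; // success: contributes nothing to the overall result
//             return value;                        // failure: becomes a candidate overall result
//         }
//     };
//
// With this type, CCheckQueue<ToyCheck> deduces R = int, and the overall result of a run
// is std::nullopt only if all invocations returned std::nullopt.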
32 | | template <typename T, typename R = std::remove_cvref_t<decltype(std::declval<T>()().value())>> |
33 | | class CCheckQueue |
34 | | { |
35 | | private: |
36 | | //! Mutex to protect the inner state |
37 | | Mutex m_mutex; |
38 | | |
39 | | //! Worker threads block on this when out of work |
40 | | std::condition_variable m_worker_cv; |
41 | | |
42 | | //! Master thread blocks on this when out of work |
43 | | std::condition_variable m_master_cv; |
44 | | |
45 | | //! The queue of elements to be processed. |
46 | | //! As the order of evaluation doesn't matter, it is used as a LIFO (stack) |
47 | | std::vector<T> queue GUARDED_BY(m_mutex); |
48 | | |
49 | | //! The number of workers (including the master) that are idle. |
50 | | int nIdle GUARDED_BY(m_mutex){0}; |
51 | | |
52 | | //! The total number of workers (including the master). |
53 | | int nTotal GUARDED_BY(m_mutex){0}; |
54 | | |
55 | | //! The temporary evaluation result. |
56 | | std::optional<R> m_result GUARDED_BY(m_mutex); |
57 | | |
58 | | /** |
59 | | * Number of verifications that haven't completed yet. |
60 | | * This includes elements that are no longer queued, but still in the |
61 | | * worker's own batches. |
62 | | */ |
63 | | unsigned int nTodo GUARDED_BY(m_mutex){0}; |
64 | | |
65 | | //! The maximum number of elements to be processed in one batch |
66 | | const unsigned int nBatchSize; |
67 | | |
68 | | std::vector<std::thread> m_worker_threads; |
69 | | bool m_request_stop GUARDED_BY(m_mutex){false}; |
70 | | |
71 | | /** Internal function that does the bulk of the verification work. If fMaster, return the final result. */ |
72 | | std::optional<R> Loop(bool fMaster) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) |
73 | 375k | { |
74 | 375k | std::condition_variable& cond = fMaster ? m_master_cv : m_worker_cv; |
75 | 375k | std::vector<T> vChecks; |
76 | 375k | vChecks.reserve(nBatchSize); |
77 | 375k | unsigned int nNow = 0; |
78 | 375k | std::optional<R> local_result; |
79 | 375k | bool do_work; |
80 | 382k | do { |
81 | 382k | { |
82 | 382k | WAIT_LOCK(m_mutex, lock); |
83 | | // first do the clean-up of the previous loop run (allowing us to do it in the same critsect) |
84 | 382k | if (nNow) { |
85 | 6.98k | if (local_result.has_value() && !m_result.has_value()) { |
86 | 130 | std::swap(local_result, m_result); |
87 | 130 | } |
88 | 6.98k | nTodo -= nNow; |
89 | 6.98k | if (nTodo == 0 && !fMaster) { |
90 | | // We processed the last element; inform the master it can exit and return the result |
91 | 29 | m_master_cv.notify_one(); |
92 | 29 | } |
93 | 375k | } else { |
94 | | // first iteration |
95 | 375k | nTotal++; |
96 | 375k | } |
97 | | // logically, the do loop starts here |
98 | 388k | while (queue.empty() && !m_request_stop) { |
99 | 376k | if (fMaster && nTodo == 0) { |
100 | 369k | nTotal--; |
101 | 369k | std::optional<R> to_return = std::move(m_result); |
102 | | // reset the status for new work later |
103 | 369k | m_result = std::nullopt; |
104 | | // return the current status |
105 | 369k | return to_return; |
106 | 369k | } |
107 | 6.22k | nIdle++; |
108 | 6.22k | cond.wait(lock); // wait |
109 | 6.22k | nIdle--; |
110 | 6.22k | } |
111 | 12.6k | if (m_request_stop) { |
112 | | // return value does not matter, because m_request_stop is only set in the destructor. |
113 | 5.70k | return std::nullopt; |
114 | 5.70k | } |
115 | | |
116 | | // Decide how many work units to process now. |
117 | | // * Do not try to do everything at once, but aim for increasingly smaller batches so |
118 | | // all workers finish approximately simultaneously. |
119 | | // * Try to account for idle jobs which will instantly start helping. |
120 | | // * Don't do batches smaller than 1 (duh), or larger than nBatchSize. |
121 | 6.92k | nNow = std::max(1U, std::min(nBatchSize, (unsigned int)queue.size() / (nTotal + nIdle + 1))); |
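// Hypothetical worked example of the line above (the numbers are invented for exposition):
// with nBatchSize = 128, queue.size() = 90, nTotal = 4 and nIdle = 2, the divisor is
// 4 + 2 + 1 = 7, so queue.size() / 7 = 12 (integer division) and nNow = 12. Near the end
// of a run, e.g. queue.size() = 3 with the same counts, 3 / 7 = 0 and std::max clamps nNow to 1.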
122 | 6.92k | auto start_it = queue.end() - nNow; |
123 | 6.92k | vChecks.assign(std::make_move_iterator(start_it), std::make_move_iterator(queue.end())); |
124 | 6.92k | queue.erase(start_it, queue.end()); |
125 | | // Check whether we need to do work at all |
126 | 6.92k | do_work = !m_result.has_value(); |
127 | 6.92k | } |
128 | | // execute work |
129 | 6.92k | if (do_work) { |
130 | 3.66k | for (T& check : vChecks) { |
131 | 3.66k | local_result = check(); |
132 | 3.66k | if (local_result.has_value()) break; |
133 | 3.66k | } |
134 | 1.62k | } |
135 | 6.92k | vChecks.clear(); |
136 | 6.92k | } while (true); |
137 | 375k | } |
138 | | |
139 | | public: |
140 | | //! Mutex to ensure only one concurrent CCheckQueueControl |
141 | | Mutex m_control_mutex; |
142 | | |
143 | | //! Create a new check queue |
144 | | explicit CCheckQueue(unsigned int batch_size, int worker_threads_num) |
145 | 3.07k | : nBatchSize(batch_size) |
146 | 3.07k | { |
147 | 3.07k | LogInfo("Script verification uses %d additional threads", worker_threads_num); |
148 | 3.07k | m_worker_threads.reserve(worker_threads_num); |
149 | 8.72k | for (int n = 0; n < worker_threads_num; ++n) { |
150 | 5.65k | m_worker_threads.emplace_back([this, n]() { |
151 | 5.65k | util::ThreadRename(strprintf("scriptch.%i", n)); |
152 | 5.65k | Loop(false /* worker thread */); |
153 | 5.65k | }); |
154 | 5.65k | } |
155 | 3.07k | } |
156 | | |
157 | | // Since this class manages its own resource, namely the thread |
158 | | // pool `m_worker_threads`, copy and move operations are not appropriate. |
159 | | CCheckQueue(const CCheckQueue&) = delete; |
160 | | CCheckQueue& operator=(const CCheckQueue&) = delete; |
161 | | CCheckQueue(CCheckQueue&&) = delete; |
162 | | CCheckQueue& operator=(CCheckQueue&&) = delete; |
163 | | |
164 | | //! Join the worker pool until all checks have completed. If at least one evaluation wasn't successful, return |
165 | | //! its error. |
166 | | std::optional<R> Complete() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) |
167 | 369k | { |
168 | 369k | return Loop(true /* master thread */); |
169 | 369k | } |
170 | | |
171 | | //! Add a batch of checks to the queue |
172 | | void Add(std::vector<T>&& vChecks) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) |
173 | 29.7k | { |
174 | 29.7k | if (vChecks.empty()) { |
175 | 28.9k | return; |
176 | 28.9k | } |
177 | | |
178 | 706 | { |
179 | 706 | LOCK(m_mutex); |
180 | 706 | queue.insert(queue.end(), std::make_move_iterator(vChecks.begin()), std::make_move_iterator(vChecks.end())); |
181 | 706 | nTodo += vChecks.size(); |
182 | 706 | } |
183 | | |
184 | 706 | if (vChecks.size() == 1) { |
185 | 505 | m_worker_cv.notify_one(); |
186 | 505 | } else { |
187 | 201 | m_worker_cv.notify_all(); |
188 | 201 | } |
189 | 706 | } |
190 | | |
191 | | ~CCheckQueue() |
192 | 3.10k | { |
193 | 3.10k | WITH_LOCK(m_mutex, m_request_stop = true); |
194 | 3.10k | m_worker_cv.notify_all(); |
195 | 5.70k | for (std::thread& t : m_worker_threads) { |
196 | 5.70k | t.join(); |
197 | 5.70k | } |
198 | 3.10k | } |
199 | | |
200 | 372k | bool HasThreads() const { return !m_worker_threads.empty(); } |
201 | | }; |
202 | | |
203 | | /** |
204 | | * RAII-style controller object for a CCheckQueue that guarantees the passed |
205 | | * queue is finished before continuing. |
206 | | */ |
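// --- Illustrative sketch (editorial addition, not part of upstream checkqueue.h) ---
// A hedged example of how calling code might drive the controller defined below, reusing the
// hypothetical ToyCheck type sketched near the top of this file; the batch contents are invented.
//
//     CCheckQueue<ToyCheck> queue{/*batch_size=*/128, /*worker_threads_num=*/3};
//     {
//         CCheckQueueControl<ToyCheck> control(&queue); // acquires m_control_mutex for exclusive use
//         std::vector<ToyCheck> batch{{1}, {2}, {-7}};
//         control.Add(std::move(batch));                // hand the work to the worker threads
//         std::optional<int> err = control.Complete();  // master joins the pool until all checks ran
//         // err would hold -7 (the one failing check); std::nullopt would mean every check passed.
//     } // had Complete() not been called, the destructor would call it before releasing the queue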
207 | | template <typename T, typename R = std::remove_cvref_t<decltype(std::declval<T>()().value())>> |
208 | | class CCheckQueueControl |
209 | | { |
210 | | private: |
211 | | CCheckQueue<T, R> * const pqueue; |
212 | | bool fDone; |
213 | | |
214 | | public: |
215 | | CCheckQueueControl() = delete; |
216 | | CCheckQueueControl(const CCheckQueueControl&) = delete; |
217 | | CCheckQueueControl& operator=(const CCheckQueueControl&) = delete; |
218 | 369k | explicit CCheckQueueControl(CCheckQueue<T> * const pqueueIn) : pqueue(pqueueIn), fDone(false) |
219 | 369k | { |
220 | | // passed queue is supposed to be unused, or nullptr |
221 | 369k | if (pqueue != nullptr) { |
222 | 369k | ENTER_CRITICAL_SECTION(pqueue->m_control_mutex); |
223 | 369k | } |
224 | 369k | } |
225 | | |
226 | | std::optional<R> Complete() |
227 | 369k | { |
228 | 369k | if (pqueue == nullptr) return std::nullopt; |
229 | 369k | auto ret = pqueue->Complete(); |
230 | 369k | fDone = true; |
231 | 369k | return ret; |
232 | 369k | } |
233 | | |
234 | | void Add(std::vector<T>&& vChecks) |
235 | 29.6k | { |
236 | 29.6k | if (pqueue != nullptr) { |
237 | 29.6k | pqueue->Add(std::move(vChecks)); |
238 | 29.6k | } |
239 | 29.6k | } |
240 | | |
241 | | ~CCheckQueueControl() |
242 | 369k | { |
243 | 369k | if (!fDone) |
244 | 96 | Complete(); |
245 | 369k | if (pqueue != nullptr) { |
246 | 369k | LEAVE_CRITICAL_SECTION(pqueue->m_control_mutex); |
247 | 369k | } |
248 | 369k | } |
249 | | }; |
250 | | |
251 | | #endif // BITCOIN_CHECKQUEUE_H |