/root/bitcoin/src/index/base.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2017-present The Bitcoin Core developers | 
| 2 |  | // Distributed under the MIT software license, see the accompanying | 
| 3 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 4 |  |  | 
| 5 |  | #include <chainparams.h> | 
| 6 |  | #include <common/args.h> | 
| 7 |  | #include <index/base.h> | 
| 8 |  | #include <interfaces/chain.h> | 
| 9 |  | #include <kernel/chain.h> | 
| 10 |  | #include <logging.h> | 
| 11 |  | #include <node/abort.h> | 
| 12 |  | #include <node/blockstorage.h> | 
| 13 |  | #include <node/context.h> | 
| 14 |  | #include <node/database_args.h> | 
| 15 |  | #include <node/interface_ui.h> | 
| 16 |  | #include <tinyformat.h> | 
| 17 |  | #include <undo.h> | 
| 18 |  | #include <util/string.h> | 
| 19 |  | #include <util/thread.h> | 
| 20 |  | #include <util/translation.h> | 
| 21 |  | #include <validation.h> | 
| 22 |  |  | 
| 23 |  | #include <chrono> | 
| 24 |  | #include <memory> | 
| 25 |  | #include <optional> | 
| 26 |  | #include <stdexcept> | 
| 27 |  | #include <string> | 
| 28 |  | #include <thread> | 
| 29 |  | #include <utility> | 
| 30 |  |  | 
| 31 |  | constexpr uint8_t DB_BEST_BLOCK{'B'}; | 
| 32 |  |  | 
| 33 |  | constexpr auto SYNC_LOG_INTERVAL{30s}; | 
| 34 |  | constexpr auto SYNC_LOCATOR_WRITE_INTERVAL{30s}; | 
| 35 |  |  | 
| 36 |  | template <typename... Args> | 
| 37 |  | void BaseIndex::FatalErrorf(util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args) | 
| 38 | 0 | { | 
| 39 | 0 |     auto message = tfm::format(fmt, args...); | 
| 40 | 0 |     node::AbortNode(m_chain->context()->shutdown_request, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get()); | 
| 41 | 0 | } | 
| 42 |  |  | 
| 43 |  | CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash) | 
| 44 | 0 | { | 
| 45 | 0 |     CBlockLocator locator; | 
| 46 | 0 |     bool found = chain.findBlock(block_hash, interfaces::FoundBlock().locator(locator)); | 
| 47 | 0 |     assert(found); | 
| 48 | 0 |     assert(!locator.IsNull()); | 
| 49 | 0 |     return locator; | 
| 50 | 0 | } | 
| 51 |  |  | 
| 52 |  | BaseIndex::DB::DB(const fs::path& path, size_t n_cache_size, bool f_memory, bool f_wipe, bool f_obfuscate) : | 
| 53 | 0 |     CDBWrapper{DBParams{ | 
| 54 | 0 |         .path = path, | 
| 55 | 0 |         .cache_bytes = n_cache_size, | 
| 56 | 0 |         .memory_only = f_memory, | 
| 57 | 0 |         .wipe_data = f_wipe, | 
| 58 | 0 |         .obfuscate = f_obfuscate, | 
| 59 | 0 |         .options = [] { DBOptions options; node::ReadDatabaseArgs(gArgs, options); return options; }()}} | 
| 60 | 0 | {} | 
| 61 |  |  | 
| 62 |  | bool BaseIndex::DB::ReadBestBlock(CBlockLocator& locator) const | 
| 63 | 0 | { | 
| 64 | 0 |     bool success = Read(DB_BEST_BLOCK, locator); | 
| 65 | 0 |     if (!success) { | 
| 66 | 0 |         locator.SetNull(); | 
| 67 | 0 |     } | 
| 68 | 0 |     return success; | 
| 69 | 0 | } | 
| 70 |  |  | 
| 71 |  | void BaseIndex::DB::WriteBestBlock(CDBBatch& batch, const CBlockLocator& locator) | 
| 72 | 0 | { | 
| 73 | 0 |     batch.Write(DB_BEST_BLOCK, locator); | 
| 74 | 0 | } | 
| 75 |  |  | 
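
The two DB helpers above implement the index's best-block bookkeeping: the locator is stored under the single-byte key DB_BEST_BLOCK ('B') and is written into the same CDBBatch as the index payload so the recorded best block never gets ahead of the committed data. A hedged sketch of the round trip using the members shown in this file (the `tip_hash` variable is hypothetical):

    // Hedged sketch, not part of base.cpp: persist and restore the best-block locator.
    CDBBatch batch{GetDB()};
    // ... stage index-specific writes into `batch` first (CustomCommit does this in Commit()) ...
    GetDB().WriteBestBlock(batch, GetLocator(*m_chain, tip_hash)); // key 'B' -> CBlockLocator
    GetDB().WriteBatch(batch);                                     // one atomic flush

    CBlockLocator restored;
    if (!GetDB().ReadBestBlock(restored)) {
        // First run or wiped DB: ReadBestBlock() returns false and null-initializes the
        // locator, so Init() starts the sync from the genesis block.
    }
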
| 76 |  | BaseIndex::BaseIndex(std::unique_ptr<interfaces::Chain> chain, std::string name) | 
| 77 | 0 |     : m_chain{std::move(chain)}, m_name{std::move(name)} {} | 
| 78 |  |  | 
| 79 |  | BaseIndex::~BaseIndex() | 
| 80 | 0 | { | 
| 81 | 0 |     Interrupt(); | 
| 82 | 0 |     Stop(); | 
| 83 | 0 | } | 
| 84 |  |  | 
| 85 |  | bool BaseIndex::Init() | 
| 86 | 0 | { | 
| 87 | 0 |     AssertLockNotHeld(cs_main); | 
| 88 |  |  | 
| 89 |  |     // May need reset if index is being restarted. | 
| 90 | 0 |     m_interrupt.reset(); | 
| 91 |  |  | 
| 92 |  |     // m_chainstate member gives indexing code access to node internals. It is | 
| 93 |  |     // removed in followup https://github.com/bitcoin/bitcoin/pull/24230 | 
| 94 | 0 |     m_chainstate = WITH_LOCK(::cs_main, | 
| 95 | 0 |         return &m_chain->context()->chainman->GetChainstateForIndexing()); | 
| 96 |  |     // Register to validation interface before setting the 'm_synced' flag, so that | 
| 97 |  |     // callbacks are not missed once m_synced is true. | 
| 98 | 0 |     m_chain->context()->validation_signals->RegisterValidationInterface(this); | 
| 99 |  |  | 
| 100 | 0 |     CBlockLocator locator; | 
| 101 | 0 |     if (!GetDB().ReadBestBlock(locator)) { | 
| 102 | 0 |         locator.SetNull(); | 
| 103 | 0 |     } | 
| 104 |  |  | 
| 105 | 0 |     LOCK(cs_main); | 
| 106 | 0 |     CChain& index_chain = m_chainstate->m_chain; | 
| 107 |  |  | 
| 108 | 0 |     if (locator.IsNull()) { | 
| 109 | 0 |         SetBestBlockIndex(nullptr); | 
| 110 | 0 |     } else { | 
| 111 |  |         // Set the best block to the locator's top block. If it is not part of the | 
| 112 |  |         // best chain, we will rewind to the fork point during index sync. | 
| 113 | 0 |         const CBlockIndex* locator_index{m_chainstate->m_blockman.LookupBlockIndex(locator.vHave.at(0))}; | 
| 114 | 0 |         if (!locator_index) { | 
| 115 | 0 |             return InitError(Untranslated(strprintf("best block of %s not found. Please rebuild the index.", GetName()))); | 
| 116 | 0 |         } | 
| 117 | 0 |         SetBestBlockIndex(locator_index); | 
| 118 | 0 |     } | 
| 119 |  |  | 
| 120 |  |     // Child init | 
| 121 | 0 |     const CBlockIndex* start_block = m_best_block_index.load(); | 
| 122 | 0 |     if (!CustomInit(start_block ? std::make_optional(interfaces::BlockRef{start_block->GetBlockHash(), start_block->nHeight}) : std::nullopt)) { | 
| 123 | 0 |         return false; | 
| 124 | 0 |     } | 
| 125 |  |  | 
| 126 |  |     // Note: this will latch to true immediately if the user starts up with an empty | 
| 127 |  |     // datadir and an index enabled. If this is the case, indexation will happen solely | 
| 128 |  |     // via `BlockConnected` signals until, possibly, the next restart. | 
| 129 | 0 |     m_synced = start_block == index_chain.Tip(); | 
| 130 | 0 |     m_init = true; | 
| 131 | 0 |     return true; | 
| 132 | 0 | } | 
| 133 |  |  | 
| 134 |  | static const CBlockIndex* NextSyncBlock(const CBlockIndex* pindex_prev, CChain& chain) EXCLUSIVE_LOCKS_REQUIRED(cs_main) | 
| 135 | 0 | { | 
| 136 | 0 |     AssertLockHeld(cs_main); | 
| 137 | 0 |  | 
| 138 | 0 |     if (!pindex_prev) { | 
| 139 | 0 |         return chain.Genesis(); | 
| 140 | 0 |     } | 
| 141 |  |  | 
| 142 | 0 |     const CBlockIndex* pindex = chain.Next(pindex_prev); | 
| 143 | 0 |     if (pindex) { | 
| 144 | 0 |         return pindex; | 
| 145 | 0 |     } | 
| 146 |  |  | 
| 147 |  |     // Since the block is not in the chain, return the next block in the chain AFTER the last common ancestor. | 
| 148 |  |     // The caller is responsible for rewinding back to the common ancestor. | 
| 149 | 0 |     return chain.Next(chain.FindFork(pindex_prev)); | 
| 150 | 0 | } | 
| 151 |  |  | 
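
A worked example of the fork handling in NextSyncBlock, with a hypothetical chain shape:

    // Hedged illustration (hypothetical blocks), not part of base.cpp:
    //
    //   active chain:   G - A - B - C
    //                        \
    //   index tip:            B' - C'   (stale branch the index synced before a reorg)
    //
    // With pindex_prev == C', chain.Next(C') is null because C' is not in the active chain,
    // so NextSyncBlock falls through to chain.Next(chain.FindFork(C')). FindFork(C') == A,
    // hence the function returns B. The caller in Sync() then sees pindex_next->pprev (== A)
    // differ from pindex (== C') and calls Rewind(C', A) before indexing B.
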
| 152 |  | bool BaseIndex::ProcessBlock(const CBlockIndex* pindex, const CBlock* block_data) | 
| 153 | 0 | { | 
| 154 | 0 |     interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex, block_data); | 
| 155 | 0 |  | 
| 156 | 0 |     CBlock block; | 
| 157 | 0 |     if (!block_data) { // disk lookup if block data wasn't provided | 
| 158 | 0 |         if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) { | 
| 159 | 0 |             FatalErrorf("Failed to read block %s from disk", | 
| 160 | 0 |                         pindex->GetBlockHash().ToString()); | 
| 161 | 0 |             return false; | 
| 162 | 0 |         } | 
| 163 | 0 |         block_info.data = █ | 
| 164 | 0 |     } | 
| 165 |  |  | 
| 166 | 0 |     CBlockUndo block_undo; | 
| 167 | 0 |     if (CustomOptions().connect_undo_data) { | 
| 168 | 0 |         if (pindex->nHeight > 0 && !m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { | 
| 169 | 0 |             FatalErrorf("Failed to read undo block data %s from disk", | 
| 170 | 0 |                         pindex->GetBlockHash().ToString()); | 
| 171 | 0 |             return false; | 
| 172 | 0 |         } | 
| 173 | 0 |         block_info.undo_data = &block_undo; | 
| 174 | 0 |     } | 
| 175 |  |  | 
| 176 | 0 |     if (!CustomAppend(block_info)) { | 
| 177 | 0 |         FatalErrorf("Failed to write block %s to index database", | 
| 178 | 0 |                     pindex->GetBlockHash().ToString()); | 
| 179 | 0 |         return false; | 
| 180 | 0 |     } | 
| 181 |  |  | 
| 182 | 0 |     return true; | 
| 183 | 0 | } | 
| 184 |  |  | 
| 185 |  | void BaseIndex::Sync() | 
| 186 | 0 | { | 
| 187 | 0 |     const CBlockIndex* pindex = m_best_block_index.load(); | 
| 188 | 0 |     if (!m_synced) { | 
| 189 | 0 |         auto last_log_time{NodeClock::now()}; | 
| 190 | 0 |         auto last_locator_write_time{last_log_time}; | 
| 191 | 0 |         while (true) { | 
| 192 | 0 |             if (m_interrupt) { | 
| 193 | 0 |                 LogInfo("%s: m_interrupt set; exiting ThreadSync", GetName()); | 
| 194 |  |  | 
| 195 | 0 |                 SetBestBlockIndex(pindex); | 
| 196 |  |                 // No need to handle errors in Commit. If it fails, the error will already be | 
| 197 |  |                 // logged. The best way to recover is to continue, as the index cannot be corrupted by | 
| 198 |  |                 // a missed commit to disk for an advanced index state. | 
| 199 | 0 |                 Commit(); | 
| 200 | 0 |                 return; | 
| 201 | 0 |             } | 
| 202 |  |  | 
| 203 | 0 |             const CBlockIndex* pindex_next = WITH_LOCK(cs_main, return NextSyncBlock(pindex, m_chainstate->m_chain)); | 
| 204 |  |             // If pindex_next is null, it means pindex is the chain tip, so | 
| 205 |  |             // commit data indexed so far. | 
| 206 | 0 |             if (!pindex_next) { | 
| 207 | 0 |                 SetBestBlockIndex(pindex); | 
| 208 |  |                 // No need to handle errors in Commit. See rationale above. | 
| 209 | 0 |                 Commit(); | 
| 210 |  |  | 
| 211 |  |                 // If pindex is still the chain tip after committing, exit the | 
| 212 |  |                 // sync loop. It is important for cs_main to be locked while | 
| 213 |  |                 // setting m_synced = true, otherwise a new block could be | 
| 214 |  |                 // attached while m_synced is still false, and it would not be | 
| 215 |  |                 // indexed. | 
| 216 | 0 |                 LOCK(::cs_main); | 
| 217 | 0 |                 pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain); | 
| 218 | 0 |                 if (!pindex_next) { | 
| 219 | 0 |                     m_synced = true; | 
| 220 | 0 |                     break; | 
| 221 | 0 |                 } | 
| 222 | 0 |             } | 
| 223 | 0 |             if (pindex_next->pprev != pindex && !Rewind(pindex, pindex_next->pprev)) { | 
| 224 | 0 |                 FatalErrorf("Failed to rewind %s to a previous chain tip", GetName()); | 
| 225 | 0 |                 return; | 
| 226 | 0 |             } | 
| 227 | 0 |             pindex = pindex_next; | 
| 228 |  |  | 
| 229 |  |  | 
| 230 | 0 |             if (!ProcessBlock(pindex)) return; // error logged internally | 
| 231 |  |  | 
| 232 | 0 |             auto current_time{NodeClock::now()}; | 
| 233 | 0 |             if (current_time - last_log_time >= SYNC_LOG_INTERVAL) { | 
| 234 | 0 |                 LogInfo("Syncing %s with block chain from height %d", GetName(), pindex->nHeight); | 
| 235 | 0 |                 last_log_time = current_time; | 
| 236 | 0 |             } | 
| 237 |  |  | 
| 238 | 0 |             if (current_time - last_locator_write_time >= SYNC_LOCATOR_WRITE_INTERVAL) { | 
| 239 | 0 |                 SetBestBlockIndex(pindex); | 
| 240 | 0 |                 last_locator_write_time = current_time; | 
| 241 |  |                 // No need to handle errors in Commit. See rationale above. | 
| 242 | 0 |                 Commit(); | 
| 243 | 0 |             } | 
| 244 | 0 |         } | 
| 245 | 0 |     } | 
| 246 |  |  | 
| 247 | 0 |     if (pindex) { | 
| 248 | 0 |         LogInfo("%s is enabled at height %d", GetName(), pindex->nHeight); | 
| 249 | 0 |     } else { | 
| 250 | 0 |         LogInfo("%s is enabled", GetName()); | 
| 251 | 0 |     } | 
| 252 | 0 | } | 
| 253 |  |  | 
| 254 |  | bool BaseIndex::Commit() | 
| 255 | 0 | { | 
| 256 |  |     // Don't commit anything if we haven't indexed any block yet | 
| 257 |  |     // (this could happen if init is interrupted). | 
| 258 | 0 |     bool ok = m_best_block_index != nullptr; | 
| 259 | 0 |     if (ok) { | 
| 260 | 0 |         CDBBatch batch(GetDB()); | 
| 261 | 0 |         ok = CustomCommit(batch); | 
| 262 | 0 |         if (ok) { | 
| 263 | 0 |             GetDB().WriteBestBlock(batch, GetLocator(*m_chain, m_best_block_index.load()->GetBlockHash())); | 
| 264 | 0 |             ok = GetDB().WriteBatch(batch); | 
| 265 | 0 |         } | 
| 266 | 0 |     } | 
| 267 | 0 |     if (!ok) { | 
| 268 | 0 |         LogError("Failed to commit latest %s state", GetName()); | 
| 269 | 0 |         return false; | 
| 270 | 0 |     } | 
| 271 | 0 |     return true; | 
| 272 | 0 | } | 
| 273 |  |  | 
| 274 |  | bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_tip) | 
| 275 | 0 | { | 
| 276 | 0 |     assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); | 
| 277 |  |  | 
| 278 | 0 |     CBlock block; | 
| 279 | 0 |     CBlockUndo block_undo; | 
| 280 |  |  | 
| 281 | 0 |     for (const CBlockIndex* iter_tip = current_tip; iter_tip != new_tip; iter_tip = iter_tip->pprev) { | 
| 282 | 0 |         interfaces::BlockInfo block_info = kernel::MakeBlockInfo(iter_tip); | 
| 283 | 0 |         if (CustomOptions().disconnect_data) { | 
| 284 | 0 |             if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) { | 
| 285 | 0 |                 LogError("Failed to read block %s from disk", | 
| 286 | 0 |                          iter_tip->GetBlockHash().ToString()); | 
| 287 | 0 |                 return false; | 
| 288 | 0 |             } | 
| 289 | 0 |             block_info.data = █ | 
| 290 | 0 |         } | 
| 291 | 0 |         if (CustomOptions().disconnect_undo_data && iter_tip->nHeight > 0) { | 
| 292 | 0 |             if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *iter_tip)) { | 
| 293 | 0 |                 return false; | 
| 294 | 0 |             } | 
| 295 | 0 |             block_info.undo_data = &block_undo; | 
| 296 | 0 |         } | 
| 297 | 0 |         if (!CustomRemove(block_info)) { | 
| 298 | 0 |             return false; | 
| 299 | 0 |         } | 
| 300 | 0 |     } | 
| 301 |  |  | 
| 302 |  |     // Don't commit here - the committed index state must never be ahead of the | 
| 303 |  |     // flushed chainstate, otherwise unclean restarts would lead to index corruption. | 
| 304 |  |     // Pruning has a minimum of 288 blocks-to-keep, and getting the index | 
| 305 |  |     // out of sync beyond that is possible but would be the user's fault. | 
| 306 |  |     // In case we reorg beyond the pruned depth, ReadBlock would | 
| 307 |  |     // throw and lead to a graceful shutdown. | 
| 308 | 0 |     SetBestBlockIndex(new_tip); | 
| 309 | 0 |     return true; | 
| 310 | 0 | } | 
| 311 |  |  | 
| 312 |  | void BaseIndex::BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& block, const CBlockIndex* pindex) | 
| 313 | 0 | { | 
| 314 |  |     // Ignore events from the assumed-valid chain; we will process its blocks | 
| 315 |  |     // (sequentially) after it is fully verified by the background chainstate. This | 
| 316 |  |     // is to avoid any out-of-order indexing. | 
| 317 |  |     // | 
| 318 |  |     // TODO at some point we could parameterize whether a particular index can be | 
| 319 |  |     // built out of order, but for now just do the conservative simple thing. | 
| 320 | 0 |     if (role == ChainstateRole::ASSUMEDVALID) { | 
| 321 | 0 |         return; | 
| 322 | 0 |     } | 
| 323 |  |  | 
| 324 |  |     // Ignore BlockConnected signals until we have fully indexed the chain. | 
| 325 | 0 |     if (!m_synced) { | 
| 326 | 0 |         return; | 
| 327 | 0 |     } | 
| 328 |  |  | 
| 329 | 0 |     const CBlockIndex* best_block_index = m_best_block_index.load(); | 
| 330 | 0 |     if (!best_block_index) { | 
| 331 | 0 |         if (pindex->nHeight != 0) { | 
| 332 | 0 |             FatalErrorf("First block connected is not the genesis block (height=%d)", | 
| 333 | 0 |                        pindex->nHeight); | 
| 334 | 0 |             return; | 
| 335 | 0 |         } | 
| 336 | 0 |     } else { | 
| 337 |  |         // Ensure block connects to an ancestor of the current best block. This should be the case | 
| 338 |  |         // most of the time, but may not be immediately after the sync thread catches up and sets | 
| 339 |  |         // m_synced. Consider the case where there is a reorg and the blocks on the stale branch are | 
| 340 |  |         // in the ValidationInterface queue backlog even after the sync thread has caught up to the | 
| 341 |  |         // new chain tip. In this unlikely event, log a warning and let the queue clear. | 
| 342 | 0 |         if (best_block_index->GetAncestor(pindex->nHeight - 1) != pindex->pprev) { | 
| 343 | 0 |             LogWarning("Block %s does not connect to an ancestor of " | 
| 344 | 0 |                       "known best chain (tip=%s); not updating index", | 
| 345 | 0 |                       pindex->GetBlockHash().ToString(), | 
| 346 | 0 |                       best_block_index->GetBlockHash().ToString()); | 
| 347 | 0 |             return; | 
| 348 | 0 |         } | 
| 349 | 0 |         if (best_block_index != pindex->pprev && !Rewind(best_block_index, pindex->pprev)) { | 
| 350 | 0 |             FatalErrorf("Failed to rewind %s to a previous chain tip", | 
| 351 | 0 |                        GetName()); | 
| 352 | 0 |             return; | 
| 353 | 0 |         } | 
| 354 | 0 |     } | 
| 355 |  |  | 
| 356 |  |     // Dispatch block to child class; errors are logged internally and abort the node. | 
| 357 | 0 |     if (ProcessBlock(pindex, block.get())) { | 
| 358 |  |         // Setting the best block index is intentionally the last step of this | 
| 359 |  |         // function, so BlockUntilSyncedToCurrentChain callers waiting for the | 
| 360 |  |         // best block index to be updated can rely on the block being fully | 
| 361 |  |         // processed, and the index object being safe to delete. | 
| 362 | 0 |         SetBestBlockIndex(pindex); | 
| 363 | 0 |     } | 
| 364 | 0 | } | 
| 365 |  |  | 
| 366 |  | void BaseIndex::ChainStateFlushed(ChainstateRole role, const CBlockLocator& locator) | 
| 367 | 0 | { | 
| 368 |  |     // Ignore events from the assumed-valid chain; we will process its blocks | 
| 369 |  |     // (sequentially) after it is fully verified by the background chainstate. | 
| 370 | 0 |     if (role == ChainstateRole::ASSUMEDVALID) { | 
| 371 | 0 |         return; | 
| 372 | 0 |     } | 
| 373 |  |  | 
| 374 | 0 |     if (!m_synced) { | 
| 375 | 0 |         return; | 
| 376 | 0 |     } | 
| 377 |  |  | 
| 378 | 0 |     const uint256& locator_tip_hash = locator.vHave.front(); | 
| 379 | 0 |     const CBlockIndex* locator_tip_index; | 
| 380 | 0 |     { | 
| 381 | 0 |         LOCK(cs_main); | 
| 382 | 0 |         locator_tip_index = m_chainstate->m_blockman.LookupBlockIndex(locator_tip_hash); | 
| 383 | 0 |     } | 
| 384 |  |  | 
| 385 | 0 |     if (!locator_tip_index) { | 
| 386 | 0 |         FatalErrorf("First block (hash=%s) in locator was not found", | 
| 387 | 0 |                    locator_tip_hash.ToString()); | 
| 388 | 0 |         return; | 
| 389 | 0 |     } | 
| 390 |  |  | 
| 391 |  |     // This checks that ChainStateFlushed callbacks are received after BlockConnected. The check may fail | 
| 392 |  |     // immediately after the sync thread catches up and sets m_synced. Consider the case where | 
| 393 |  |     // there is a reorg and the blocks on the stale branch are in the ValidationInterface queue | 
| 394 |  |     // backlog even after the sync thread has caught up to the new chain tip. In this unlikely | 
| 395 |  |     // event, log a warning and let the queue clear. | 
| 396 | 0 |     const CBlockIndex* best_block_index = m_best_block_index.load(); | 
| 397 | 0 |     if (best_block_index->GetAncestor(locator_tip_index->nHeight) != locator_tip_index) { | 
| 398 | 0 |         LogWarning("Locator contains block (hash=%s) not on known best " | 
| 399 | 0 |                   "chain (tip=%s); not writing index locator", | 
| 400 | 0 |                   locator_tip_hash.ToString(), | 
| 401 | 0 |                   best_block_index->GetBlockHash().ToString()); | 
| 402 | 0 |         return; | 
| 403 | 0 |     } | 
| 404 |  |  | 
| 405 |  |     // No need to handle errors in Commit. If it fails, the error will already be logged. The | 
| 406 |  |     // best way to recover is to continue, as the index cannot be corrupted by a missed commit to disk | 
| 407 |  |     // for an advanced index state. | 
| 408 | 0 |     Commit(); | 
| 409 | 0 | } | 
| 410 |  |  | 
| 411 |  | bool BaseIndex::BlockUntilSyncedToCurrentChain() const | 
| 412 | 0 | { | 
| 413 | 0 |     AssertLockNotHeld(cs_main); | 
| 414 | 0 |  | 
| 415 | 0 |     if (!m_synced) { | 
| 416 | 0 |         return false; | 
| 417 | 0 |     } | 
| 418 |  |  | 
| 419 | 0 |     { | 
| 420 |  |         // Skip the queue-draining stuff if we know we're caught up with | 
| 421 |  |         // m_chain.Tip(). | 
| 422 | 0 |         LOCK(cs_main); | 
| 423 | 0 |         const CBlockIndex* chain_tip = m_chainstate->m_chain.Tip(); | 
| 424 | 0 |         const CBlockIndex* best_block_index = m_best_block_index.load(); | 
| 425 | 0 |         if (best_block_index->GetAncestor(chain_tip->nHeight) == chain_tip) { | 
| 426 | 0 |             return true; | 
| 427 | 0 |         } | 
| 428 | 0 |     } | 
| 429 |  |  | 
| 430 | 0 |     LogInfo("%s is catching up on block notifications", GetName()); | 
| 431 | 0 |     m_chain->context()->validation_signals->SyncWithValidationInterfaceQueue(); | 
| 432 | 0 |     return true; | 
| 433 | 0 | } | 
| 434 |  |  | 
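
BlockUntilSyncedToCurrentChain is what client code calls to make index reads consistent with the chain tip it has just observed. A hedged usage sketch (the g_txindex global and the calling RPC context live outside this file):

    // Hedged usage sketch, e.g. from an RPC handler about to query the transaction index:
    if (g_txindex) {
        // Returns false while the initial sync is still running; otherwise drains any pending
        // BlockConnected notifications so the index covers the tip seen before this call.
        g_txindex->BlockUntilSyncedToCurrentChain();
    }
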
| 435 |  | void BaseIndex::Interrupt() | 
| 436 | 0 | { | 
| 437 | 0 |     m_interrupt(); | 
| 438 | 0 | } | 
| 439 |  |  | 
| 440 |  | bool BaseIndex::StartBackgroundSync() | 
| 441 | 0 | { | 
| 442 | 0 |     if (!m_init) throw std::logic_error("Error: Cannot start a non-initialized index"); | 
| 443 |  |  | 
| 444 | 0 |     m_thread_sync = std::thread(&util::TraceThread, GetName(), [this] { Sync(); }); | 
| 445 | 0 |     return true; | 
| 446 | 0 | } | 
| 447 |  |  | 
| 448 |  | void BaseIndex::Stop() | 
| 449 | 0 | { | 
| 450 | 0 |     if (m_chain->context()->validation_signals) { | 
| 451 | 0 |         m_chain->context()->validation_signals->UnregisterValidationInterface(this); | 
| 452 | 0 |     } | 
| 453 |  |  | 
| 454 | 0 |     if (m_thread_sync.joinable()) { | 
| 455 | 0 |         m_thread_sync.join(); | 
| 456 | 0 |     } | 
| 457 | 0 | } | 
| 458 |  |  | 
| 459 |  | IndexSummary BaseIndex::GetSummary() const | 
| 460 | 0 | { | 
| 461 | 0 |     IndexSummary summary{}; | 
| 462 | 0 |     summary.name = GetName(); | 
| 463 | 0 |     summary.synced = m_synced; | 
| 464 | 0 |     if (const auto& pindex = m_best_block_index.load()) { | 
| 465 | 0 |         summary.best_block_height = pindex->nHeight; | 
| 466 | 0 |         summary.best_block_hash = pindex->GetBlockHash(); | 
| 467 | 0 |     } else { | 
| 468 | 0 |         summary.best_block_height = 0; | 
| 469 | 0 |         summary.best_block_hash = m_chain->getBlockHash(0); | 
| 470 | 0 |     } | 
| 471 | 0 |     return summary; | 
| 472 | 0 | } | 
| 473 |  |  | 
| 474 |  | void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) | 
| 475 | 0 | { | 
| 476 | 0 |     assert(!m_chainstate->m_blockman.IsPruneMode() || AllowPrune()); | 
| 477 |  |  | 
| 478 | 0 |     if (AllowPrune() && block) { | 
| 479 | 0 |         node::PruneLockInfo prune_lock; | 
| 480 | 0 |         prune_lock.height_first = block->nHeight; | 
| 481 | 0 |         WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock)); | 
| 482 | 0 |     } | 
| 483 |  |  | 
| 484 |  |     // Intentionally set m_best_block_index as the last step in this function, | 
| 485 |  |     // after updating prune locks above, and after making any other references | 
| 486 |  |     // to *this, so the BlockUntilSyncedToCurrentChain function (which checks | 
| 487 |  |     // m_best_block_index as an optimization) can be used to wait for the last | 
| 488 |  |     // BlockConnected notification and safely assume that prune locks are | 
| 489 |  |     // updated and that the index object is safe to delete. | 
| 490 | 0 |     m_best_block_index = block; | 
| 491 | 0 | } |