/root/bitcoin/src/random.cpp
| Line | Count | Source | 
| 1 |  | // Copyright (c) 2009-2010 Satoshi Nakamoto | 
| 2 |  | // Copyright (c) 2009-present The Bitcoin Core developers | 
| 3 |  | // Distributed under the MIT software license, see the accompanying | 
| 4 |  | // file COPYING or http://www.opensource.org/licenses/mit-license.php. | 
| 5 |  |  | 
| 6 |  | #include <bitcoin-build-config.h> // IWYU pragma: keep | 
| 7 |  |  | 
| 8 |  | #include <random.h> | 
| 9 |  |  | 
| 10 |  | #include <compat/compat.h> | 
| 11 |  | #include <compat/cpuid.h> | 
| 12 |  | #include <crypto/chacha20.h> | 
| 13 |  | #include <crypto/sha256.h> | 
| 14 |  | #include <crypto/sha512.h> | 
| 15 |  | #include <logging.h> | 
| 16 |  | #include <randomenv.h> | 
| 17 |  | #include <span.h> | 
| 18 |  | #include <support/allocators/secure.h> | 
| 19 |  | #include <support/cleanse.h> | 
| 20 |  | #include <sync.h> | 
| 21 |  | #include <util/time.h> | 
| 22 |  |  | 
| 23 |  | #include <array> | 
| 24 |  | #include <cmath> | 
| 25 |  | #include <cstdlib> | 
| 26 |  | #include <optional> | 
| 27 |  | #include <thread> | 
| 28 |  |  | 
| 29 |  | #ifdef WIN32 | 
| 30 |  | #include <bcrypt.h> | 
| 31 |  | #else | 
| 32 |  | #include <fcntl.h> | 
| 33 |  | #include <sys/time.h> | 
| 34 |  | #endif | 
| 35 |  |  | 
| 36 |  | #if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__)) | 
| 37 |  | #include <sys/random.h> | 
| 38 |  | #endif | 
| 39 |  |  | 
| 40 |  | #ifdef HAVE_SYSCTL_ARND | 
| 41 |  | #include <sys/sysctl.h> | 
| 42 |  | #endif | 
| 43 |  |  | 
| 44 |  | namespace { | 
| 45 |  |  | 
| 46 |  | /* Number of random bytes returned by GetOSRand. | 
| 47 |  |  * When changing this constant, make sure to change all call sites, and make | 
| 48 |  |  * sure that the underlying OS APIs for all platforms support the number. | 
| 49 |  |  * (many cap out at 256 bytes). | 
| 50 |  |  */ | 
| 51 |  | static const int NUM_OS_RANDOM_BYTES = 32; | 
| 52 |  |  | 
| 53 |  |  | 
| 54 |  | [[noreturn]] void RandFailure() | 
| 55 | 0 | { | 
| 56 | 0 |     LogError("Failed to read randomness, aborting\n"); | 
| 57 | 0 |     std::abort(); | 
| 58 | 0 | } | 
| 59 |  |  | 
| 60 |  | inline int64_t GetPerformanceCounter() noexcept | 
| 61 | 100k | { | 
| 62 |  |     // Read the hardware time stamp counter when available. | 
| 63 |  |     // See https://en.wikipedia.org/wiki/Time_Stamp_Counter for more information. | 
| 64 |  | #if defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)) | 
| 65 |  |     return __rdtsc(); | 
| 66 |  | #elif !defined(_MSC_VER) && defined(__i386__) | 
| 67 |  |     uint64_t r = 0; | 
| 68 |  |     __asm__ volatile ("rdtsc" : "=A"(r)); // Constrain the r variable to the eax:edx pair. | 
| 69 |  |     return r; | 
| 70 |  | #elif !defined(_MSC_VER) && (defined(__x86_64__) || defined(__amd64__)) | 
| 71 |  |     uint64_t r1 = 0, r2 = 0; | 
| 72 | 100k |     __asm__ volatile ("rdtsc" : "=a"(r1), "=d"(r2)); // Constrain r1 to rax and r2 to rdx. | 
| 73 | 100k |     return (r2 << 32) | r1; | 
| 74 |  | #else | 
| 75 |  |     // Fall back to using standard library clock (usually microsecond or nanosecond precision) | 
| 76 |  |     return std::chrono::high_resolution_clock::now().time_since_epoch().count(); | 
| 77 |  | #endif | 
| 78 | 100k | } | 
| 79 |  |  | 
| 80 |  | #ifdef HAVE_GETCPUID | 
| 81 |  | bool g_rdrand_supported = false; | 
| 82 |  | bool g_rdseed_supported = false; | 
| 83 |  | constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000; | 
| 84 |  | constexpr uint32_t CPUID_F7_EBX_RDSEED = 0x00040000; | 
| 85 |  | #ifdef bit_RDRND | 
| 86 |  | static_assert(CPUID_F1_ECX_RDRAND == bit_RDRND, "Unexpected value for bit_RDRND"); | 
| 87 |  | #endif | 
| 88 |  | #ifdef bit_RDSEED | 
| 89 |  | static_assert(CPUID_F7_EBX_RDSEED == bit_RDSEED, "Unexpected value for bit_RDSEED"); | 
| 90 |  | #endif | 
| 91 |  |  | 
| 92 |  | void InitHardwareRand() | 
| 93 | 0 | { | 
| 94 | 0 |     uint32_t eax, ebx, ecx, edx; | 
| 95 | 0 |     GetCPUID(1, 0, eax, ebx, ecx, edx); | 
| 96 | 0 |     if (ecx & CPUID_F1_ECX_RDRAND) { | 
| 97 | 0 |         g_rdrand_supported = true; | 
| 98 | 0 |     } | 
| 99 | 0 |     GetCPUID(7, 0, eax, ebx, ecx, edx); | 
| 100 | 0 |     if (ebx & CPUID_F7_EBX_RDSEED) { | 
| 101 | 0 |         g_rdseed_supported = true; | 
| 102 | 0 |     } | 
| 103 | 0 | } | 
| 104 |  |  | 
| 105 |  | void ReportHardwareRand() | 
| 106 | 0 | { | 
| 107 |  |     // This must be done in a separate function, as InitHardwareRand() may be indirectly called | 
| 108 |  |     // from global constructors, before logging is initialized. | 
| 109 | 0 |     if (g_rdseed_supported) { | 
| 110 | 0 |         LogInfo("Using RdSeed as an additional entropy source"); | 
| 111 | 0 |     } | 
| 112 | 0 |     if (g_rdrand_supported) { | 
| 113 | 0 |         LogInfo("Using RdRand as an additional entropy source"); | 
| 114 | 0 |     } | 
| 115 | 0 | } | 
| 116 |  |  | 
| 117 |  | /** Read 64 bits of entropy using rdrand. | 
| 118 |  |  * | 
| 119 |  |  * Must only be called when RdRand is supported. | 
| 120 |  |  */ | 
| 121 |  | uint64_t GetRdRand() noexcept | 
| 122 | 100k | { | 
| 123 |  |     // RdRand may very rarely fail. Invoke it up to 10 times in a loop to reduce this risk. | 
| 124 |  | #ifdef __i386__ | 
| 125 |  |     uint8_t ok = 0; | 
| 126 |  |     // Initialize to 0 to silence a compiler warning that r1 or r2 may be used | 
| 127 |  |     // uninitialized. Even if rdrand fails (!ok) it will set the output to 0, | 
| 128 |  |     // but there is no way that the compiler could know that. | 
| 129 |  |     uint32_t r1 = 0, r2 = 0; | 
| 130 |  |     for (int i = 0; i < 10; ++i) { | 
| 131 |  |         __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %eax | 
| 132 |  |         if (ok) break; | 
| 133 |  |     } | 
| 134 |  |     for (int i = 0; i < 10; ++i) { | 
| 135 |  |         __asm__ volatile (".byte 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdrand %eax | 
| 136 |  |         if (ok) break; | 
| 137 |  |     } | 
| 138 |  |     return (((uint64_t)r2) << 32) | r1; | 
| 139 |  | #elif defined(__x86_64__) || defined(__amd64__) | 
| 140 |  |     uint8_t ok = 0; | 
| 141 | 100k |     uint64_t r1 = 0; // See above why we initialize to 0. | 
| 142 | 100k |     for (int i = 0; i < 10; ++i) { | 
| 143 | 100k |         __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf0; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdrand %rax | 
| 144 | 100k |         if (ok) break; | 
| 145 | 100k |     } | 
| 146 | 100k |     return r1; | 
| 147 |  | #else | 
| 148 |  | #error "RdRand is only supported on x86 and x86_64" | 
| 149 |  | #endif | 
| 150 | 100k | } | 
| 151 |  |  | 
| 152 |  | /** Read 64 bits of entropy using rdseed. | 
| 153 |  |  * | 
| 154 |  |  * Must only be called when RdSeed is supported. | 
| 155 |  |  */ | 
| 156 |  | uint64_t GetRdSeed() noexcept | 
| 157 | 0 | { | 
| 158 |  |     // RdSeed may fail when the HW RNG is overloaded. Loop indefinitely until enough entropy is gathered, | 
| 159 |  |     // but pause after every failure. | 
| 160 |  | #ifdef __i386__ | 
| 161 |  |     uint8_t ok = 0; | 
| 162 |  |     uint32_t r1, r2; | 
| 163 |  |     do { | 
| 164 |  |         __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %eax | 
| 165 |  |         if (ok) break; | 
| 166 |  |         __asm__ volatile ("pause"); | 
| 167 |  |     } while(true); | 
| 168 |  |     do { | 
| 169 |  |         __asm__ volatile (".byte 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r2), "=q"(ok) :: "cc"); // rdseed %eax | 
| 170 |  |         if (ok) break; | 
| 171 |  |         __asm__ volatile ("pause"); | 
| 172 |  |     } while(true); | 
| 173 |  |     return (((uint64_t)r2) << 32) | r1; | 
| 174 |  | #elif defined(__x86_64__) || defined(__amd64__) | 
| 175 |  |     uint8_t ok; | 
| 176 | 0 |     uint64_t r1; | 
| 177 | 0 |     do { | 
| 178 | 0 |         __asm__ volatile (".byte 0x48, 0x0f, 0xc7, 0xf8; setc %1" : "=a"(r1), "=q"(ok) :: "cc"); // rdseed %rax | 
| 179 | 0 |         if (ok) break; | 
| 180 | 0 |         __asm__ volatile ("pause"); | 
| 181 | 0 |     } while(true); | 
| 182 | 0 |     return r1; | 
| 183 |  | #else | 
| 184 |  | #error "RdSeed is only supported on x86 and x86_64" | 
| 185 |  | #endif | 
| 186 | 0 | } | 
| 187 |  |  | 
| 188 |  | #else | 
| 189 |  | /* Access to other hardware random number generators could be added here later, | 
| 190 |  |  * assuming it is sufficiently fast (in the order of a few hundred CPU cycles). | 
| 191 |  |  * Slower sources should probably be invoked separately, and/or only from | 
| 192 |  |  * RandAddPeriodic (which is called once a minute). | 
| 193 |  |  */ | 
| 194 |  | void InitHardwareRand() {} | 
| 195 |  | void ReportHardwareRand() {} | 
| 196 |  | #endif | 
| 197 |  |  | 
| 198 |  | /** Add 64 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */ | 
| 199 | 100k | void SeedHardwareFast(CSHA512& hasher) noexcept { | 
| 200 | 100k | #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) | 
| 201 | 100k |     if (g_rdrand_supported) { | 
| 202 | 100k |         uint64_t out = GetRdRand(); | 
| 203 | 100k |         hasher.Write((const unsigned char*)&out, sizeof(out)); | 
| 204 | 100k |         return; | 
| 205 | 100k |     } | 
| 206 | 100k | #endif | 
| 207 | 100k | } | 
| 208 |  |  | 
| 209 |  | /** Add 256 bits of entropy gathered from hardware to hasher. Do nothing if not supported. */ | 
| 210 | 0 | void SeedHardwareSlow(CSHA512& hasher) noexcept { | 
| 211 | 0 | #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) | 
| 212 |  |     // When we want 256 bits of entropy, prefer RdSeed over RdRand, as it's | 
| 213 |  |     // guaranteed to produce independent randomness on every call. | 
| 214 | 0 |     if (g_rdseed_supported) { | 
| 215 | 0 |         for (int i = 0; i < 4; ++i) { | 
| 216 | 0 |             uint64_t out = GetRdSeed(); | 
| 217 | 0 |             hasher.Write((const unsigned char*)&out, sizeof(out)); | 
| 218 | 0 |         } | 
| 219 | 0 |         return; | 
| 220 | 0 |     } | 
| 221 |  |     // When falling back to RdRand, XOR together the results of 1024 invocations. | 
| 222 |  |     // This guarantees a reseeding occurs between each. | 
| 223 | 0 |     if (g_rdrand_supported) { | 
| 224 | 0 |         for (int i = 0; i < 4; ++i) { | 
| 225 | 0 |             uint64_t out = 0; | 
| 226 | 0 |             for (int j = 0; j < 1024; ++j) out ^= GetRdRand(); | 
| 227 | 0 |             hasher.Write((const unsigned char*)&out, sizeof(out)); | 
| 228 | 0 |         } | 
| 229 | 0 |         return; | 
| 230 | 0 |     } | 
| 231 | 0 | #endif | 
| 232 | 0 | } | 
| 233 |  |  | 
| 234 |  | /** Use repeated SHA512 to strengthen the randomness in seed32, and feed into hasher. */ | 
| 235 |  | void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration dur, CSHA512& hasher) noexcept | 
| 236 | 0 | { | 
| 237 | 0 |     CSHA512 inner_hasher; | 
| 238 | 0 |     inner_hasher.Write(seed, sizeof(seed)); | 
| 239 |  |  | 
| 240 |  |     // Hash loop | 
| 241 | 0 |     unsigned char buffer[64]; | 
| 242 | 0 |     const auto stop{SteadyClock::now() + dur}; | 
| 243 | 0 |     do { | 
| 244 | 0 |         for (int i = 0; i < 1000; ++i) { | 
| 245 | 0 |             inner_hasher.Finalize(buffer); | 
| 246 | 0 |             inner_hasher.Reset(); | 
| 247 | 0 |             inner_hasher.Write(buffer, sizeof(buffer)); | 
| 248 | 0 |         } | 
| 249 |  |         // Benchmark operation and feed it into outer hasher. | 
| 250 | 0 |         int64_t perf = GetPerformanceCounter(); | 
| 251 | 0 |         hasher.Write((const unsigned char*)&perf, sizeof(perf)); | 
| 252 | 0 |     } while (SteadyClock::now() < stop); | 
| 253 |  |  | 
| 254 |  |     // Produce output from inner state and feed it to outer hasher. | 
| 255 | 0 |     inner_hasher.Finalize(buffer); | 
| 256 | 0 |     hasher.Write(buffer, sizeof(buffer)); | 
| 257 |  |     // Try to clean up. | 
| 258 | 0 |     inner_hasher.Reset(); | 
| 259 | 0 |     memory_cleanse(buffer, sizeof(buffer)); | 
| 260 | 0 | } | 
| 261 |  |  | 
| 262 |  | #ifndef WIN32 | 
| 263 |  | /** Fallback: get 32 bytes of system entropy from /dev/urandom. The most | 
| 264 |  |  * compatible way to get cryptographic randomness on UNIX-ish platforms. | 
| 265 |  |  */ | 
| 266 |  | [[maybe_unused]] void GetDevURandom(unsigned char *ent32) | 
| 267 | 0 | { | 
| 268 | 0 |     int f = open("/dev/urandom", O_RDONLY); | 
| 269 | 0 |     if (f == -1) { | 
| 270 | 0 |         RandFailure(); | 
| 271 | 0 |     } | 
| 272 | 0 |     int have = 0; | 
| 273 | 0 |     do { | 
| 274 | 0 |         ssize_t n = read(f, ent32 + have, NUM_OS_RANDOM_BYTES - have); | 
| 275 | 0 |         if (n <= 0 || n + have > NUM_OS_RANDOM_BYTES) { | 
| 276 | 0 |             close(f); | 
| 277 | 0 |             RandFailure(); | 
| 278 | 0 |         } | 
| 279 | 0 |         have += n; | 
| 280 | 0 |     } while (have < NUM_OS_RANDOM_BYTES); | 
| 281 | 0 |     close(f); | 
| 282 | 0 | } | 
| 283 |  | #endif | 
| 284 |  |  | 
| 285 |  | /** Get 32 bytes of system entropy. */ | 
| 286 |  | void GetOSRand(unsigned char *ent32) | 
| 287 | 0 | { | 
| 288 |  | #if defined(WIN32) | 
| 289 |  |     constexpr uint32_t STATUS_SUCCESS{0x00000000}; | 
| 290 |  |     NTSTATUS status = BCryptGenRandom(/*hAlgorithm=*/NULL, | 
| 291 |  |                                       /*pbBuffer=*/ent32, | 
| 292 |  |                                       /*cbBuffer=*/NUM_OS_RANDOM_BYTES, | 
| 293 |  |                                       /*dwFlags=*/BCRYPT_USE_SYSTEM_PREFERRED_RNG); | 
| 294 |  |  | 
| 295 |  |     if (status != STATUS_SUCCESS) { | 
| 296 |  |         RandFailure(); | 
| 297 |  |     } | 
| 298 |  | #elif defined(HAVE_GETRANDOM) | 
| 299 |  |     /* Linux. From the getrandom(2) man page: | 
| 300 |  |      * "If the urandom source has been initialized, reads of up to 256 bytes | 
| 301 |  |      * will always return as many bytes as requested and will not be | 
| 302 |  |      * interrupted by signals." | 
| 303 |  |      */ | 
| 304 | 0 |     if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) { | 
| 305 | 0 |         RandFailure(); | 
| 306 | 0 |     } | 
| 307 |  | #elif defined(__OpenBSD__) | 
| 308 |  |     /* OpenBSD. From the arc4random(3) man page: | 
| 309 |  |        "Use of these functions is encouraged for almost all random number | 
| 310 |  |         consumption because the other interfaces are deficient in either | 
| 311 |  |         quality, portability, standardization, or availability." | 
| 312 |  |        The function call is always successful. | 
| 313 |  |      */ | 
| 314 |  |     arc4random_buf(ent32, NUM_OS_RANDOM_BYTES); | 
| 315 |  | #elif defined(HAVE_GETENTROPY_RAND) && defined(__APPLE__) | 
| 316 |  |     if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { | 
| 317 |  |         RandFailure(); | 
| 318 |  |     } | 
| 319 |  | #elif defined(HAVE_SYSCTL_ARND) | 
| 320 |  |     /* FreeBSD, NetBSD and similar. It is possible for the call to return fewer | 
| 321 |  |      * bytes than requested, so we need to read in a loop. | 
| 322 |  |      */ | 
| 323 |  |     static int name[2] = {CTL_KERN, KERN_ARND}; | 
| 324 |  |     int have = 0; | 
| 325 |  |     do { | 
| 326 |  |         size_t len = NUM_OS_RANDOM_BYTES - have; | 
| 327 |  |         if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) { | 
| 328 |  |             RandFailure(); | 
| 329 |  |         } | 
| 330 |  |         have += len; | 
| 331 |  |     } while (have < NUM_OS_RANDOM_BYTES); | 
| 332 |  | #else | 
| 333 |  |     /* Fall back to /dev/urandom if there is no specific method implemented to | 
| 334 |  |      * get system entropy for this OS. | 
| 335 |  |      */ | 
| 336 |  |     GetDevURandom(ent32); | 
| 337 |  | #endif | 
| 338 | 0 | } | 
| 339 |  |  | 
| 340 |  | class RNGState { | 
| 341 |  |     Mutex m_mutex; | 
| 342 |  |     /* The RNG state consists of 256 bits of entropy, taken from one | 
| 343 |  |      * operation's SHA512 output and fed as input to the next one. | 
| 344 |  |      * Carrying 256 bits of entropy should be sufficient to guarantee | 
| 345 |  |      * unpredictability as long as any entropy source was ever unpredictable | 
| 346 |  |      * to an attacker. To protect against situations where an attacker might | 
| 347 |  |      * observe the RNG's state, fresh entropy is always mixed when | 
| 348 |  |      * GetStrongRandBytes is called. | 
| 349 |  |      */ | 
| 350 |  |     unsigned char m_state[32] GUARDED_BY(m_mutex) = {0}; | 
| 351 |  |     uint64_t m_counter GUARDED_BY(m_mutex) = 0; | 
| 352 |  |     bool m_strongly_seeded GUARDED_BY(m_mutex) = false; | 
| 353 |  |  | 
| 354 |  |     /** If not nullopt, the output of this RNGState is redirected and drawn from here | 
| 355 |  |      *  (unless always_use_real_rng is passed to MixExtract). */ | 
| 356 |  |     std::optional<ChaCha20> m_deterministic_prng GUARDED_BY(m_mutex); | 
| 357 |  |  | 
| 358 |  |     Mutex m_events_mutex; | 
| 359 |  |     CSHA256 m_events_hasher GUARDED_BY(m_events_mutex); | 
| 360 |  |  | 
| 361 |  | public: | 
| 362 |  |     RNGState() noexcept | 
| 363 | 0 |     { | 
| 364 | 0 |         InitHardwareRand(); | 
| 365 | 0 |     } | 
| 366 |  |  | 
| 367 | 0 |     ~RNGState() = default; | 
| 368 |  |  | 
| 369 |  |     void AddEvent(uint32_t event_info) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex) | 
| 370 | 0 |     { | 
| 371 | 0 |         LOCK(m_events_mutex); | 
| 372 |  |  | 
| 373 | 0 |         m_events_hasher.Write((const unsigned char *)&event_info, sizeof(event_info)); | 
| 374 |  |         // Get the low four bytes of the performance counter. This translates to roughly the | 
| 375 |  |         // subsecond part. | 
| 376 | 0 |         uint32_t perfcounter = (GetPerformanceCounter() & 0xffffffff); | 
| 377 | 0 |         m_events_hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter)); | 
| 378 | 0 |     } | 
| 379 |  |  | 
| 380 |  |     /** | 
| 381 |  |      * Feed (the hash of) all events added through AddEvent() to hasher. | 
| 382 |  |      */ | 
| 383 |  |     void SeedEvents(CSHA512& hasher) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_events_mutex) | 
| 384 | 0 |     { | 
| 385 |  |         // We use only SHA256 for the events hashing to get the ASM speedups we have for SHA256, | 
| 386 |  |         // since we want it to be fast, as network peers may be able to trigger it repeatedly. | 
| 387 | 0 |         LOCK(m_events_mutex); | 
| 388 |  |  | 
| 389 | 0 |         unsigned char events_hash[32]; | 
| 390 | 0 |         m_events_hasher.Finalize(events_hash); | 
| 391 | 0 |         hasher.Write(events_hash, 32); | 
| 392 |  |  | 
| 393 |  |         // Re-initialize the hasher with the finalized state to use later. | 
| 394 | 0 |         m_events_hasher.Reset(); | 
| 395 | 0 |         m_events_hasher.Write(events_hash, 32); | 
| 396 | 0 |     } | 
| 397 |  |  | 
| 398 |  |     /** Make the output of MixExtract (unless always_use_real_rng) deterministic, with specified seed. */ | 
| 399 |  |     void MakeDeterministic(const uint256& seed) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) | 
| 400 | 559 |     { | 
| 401 | 559 |         LOCK(m_mutex); | 
| 402 | 559 |         m_deterministic_prng.emplace(MakeByteSpan(seed)); | 
| 403 | 559 |     } | 
| 404 |  |  | 
| 405 |  |     /** Extract up to 32 bytes of entropy from the RNG state, mixing in new entropy from hasher. | 
| 406 |  |      * | 
| 407 |  |      * If this function has never been called with strong_seed = true, false is returned. | 
| 408 |  |      * | 
| 409 |  |      * If always_use_real_rng is false, and MakeDeterministic has been called before, output | 
| 410 |  |      * from the deterministic PRNG instead. | 
| 411 |  |      */ | 
| 412 |  |     bool MixExtract(unsigned char* out, size_t num, CSHA512&& hasher, bool strong_seed, bool always_use_real_rng) noexcept EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) | 
| 413 | 100k |     { | 
| 414 | 100k |         assert(num <= 32); | 
| 415 | 100k |         unsigned char buf[64]; | 
| 416 | 100k |         static_assert(sizeof(buf) == CSHA512::OUTPUT_SIZE, "Buffer needs to have hasher's output size"); | 
| 417 | 100k |         bool ret; | 
| 418 | 100k |         { | 
| 419 | 100k |             LOCK(m_mutex); | 
| 420 | 100k |             ret = (m_strongly_seeded |= strong_seed); | 
| 421 |  |             // Write the current state of the RNG into the hasher | 
| 422 | 100k |             hasher.Write(m_state, 32); | 
| 423 |  |             // Write a new counter number into the state | 
| 424 | 100k |             hasher.Write((const unsigned char*)&m_counter, sizeof(m_counter)); | 
| 425 | 100k |             ++m_counter; | 
| 426 |  |             // Finalize the hasher | 
| 427 | 100k |             hasher.Finalize(buf); | 
| 428 |  |             // Store the last 32 bytes of the hash output as new RNG state. | 
| 429 | 100k |             memcpy(m_state, buf + 32, 32); | 
| 430 |  |             // Handle requests for deterministic randomness. | 
| 431 | 100k |             if (!always_use_real_rng && m_deterministic_prng.has_value()) [[unlikely]] { | 
| 432 |  |                 // Overwrite the beginning of buf, which will be used for output. | 
| 433 | 100k |                 m_deterministic_prng->Keystream(std::as_writable_bytes(std::span{buf, num})); | 
| 434 |  |                 // Do not require strong seeding for deterministic output. | 
| 435 | 100k |                 ret = true; | 
| 436 | 100k |             } | 
| 437 | 100k |         } | 
| 438 |  |         // If desired, copy (up to) the first 32 bytes of the hash output as output. | 
| 439 | 100k |         if (num) { | 
| 440 | 100k |             assert(out != nullptr); | 
| 441 | 100k |             memcpy(out, buf, num); | 
| 442 | 100k |         } | 
| 443 |  |         // Best effort cleanup of internal state | 
| 444 | 100k |         hasher.Reset(); | 
| 445 | 100k |         memory_cleanse(buf, 64); | 
| 446 | 100k |         return ret; | 
| 447 | 100k |     } | 
| 448 |  | }; | 
| 449 |  |  | 
| 450 |  | RNGState& GetRNGState() noexcept | 
| 451 | 101k | { | 
| 452 |  |     // This idiom relies on the guarantee that static variables are initialized | 
| 453 |  |     // on first call, even when multiple parallel calls are permitted. | 
| 454 | 101k |     static std::vector<RNGState, secure_allocator<RNGState>> g_rng(1); | 
| 455 | 101k |     return g_rng[0]; | 
| 456 | 101k | } | 
| 457 |  |  | 
| 458 |  | /* A note on the use of noexcept in the seeding functions below: | 
| 459 |  |  * | 
| 460 |  |  * None of the RNG code should ever throw any exception. | 
| 461 |  |  */ | 
| 462 |  |  | 
| 463 |  | void SeedTimestamp(CSHA512& hasher) noexcept | 
| 464 | 100k | { | 
| 465 | 100k |     int64_t perfcounter = GetPerformanceCounter(); | 
| 466 | 100k |     hasher.Write((const unsigned char*)&perfcounter, sizeof(perfcounter)); | 
| 467 | 100k | } | 
| 468 |  |  | 
| 469 |  | void SeedFast(CSHA512& hasher) noexcept | 
| 470 | 100k | { | 
| 471 | 100k |     unsigned char buffer[32]; | 
| 472 |  |  | 
| 473 |  |     // Stack pointer to indirectly commit to thread/callstack | 
| 474 | 100k |     const unsigned char* ptr = buffer; | 
| 475 | 100k |     hasher.Write((const unsigned char*)&ptr, sizeof(ptr)); | 
| 476 |  |  | 
| 477 |  |     // Hardware randomness is very fast when available; use it always. | 
| 478 | 100k |     SeedHardwareFast(hasher); | 
| 479 |  |  | 
| 480 |  |     // High-precision timestamp | 
| 481 | 100k |     SeedTimestamp(hasher); | 
| 482 | 100k | } | 
| 483 |  |  | 
| 484 |  | void SeedSlow(CSHA512& hasher, RNGState& rng) noexcept | 
| 485 | 0 | { | 
| 486 | 0 |     unsigned char buffer[32]; | 
| 487 |  |  | 
| 488 |  |     // Everything that the 'fast' seeder includes | 
| 489 | 0 |     SeedFast(hasher); | 
| 490 |  |  | 
| 491 |  |     // OS randomness | 
| 492 | 0 |     GetOSRand(buffer); | 
| 493 | 0 |     hasher.Write(buffer, sizeof(buffer)); | 
| 494 |  |  | 
| 495 |  |     // Add the events hasher into the mix | 
| 496 | 0 |     rng.SeedEvents(hasher); | 
| 497 |  |  | 
| 498 |  |     // High-precision timestamp. | 
| 499 |  |     // | 
| 500 |  |     // Note that we also commit to a timestamp in the Fast seeder, so we indirectly commit to a | 
| 501 |  |     // benchmark of all the entropy gathering sources in this function. | 
| 502 | 0 |     SeedTimestamp(hasher); | 
| 503 | 0 | } | 
| 504 |  |  | 
| 505 |  | /** Extract entropy from rng, strengthen it, and feed it into hasher. */ | 
| 506 |  | void SeedStrengthen(CSHA512& hasher, RNGState& rng, SteadyClock::duration dur) noexcept | 
| 507 | 0 | { | 
| 508 |  |     // Generate 32 bytes of entropy from the RNG, and a copy of the entropy already in hasher. | 
| 509 |  |     // Never use the deterministic PRNG for this, as the result is only used internally. | 
| 510 | 0 |     unsigned char strengthen_seed[32]; | 
| 511 | 0 |     rng.MixExtract(strengthen_seed, sizeof(strengthen_seed), CSHA512(hasher), false, /*always_use_real_rng=*/true); | 
| 512 |  |     // Strengthen the seed, and feed it into hasher. | 
| 513 | 0 |     Strengthen(strengthen_seed, dur, hasher); | 
| 514 | 0 | } | 
| 515 |  |  | 
| 516 |  | void SeedPeriodic(CSHA512& hasher, RNGState& rng) noexcept | 
| 517 | 0 | { | 
| 518 |  |     // Everything that the 'fast' seeder includes | 
| 519 | 0 |     SeedFast(hasher); | 
| 520 |  |  | 
| 521 |  |     // High-precision timestamp | 
| 522 | 0 |     SeedTimestamp(hasher); | 
| 523 |  |  | 
| 524 |  |     // Add the events hasher into the mix | 
| 525 | 0 |     rng.SeedEvents(hasher); | 
| 526 |  |  | 
| 527 |  |     // Dynamic environment data (clocks, resource usage, ...) | 
| 528 | 0 |     auto old_size = hasher.Size(); | 
| 529 | 0 |     RandAddDynamicEnv(hasher); | 
| 530 | 0 |     LogDebug(BCLog::RAND, "Feeding %i bytes of dynamic environment data into RNG\n", hasher.Size() - old_size); | 
| 531 |  |  | 
| 532 |  |     // Strengthen for 10 ms | 
| 533 | 0 |     SeedStrengthen(hasher, rng, 10ms); | 
| 534 | 0 | } | 
| 535 |  |  | 
| 536 |  | void SeedStartup(CSHA512& hasher, RNGState& rng) noexcept | 
| 537 | 0 | { | 
| 538 |  |     // Gather 256 bits of hardware randomness, if available | 
| 539 | 0 |     SeedHardwareSlow(hasher); | 
| 540 |  |  | 
| 541 |  |     // Everything that the 'slow' seeder includes. | 
| 542 | 0 |     SeedSlow(hasher, rng); | 
| 543 |  |  | 
| 544 |  |     // Dynamic environment data (clocks, resource usage, ...) | 
| 545 | 0 |     auto old_size = hasher.Size(); | 
| 546 | 0 |     RandAddDynamicEnv(hasher); | 
| 547 |  |  | 
| 548 |  |     // Static environment data | 
| 549 | 0 |     RandAddStaticEnv(hasher); | 
| 550 | 0 |     LogDebug(BCLog::RAND, "Feeding %i bytes of environment data into RNG\n", hasher.Size() - old_size); | 
| 551 |  |  | 
| 552 |  |     // Strengthen for 100 ms | 
| 553 | 0 |     SeedStrengthen(hasher, rng, 100ms); | 
| 554 | 0 | } | 
| 555 |  |  | 
| 556 |  | enum class RNGLevel { | 
| 557 |  |     FAST, //!< Automatically called by GetRandBytes | 
| 558 |  |     SLOW, //!< Automatically called by GetStrongRandBytes | 
| 559 |  |     PERIODIC, //!< Called by RandAddPeriodic() | 
| 560 |  | }; | 
| 561 |  |  | 
| 562 |  | void ProcRand(unsigned char* out, int num, RNGLevel level, bool always_use_real_rng) noexcept | 
| 563 | 100k | { | 
| 564 |  |     // Make sure the RNG is initialized first (as all Seed* function possibly need hwrand to be available). | 
| 565 | 100k |     RNGState& rng = GetRNGState(); | 
| 566 |  |  | 
| 567 | 100k |     assert(num <= 32); | 
| 568 |  |  | 
| 569 | 100k |     CSHA512 hasher; | 
| 570 | 100k |     switch (level) { | 
| 571 | 100k |     case RNGLevel::FAST: | 
| 572 | 100k |         SeedFast(hasher); | 
| 573 | 100k |         break; | 
| 574 | 0 |     case RNGLevel::SLOW: | 
| 575 | 0 |         SeedSlow(hasher, rng); | 
| 576 | 0 |         break; | 
| 577 | 0 |     case RNGLevel::PERIODIC: | 
| 578 | 0 |         SeedPeriodic(hasher, rng); | 
| 579 | 0 |         break; | 
| 580 | 100k |     } | 
| 581 |  |  | 
| 582 |  |     // Combine with and update state | 
| 583 | 100k |     if (!rng.MixExtract(out, num, std::move(hasher), false, always_use_real_rng)) { | 
| 584 |  |         // On the first invocation, also seed with SeedStartup(). | 
| 585 | 0 |         CSHA512 startup_hasher; | 
| 586 | 0 |         SeedStartup(startup_hasher, rng); | 
| 587 | 0 |         rng.MixExtract(out, num, std::move(startup_hasher), true, always_use_real_rng); | 
| 588 | 0 |     } | 
| 589 | 100k | } | 
| 590 |  |  | 
| 591 |  | } // namespace | 
| 592 |  |  | 
| 593 |  |  | 
| 594 |  | /** Internal function to set g_deterministic_rng. Only accessed from tests. */ | 
| 595 |  | void MakeRandDeterministicDANGEROUS(const uint256& seed) noexcept | 
| 596 | 559 | { | 
| 597 | 559 |     GetRNGState().MakeDeterministic(seed); | 
| 598 | 559 | } | 
| 599 |  | std::atomic<bool> g_used_g_prng{false}; // Only accessed from tests | 
| 600 |  |  | 
| 601 |  | void GetRandBytes(std::span<unsigned char> bytes) noexcept | 
| 602 | 100k | { | 
| 603 | 100k |     g_used_g_prng = true; | 
| 604 | 100k |     ProcRand(bytes.data(), bytes.size(), RNGLevel::FAST, /*always_use_real_rng=*/false); | 
| 605 | 100k | } | 
| 606 |  |  | 
| 607 |  | void GetStrongRandBytes(std::span<unsigned char> bytes) noexcept | 
| 608 | 0 | { | 
| 609 | 0 |     ProcRand(bytes.data(), bytes.size(), RNGLevel::SLOW, /*always_use_real_rng=*/true); | 
| 610 | 0 | } | 
| 611 |  |  | 
| 612 |  | void RandAddPeriodic() noexcept | 
| 613 | 0 | { | 
| 614 | 0 |     ProcRand(nullptr, 0, RNGLevel::PERIODIC, /*always_use_real_rng=*/false); | 
| 615 | 0 | } | 
| 616 |  |  | 
| 617 | 0 | void RandAddEvent(const uint32_t event_info) noexcept { GetRNGState().AddEvent(event_info); } | 
| 618 |  |  | 
| 619 |  | void FastRandomContext::RandomSeed() noexcept | 
| 620 | 100k | { | 
| 621 | 100k |     uint256 seed = GetRandHash(); | 
| 622 | 100k |     rng.SetKey(MakeByteSpan(seed)); | 
| 623 | 100k |     requires_seed = false; | 
| 624 | 100k | } | 
| 625 |  |  | 
| 626 |  | void FastRandomContext::fillrand(std::span<std::byte> output) noexcept | 
| 627 | 0 | { | 
| 628 | 0 |     if (requires_seed) RandomSeed(); | 
| 629 | 0 |     rng.Keystream(output); | 
| 630 | 0 | } | 
| 631 |  |  | 
| 632 | 0 | FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), rng(MakeByteSpan(seed)) {} | 
| 633 |  |  | 
| 634 |  | void FastRandomContext::Reseed(const uint256& seed) noexcept | 
| 635 | 0 | { | 
| 636 | 0 |     FlushCache(); | 
| 637 | 0 |     requires_seed = false; | 
| 638 | 0 |     rng = {MakeByteSpan(seed)}; | 
| 639 | 0 | } | 
| 640 |  |  | 
| 641 |  | bool Random_SanityCheck() | 
| 642 | 0 | { | 
| 643 | 0 |     uint64_t start = GetPerformanceCounter(); | 
| 644 |  |  | 
| 645 |  |     /* This does not measure the quality of randomness, but it does test that | 
| 646 |  |      * GetOSRand() overwrites all 32 bytes of the output given a maximum | 
| 647 |  |      * number of tries. | 
| 648 |  |      */ | 
| 649 | 0 |     static constexpr int MAX_TRIES{1024}; | 
| 650 | 0 |     uint8_t data[NUM_OS_RANDOM_BYTES]; | 
| 651 | 0 |     bool overwritten[NUM_OS_RANDOM_BYTES] = {}; /* Tracks which bytes have been overwritten at least once */ | 
| 652 | 0 |     int num_overwritten; | 
| 653 | 0 |     int tries = 0; | 
| 654 |  |     /* Loop until all bytes have been overwritten at least once, or the max number of tries is reached */ | 
| 655 | 0 |     do { | 
| 656 | 0 |         memset(data, 0, NUM_OS_RANDOM_BYTES); | 
| 657 | 0 |         GetOSRand(data); | 
| 658 | 0 |         for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) { | 
| 659 | 0 |             overwritten[x] |= (data[x] != 0); | 
| 660 | 0 |         } | 
| 661 |  |  | 
| 662 | 0 |         num_overwritten = 0; | 
| 663 | 0 |         for (int x=0; x < NUM_OS_RANDOM_BYTES; ++x) { | 
| 664 | 0 |             if (overwritten[x]) { | 
| 665 | 0 |                 num_overwritten += 1; | 
| 666 | 0 |             } | 
| 667 | 0 |         } | 
| 668 |  |  | 
| 669 | 0 |         tries += 1; | 
| 670 | 0 |     } while (num_overwritten < NUM_OS_RANDOM_BYTES && tries < MAX_TRIES); | 
| 671 | 0 |     if (num_overwritten != NUM_OS_RANDOM_BYTES) return false; /* If this failed, we bailed out after too many tries */ | 
| 672 |  |  | 
| 673 |  |     // Check that GetPerformanceCounter increases at least during a GetOSRand() call + 1ms sleep. | 
| 674 | 0 |     std::this_thread::sleep_for(std::chrono::milliseconds(1)); | 
| 675 | 0 |     uint64_t stop = GetPerformanceCounter(); | 
| 676 | 0 |     if (stop == start) return false; | 
| 677 |  |  | 
| 678 |  |     // We called GetPerformanceCounter. Use it as entropy. | 
| 679 | 0 |     CSHA512 to_add; | 
| 680 | 0 |     to_add.Write((const unsigned char*)&start, sizeof(start)); | 
| 681 | 0 |     to_add.Write((const unsigned char*)&stop, sizeof(stop)); | 
| 682 | 0 |     GetRNGState().MixExtract(nullptr, 0, std::move(to_add), false, /*always_use_real_rng=*/true); | 
| 683 |  |  | 
| 684 | 0 |     return true; | 
| 685 | 0 | } | 
| 686 |  |  | 
| 687 |  | static constexpr std::array<std::byte, ChaCha20::KEYLEN> ZERO_KEY{}; | 
| 688 |  |  | 
| 689 | 101k | FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), rng(ZERO_KEY) | 
| 690 | 101k | { | 
| 691 |  |     // Note that despite always initializing with ZERO_KEY, requires_seed is set to true if not | 
| 692 |  |     // fDeterministic. That means the rng will be reinitialized with a secure random key upon first | 
| 693 |  |     // use. | 
| 694 | 101k | } | 
| 695 |  |  | 
| 696 |  | void RandomInit() | 
| 697 | 0 | { | 
| 698 |  |     // Invoke RNG code to trigger initialization (if not already performed) | 
| 699 | 0 |     ProcRand(nullptr, 0, RNGLevel::FAST, /*always_use_real_rng=*/true); | 
| 700 |  |  | 
| 701 | 0 |     ReportHardwareRand(); | 
| 702 | 0 | } | 
| 703 |  |  | 
| 704 |  | double MakeExponentiallyDistributed(uint64_t uniform) noexcept | 
| 705 | 0 | { | 
| 706 |  |     // To convert uniform into an exponentially-distributed double, we use two steps: | 
| 707 |  |     // - Convert uniform into a uniformly-distributed double in range [0, 1), use the expression | 
| 708 |  |     //   ((uniform >> 11) * 0x1.0p-53), as described in https://prng.di.unimi.it/ under | 
| 709 |  |     //   "Generating uniform doubles in the unit interval". Call this value x. | 
| 710 |  |     // - Given an x uniformly distributed in [0, 1), we find an exponentially distributed value | 
| 711 |  |     //   by applying the quantile function to it. For the exponential distribution with mean 1 this | 
| 712 |  |     //   is F(x) = -log(1 - x). | 
| 713 |  |     // | 
| 714 |  |     // Combining the two, and using log1p(x) = log(1 + x), we obtain the following: | 
| 715 | 0 |     return -std::log1p((uniform >> 11) * -0x1.0p-53); | 
| 716 | 0 | } |
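
The raw `.byte 0x48, 0x0f, 0xc7, 0xf0` encoding in GetRdRand() (source line 143) hand-assembles `rdrand %rax` so the file builds even with assemblers that predate RDRAND. As an illustration only (not what random.cpp itself does), the same retry loop can be written with the `_rdrand64_step()` intrinsic from `<immintrin.h>`, assuming a compiler invoked with `-mrdrnd` (or an equivalent target attribute) on x86-64 and that RDRAND support has already been confirmed via CPUID:

```cpp
// Illustrative alternative only; random.cpp keeps the raw byte encoding for
// broad assembler compatibility. Assumes x86-64, -mrdrnd, and prior CPUID
// detection of RDRAND (g_rdrand_supported above).
#include <immintrin.h>
#include <cstdint>

uint64_t GetRdRandViaIntrinsic() noexcept
{
    unsigned long long out = 0;
    // RDRAND can transiently fail (carry flag clear); retry a bounded number
    // of times, mirroring the 10-iteration loop in GetRdRand().
    for (int i = 0; i < 10; ++i) {
        if (_rdrand64_step(&out)) break; // returns 1 on success, 0 on failure
    }
    return out;
}
```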
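For orientation, the entry points defined above are typically consumed as in the following sketch. It is a hypothetical caller, not code from this file; the only assumptions beyond the definitions listed above are that `random.h` declares these functions with the signatures seen here and gives `FastRandomContext`'s `fDeterministic` parameter a default of `false`.

```cpp
// Hypothetical caller sketch built on the definitions in this file.
#include <random.h>

#include <array>
#include <cstddef>

void ExampleCallers()
{
    // Fast path (RNGLevel::FAST): the shared state is mixed with RDRAND
    // output, a stack pointer and a timestamp on every call.
    std::array<unsigned char, 8> nonce;
    GetRandBytes(nonce);

    // Strong path (RNGLevel::SLOW): additionally mixes OS entropy from
    // GetOSRand() and the accumulated event hash.
    std::array<unsigned char, 32> key;
    GetStrongRandBytes(key);

    // Per-object fast RNG: lazily seeds itself from GetRandHash() on first use
    // (assuming fDeterministic defaults to false in random.h).
    FastRandomContext ctx;
    std::array<std::byte, 16> buf;
    ctx.fillrand(buf);
}
```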
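The comment block in MakeExponentiallyDistributed() (source lines 706-714) compresses two steps into one expression. The sketch below spells them out; it is equivalent to the original because -log1p(-x) = -log(1 - x), and the function name is invented for illustration.

```cpp
// Expanded restatement of MakeExponentiallyDistributed(); only the two-step
// structure differs from the original expression.
#include <cmath>
#include <cstdint>

double ExponentialFromUniformSketch(uint64_t uniform) noexcept
{
    // Step 1: the 53 high bits of 'uniform' give a double x uniform in [0, 1).
    const double x = (uniform >> 11) * 0x1.0p-53;
    // Step 2: apply the Exp(1) quantile function -ln(1 - x); std::log1p keeps
    // precision when x is close to 0.
    return -std::log1p(-x);
}
// For example, uniform == 0 maps to 0.0, while uniform == UINT64_MAX maps to
// -ln(2^-53), roughly 36.7, the largest value this transform can produce.
```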