Changeset View
Standalone View
src/random.cpp
Show First 20 Lines • Show All 76 Lines • ▼ Show 20 Lines | |||||
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__)
// Set (with a plain seq_cst store) once RDRandInit() has completed; consumers
// assert on it before reading rdrand_supported.
static std::atomic<bool> hwrand_initialized{false};
// True when CPUID reports the RDRAND instruction is available.
static bool rdrand_supported = false;
// CPUID leaf 1, ECX bit 30: RDRAND feature flag.
static constexpr uint32_t CPUID_F1_ECX_RDRAND = 0x40000000;
// Probe CPUID for RDRAND support. Must run before GetHWRand()/RDRandReport().
// Deliberately does NOT log: it may run from a global constructor (see
// RDRandReport below).
static void RDRandInit() {
    uint32_t eax, ebx, ecx, edx;
    if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) && (ecx & CPUID_F1_ECX_RDRAND)) {
        rdrand_supported = true;
    }
    hwrand_initialized.store(true);
}
// Report (once logging is available) whether RDRAND is being used.
static void RDRandReport() {
    assert(hwrand_initialized.load(std::memory_order_relaxed));
    if (rdrand_supported) {
        // This must be done in a separate function, as HWRandInit() may be
        // indirectly called from global constructors, before logging is
        // initialized.
        LogPrintf("Using RdRand as an additional entropy source\n");
    }
}
#else
// Non-x86 targets: no RDRAND support; both hooks are no-ops.
static void RDRandInit() {}
static void RDRandReport() {}
#endif
static bool GetHWRand(uint8_t *ent32) { | static bool GetHWRand(uint8_t *ent32) { | ||||
#if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) | #if defined(__x86_64__) || defined(__amd64__) || defined(__i386__) | ||||
assert(hwrand_initialized.load(std::memory_order_relaxed)); | assert(hwrand_initialized.load(std::memory_order_relaxed)); | ||||
if (rdrand_supported) { | if (rdrand_supported) { | ||||
uint8_t ok; | uint8_t ok; | ||||
// Not all assemblers support the rdrand instruction, write it in hex. | // Not all assemblers support the rdrand instruction, write it in hex. | ||||
▲ Show 20 Lines • Show All 184 Lines • ▼ Show 20 Lines | |||||
} | } | ||||
void GetRandBytes(uint8_t *buf, int num) { | void GetRandBytes(uint8_t *buf, int num) { | ||||
if (RAND_bytes(buf, num) != 1) { | if (RAND_bytes(buf, num) != 1) { | ||||
RandFailure(); | RandFailure(); | ||||
} | } | ||||
} | } | ||||
namespace {
// Bundles the inter-call RNG chaining state behind a single mutex.
struct RNGState {
    Mutex m_mutex;
    // 32 bytes of secret chaining state, mixed into and refreshed by every
    // AddDataToRng()/GetStrongRandBytes() call.
    uint8_t m_state[32] = {0};
    // Monotonic counter hashed together with m_state, so successive outputs
    // differ even when the other inputs repeat.
    uint64_t m_counter = 0;
    // Constructing the RNG state also probes for hardware RNG support.
    explicit RNGState() { RDRandInit(); }
};
RNGState &GetRNGState() {
    // This C++11 idiom relies on the guarantee that static variables are
    // initialized on first call, even when multiple parallel calls are
    // permitted.
    static std::unique_ptr<RNGState> g_rng{new RNGState()};
    return *g_rng;
}
} // namespace
static void AddDataToRng(void *data, size_t len); | static void AddDataToRng(void *data, size_t len); | ||||
void RandAddSeedSleep() { | void RandAddSeedSleep() { | ||||
int64_t nPerfCounter1 = GetPerformanceCounter(); | int64_t nPerfCounter1 = GetPerformanceCounter(); | ||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); | std::this_thread::sleep_for(std::chrono::milliseconds(1)); | ||||
int64_t nPerfCounter2 = GetPerformanceCounter(); | int64_t nPerfCounter2 = GetPerformanceCounter(); | ||||
// Combine with and update state | // Combine with and update state | ||||
AddDataToRng(&nPerfCounter1, sizeof(nPerfCounter1)); | AddDataToRng(&nPerfCounter1, sizeof(nPerfCounter1)); | ||||
AddDataToRng(&nPerfCounter2, sizeof(nPerfCounter2)); | AddDataToRng(&nPerfCounter2, sizeof(nPerfCounter2)); | ||||
memory_cleanse(&nPerfCounter1, sizeof(nPerfCounter1)); | memory_cleanse(&nPerfCounter1, sizeof(nPerfCounter1)); | ||||
memory_cleanse(&nPerfCounter2, sizeof(nPerfCounter2)); | memory_cleanse(&nPerfCounter2, sizeof(nPerfCounter2)); | ||||
} | } | ||||
// Mix (len, data) together with the current chaining state and counter
// through SHA512. The second 32 bytes of the digest become the new chaining
// state; the first 32 are discarded (only GetStrongRandBytes emits output).
static void AddDataToRng(void *data, size_t len) {
    RNGState &rng = GetRNGState();
    CSHA512 hasher;
    // Hash the length first so (len, data) pairs are unambiguously framed.
    hasher.Write((const uint8_t *)&len, sizeof(len));
    hasher.Write((const uint8_t *)data, len);
    uint8_t buf[64];
    {
        // State reads, counter bump, and state update happen atomically.
        WAIT_LOCK(rng.m_mutex, lock);
        hasher.Write(rng.m_state, sizeof(rng.m_state));
        hasher.Write((const uint8_t *)&rng.m_counter, sizeof(rng.m_counter));
        ++rng.m_counter;
        hasher.Finalize(buf);
        memcpy(rng.m_state, buf + 32, 32);
    }
    // Wipe the digest so no secret state leaks via the stack.
    memory_cleanse(buf, 64);
}
// Produce num (<= 32) strong random bytes by hashing several independent
// entropy sources together with the persistent chaining state. The output is
// the first half of the SHA512 digest; the second half replaces the state,
// so the emitted bytes cannot be reconstructed from the stored state.
void GetStrongRandBytes(uint8_t *out, int num) {
    RNGState &rng = GetRNGState();
    assert(num <= 32);
    CSHA512 hasher;
    uint8_t buf[64];
    // First source: OpenSSL's RNG
    RandAddSeedPerfmon();
    GetRandBytes(buf, 32);
    hasher.Write(buf, 32);
    // Second source: OS RNG
    GetOSRand(buf);
    hasher.Write(buf, 32);
    // Third source: HW RNG, if available.
    if (GetHWRand(buf)) {
        hasher.Write(buf, 32);
    }
    // Combine with and update state
    {
        WAIT_LOCK(rng.m_mutex, lock);
        hasher.Write(rng.m_state, sizeof(rng.m_state));
        hasher.Write((const uint8_t *)&rng.m_counter, sizeof(rng.m_counter));
        ++rng.m_counter;
        hasher.Finalize(buf);
        memcpy(rng.m_state, buf + 32, 32);
    }
    // Produce output
    memcpy(out, buf, num);
    // Wipe the full digest (including the unreturned half) from the stack.
    memory_cleanse(buf, 64);
}
uint64_t GetRand(uint64_t nMax) { | uint64_t GetRand(uint64_t nMax) { | ||||
▲ Show 20 Lines • Show All 127 Lines • ▼ Show 20 Lines | operator=(FastRandomContext &&from) noexcept { | ||||
bitbuf_size = from.bitbuf_size; | bitbuf_size = from.bitbuf_size; | ||||
from.requires_seed = true; | from.requires_seed = true; | ||||
from.bytebuf_size = 0; | from.bytebuf_size = 0; | ||||
from.bitbuf_size = 0; | from.bitbuf_size = 0; | ||||
return *this; | return *this; | ||||
} | } | ||||
// Explicit RNG initialization entry point, called once logging is up.
void RandomInit() {
    // Invoke RNG code to trigger initialization (if not already performed)
    GetRNGState();
    // Safe to log now: report whether RDRAND was detected.
    RDRandReport();
}