diff --git a/src/compat.h b/src/compat.h
index 86e137103..b3575bc51 100644
--- a/src/compat.h
+++ b/src/compat.h
@@ -1,96 +1,105 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #ifndef BITCOIN_COMPAT_H
 #define BITCOIN_COMPAT_H
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 #ifdef WIN32
 #ifndef NOMINMAX
 #define NOMINMAX
 #endif
 #ifdef FD_SETSIZE
 #undef FD_SETSIZE // prevent redefinition compiler warning
 #endif
 #define FD_SETSIZE 1024 // max number of fds in fd_set
 #include <winsock2.h> // Must be included before mswsock.h and windows.h
 #include <mswsock.h>
 #include <windows.h>
 #include <ws2tcpip.h>
 #else
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #include
 #endif
 #ifndef WIN32
 typedef unsigned int SOCKET;
 #include <errno.h>
 #define WSAGetLastError() errno
 #define WSAEINVAL EINVAL
 #define WSAEALREADY EALREADY
 #define WSAEWOULDBLOCK EWOULDBLOCK
 #define WSAEMSGSIZE EMSGSIZE
 #define WSAEINTR EINTR
 #define WSAEINPROGRESS EINPROGRESS
 #define WSAEADDRINUSE EADDRINUSE
 #define WSAENOTSOCK EBADF
 #define INVALID_SOCKET (SOCKET)(~0)
 #define SOCKET_ERROR -1
 #endif
 #ifdef WIN32
 #ifndef S_IRUSR
 #define S_IRUSR 0400
 #define S_IWUSR 0200
 #endif
 #else
 #define MAX_PATH 1024
 #endif
+#ifdef _MSC_VER
+#if !defined(ssize_t)
+#ifdef _WIN64
+typedef int64_t ssize_t;
+#else
+typedef int32_t ssize_t;
+#endif
+#endif
+#endif
 #if HAVE_DECL_STRNLEN == 0
 size_t strnlen(const char *start, size_t max_len);
 #endif // HAVE_DECL_STRNLEN
 #ifndef WIN32
 typedef void *sockopt_arg_type;
 #else
 typedef char *sockopt_arg_type;
 #endif
 // Note these both should work with the current usage of poll, but best to be
 // safe
 // WIN32 poll is broken
 // https://daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken/
 // __APPLE__ poll is broke
 // https://github.com/bitcoin/bitcoin/pull/14336#issuecomment-437384408
 #if defined(__linux__)
 #define USE_POLL
 #endif
 static bool inline IsSelectableSocket(const SOCKET &s) {
 #if defined(USE_POLL) || defined(WIN32)
     return true;
 #else
     return (s < FD_SETSIZE);
 #endif
 }
 #endif // BITCOIN_COMPAT_H
diff --git a/src/random.h b/src/random.h
index 471742ff7..5d01fa512 100644
--- a/src/random.h
+++ b/src/random.h
@@ -1,266 +1,266 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #ifndef BITCOIN_RANDOM_H
 #define BITCOIN_RANDOM_H
 #include <crypto/chacha20.h>
 #include <crypto/common.h>
 #include <uint256.h>
 #include <chrono> // For std::chrono::microseconds
 #include <cstdint>
 #include <limits>
 /**
  * Overall design of the RNG and entropy sources.
  *
  * We maintain a single global 256-bit RNG state for all high-quality
  * randomness. The following (classes of) functions interact with that state by
  * mixing in new entropy, and optionally extracting random output from it:
  *
  * - The GetRand*() class of functions, as well as construction of
  * FastRandomContext objects, perform 'fast' seeding, consisting of mixing in:
  * - A stack pointer (indirectly committing to calling thread and call stack)
  * - A high-precision timestamp (rdtsc when available, c++
  * high_resolution_clock otherwise)
  * - 64 bits from the hardware RNG (rdrand) when available.
  * These entropy sources are very fast, and only designed to protect against
  * situations where a VM state restore/copy results in multiple systems with the
  * same randomness.
  * FastRandomContext on the other hand does not protect against
  * this once created, but is even faster (and acceptable to use inside tight
  * loops).
  *
  * - The GetStrongRand*() class of function perform 'slow' seeding, including
  * everything that fast seeding includes, but additionally:
  * - OS entropy (/dev/urandom, getrandom(), ...). The application will
  * terminate if this entropy source fails.
  * - Another high-precision timestamp (indirectly committing to a benchmark of
  * all the previous sources). These entropy sources are slower, but designed to
  * make sure the RNG state contains fresh data that is unpredictable to
  * attackers.
  *
  * - RandAddPeriodic() seeds everything that fast seeding includes, but
  * additionally:
  * - A high-precision timestamp
  * - Dynamic environment data (performance monitoring, ...)
  * - Strengthen the entropy for 10 ms using repeated SHA512.
  * This is run once every minute.
  *
  * On first use of the RNG (regardless of what function is called first), all
  * entropy sources used in the 'slow' seeder are included, but also:
  * - 256 bits from the hardware RNG (rdseed or rdrand) when available.
  * - Dynamic environment data (performance monitoring, ...)
  * - Static environment data
  * - Strengthen the entropy for 100 ms using repeated SHA512.
  *
  * When mixing in new entropy, H = SHA512(entropy || old_rng_state) is computed,
  * and (up to) the first 32 bytes of H are produced as output, while the last 32
  * bytes become the new RNG state.
  */
 /**
  * Generate random data via the internal PRNG.
  *
  * These functions are designed to be fast (sub microsecond), but do not
  * necessarily meaningfully add entropy to the PRNG state.
  *
  * Thread-safe.
  */
 void GetRandBytes(uint8_t *buf, int num) noexcept;
 uint64_t GetRand(uint64_t nMax) noexcept;
 std::chrono::microseconds GetRandMicros(std::chrono::microseconds duration_max) noexcept;
 int GetRandInt(int nMax) noexcept;
 uint256 GetRandHash() noexcept;
 /**
  * Gather entropy from various sources, feed it into the internal PRNG, and
  * generate random data using it.
  *
  * This function will cause failure whenever the OS RNG fails.
  *
  * Thread-safe.
  */
 void GetStrongRandBytes(uint8_t *buf, int num) noexcept;
 /**
  * Gather entropy from various expensive sources, and feed them to the PRNG
  * state.
  *
  * Thread-safe.
  */
 void RandAddPeriodic() noexcept;
 /**
  * Gathers entropy from the low bits of the time at which events occur. Should
  * be called with a uint32_t describing the event at the time an event occurs.
  *
  * Thread-safe.
  */
 void RandAddEvent(const uint32_t event_info) noexcept;
 /**
  * Fast randomness source. This is seeded once with secure random data, but
  * is completely deterministic and does not gather more entropy after that.
  *
  * This class is not thread-safe.
  */
 class FastRandomContext {
 private:
     bool requires_seed;
     ChaCha20 rng;
     uint8_t bytebuf[64];
     int bytebuf_size;
     uint64_t bitbuf;
     int bitbuf_size;
     void RandomSeed();
     void FillByteBuffer() {
         if (requires_seed) {
             RandomSeed();
         }
         rng.Keystream(bytebuf, sizeof(bytebuf));
         bytebuf_size = sizeof(bytebuf);
     }
     void FillBitBuffer() {
         bitbuf = rand64();
         bitbuf_size = 64;
     }
 public:
     explicit FastRandomContext(bool fDeterministic = false) noexcept;
     /** Initialize with explicit seed (only for testing) */
     explicit FastRandomContext(const uint256 &seed) noexcept;
     // Do not permit copying a FastRandomContext (move it, or create a new one
     // to get reseeded).
     FastRandomContext(const FastRandomContext &) = delete;
     FastRandomContext(FastRandomContext &&) = delete;
     FastRandomContext &operator=(const FastRandomContext &) = delete;
     /**
      * Move a FastRandomContext. If the original one is used again, it will be
      * reseeded.
      */
     FastRandomContext &operator=(FastRandomContext &&from) noexcept;
     /** Generate a random 64-bit integer. */
     uint64_t rand64() noexcept {
         if (bytebuf_size < 8) {
             FillByteBuffer();
         }
         uint64_t ret = ReadLE64(bytebuf + 64 - bytebuf_size);
         bytebuf_size -= 8;
         return ret;
     }
     /** Generate a random (bits)-bit integer. */
     uint64_t randbits(int bits) noexcept {
         if (bits == 0) {
             return 0;
         } else if (bits > 32) {
             return rand64() >> (64 - bits);
         } else {
             if (bitbuf_size < bits) {
                 FillBitBuffer();
             }
             uint64_t ret = bitbuf & (~uint64_t(0) >> (64 - bits));
             bitbuf >>= bits;
             bitbuf_size -= bits;
             return ret;
         }
     }
     /** Generate a random integer in the range [0..range). */
     uint64_t randrange(uint64_t range) noexcept {
         assert(range);
         --range;
         int bits = CountBits(range);
         while (true) {
             uint64_t ret = randbits(bits);
             if (ret <= range) {
                 return ret;
             }
         }
     }
     /** Generate random bytes. */
     std::vector<uint8_t> randbytes(size_t len);
     /** Generate a random 32-bit integer. */
     uint32_t rand32() noexcept { return randbits(32); }
     /** generate a random uint256. */
     uint256 rand256() noexcept;
     /** Generate a random boolean. */
     bool randbool() noexcept { return randbits(1); }
     // Compatibility with the C++11 UniformRandomBitGenerator concept
     typedef uint64_t result_type;
     static constexpr uint64_t min() { return 0; }
     static constexpr uint64_t max() { return std::numeric_limits<uint64_t>::max(); }
     inline uint64_t operator()() noexcept { return rand64(); }
 };
 /**
  * More efficient than using std::shuffle on a FastRandomContext.
  *
  * This is more efficient as std::shuffle will consume entropy in groups of
  * 64 bits at the time and throw away most.
  *
  * This also works around a bug in libstdc++ std::shuffle that may cause
  * type::operator=(type&&) to be invoked on itself, which the library's
  * debug mode detects and panics on. This is a known issue, see
  * https://stackoverflow.com/questions/22915325/avoiding-self-assignment-in-stdshuffle
  */
 template <typename I, typename R> void Shuffle(I first, I last, R &&rng) {
     while (first != last) {
         size_t j = rng.randrange(last - first);
         if (j) {
             using std::swap;
             swap(*first, *(first + j));
         }
         ++first;
     }
 }
 /**
  * Number of random bytes returned by GetOSRand.
  * When changing this constant make sure to change all call sites, and make
  * sure that the underlying OS APIs for all platforms support the number.
  * (many cap out at 256 bytes).
  */
-static const ssize_t NUM_OS_RANDOM_BYTES = 32;
+static const int NUM_OS_RANDOM_BYTES = 32;
 /**
  * Get 32 bytes of system entropy. Do not use this in application code: use
  * GetStrongRandBytes instead.
  */
 void GetOSRand(uint8_t *ent32);
 /**
  * Check that OS randomness is available and returning the requested number of
  * bytes.
  */
 bool Random_SanityCheck();
 /**
  * Initialize global RNG state and log any CPU features that are used.
  *
  * Calling this function is optional. RNG state will be initialized when first
  * needed if it is not called.
  */
 void RandomInit();
 #endif // BITCOIN_RANDOM_H
diff --git a/src/support/cleanse.cpp b/src/support/cleanse.cpp
index 467cfa031..fd4d20d35 100644
--- a/src/support/cleanse.cpp
+++ b/src/support/cleanse.cpp
@@ -1,41 +1,45 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2015 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #include <support/cleanse.h>
 #include <cstring>
+#if defined(_MSC_VER)
+#include <windows.h> // For SecureZeroMemory.
+#endif
+
 /**
  * Compilers have a bad habit of removing "superfluous" memset calls that
  * are trying to zero memory. For example, when memset()ing a buffer and
  * then free()ing it, the compiler might decide that the memset is
  * unobservable and thus can be removed.
  *
  * Previously we used OpenSSL which tried to stop this by a) implementing
  * memset in assembly on x86 and b) putting the function in its own file
  * for other platforms.
  *
  * This change removes those tricks in favor of using asm directives to
  * scare the compiler away. As best as our compiler folks can tell, this is
  * sufficient and will continue to be so.
  *
  * Adam Langley
  * Commit: ad1907fe73334d6c696c8539646c21b11178f20f
  * BoringSSL (LICENSE: ISC)
  */
 void memory_cleanse(void *ptr, size_t len) {
     std::memset(ptr, 0, len);
     /**
      * As best as we can tell, this is sufficient to break any optimizations
      * that might try to eliminate "superfluous" memsets. If there's an easy way
      * to detect memset_s, it would be better to use that.
      */
 #if defined(_MSC_VER)
-    __asm;
+    SecureZeroMemory(ptr, len);
 #else
     __asm__ __volatile__("" : : "r"(ptr) : "memory");
 #endif
 }
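
The random.h comment above describes the core mixing step as H = SHA512(entropy || old_rng_state), where the first 32 bytes of H are handed out as output and the last 32 bytes replace the state. The fragment below is only a rough illustration of that step, not the code in src/random.cpp; it assumes the in-tree CSHA512 hasher from crypto/sha512.h, and the names g_rng_state and MixExtract are invented for the example.

#include <crypto/sha512.h> // assumed in-tree SHA-512 implementation (CSHA512)

#include <cstddef>
#include <cstdint>
#include <cstring>

static uint8_t g_rng_state[32] = {0}; // stand-in for the global 256-bit RNG state

// Mix `entropy` into the state and extract (up to) 32 bytes of output:
// H = SHA512(entropy || old_rng_state); out = H[0..31]; new state = H[32..63].
static void MixExtract(const uint8_t *entropy, size_t entropy_len, uint8_t out32[32]) {
    uint8_t h[CSHA512::OUTPUT_SIZE];
    CSHA512()
        .Write(entropy, entropy_len)
        .Write(g_rng_state, sizeof(g_rng_state))
        .Finalize(h);
    std::memcpy(out32, h, 32);            // first 32 bytes: produced as output
    std::memcpy(g_rng_state, h + 32, 32); // last 32 bytes: become the new state
}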
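
For callers, FastRandomContext exposes randrange(), randbool(), randbytes() and the UniformRandomBitGenerator members (result_type, min, max, operator()), and Shuffle() accepts any generator with randrange(). A caller-side sketch, assuming the in-tree include paths and with invented variable names, looks roughly like this:

#include <random.h> // the header shown in the hunk above

#include <cstdint>
#include <vector>

int main() {
    // Seeded lazily from the global RNG; cheap enough for tight loops, but not
    // reseeded after construction (see the class comment above).
    FastRandomContext rng;

    uint64_t die = rng.randrange(6) + 1;            // uniform in [1, 6]
    bool coin = rng.randbool();                     // one random bit
    std::vector<uint8_t> nonce = rng.randbytes(12); // 12 random bytes

    // Shuffle() consumes only as many random bits per element as needed,
    // unlike std::shuffle, which draws 64 bits at a time and discards most.
    std::vector<int> order{0, 1, 2, 3, 4};
    Shuffle(order.begin(), order.end(), rng);

    (void)die;
    (void)coin;
    (void)nonce;
    return 0;
}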
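
Finally, the cleanse.cpp hunk replaces the bare `__asm;` statement in the MSVC branch with SecureZeroMemory, which Windows documents as a zeroing routine the optimizer will not elide, while other compilers keep the empty asm barrier that prevents the preceding memset from being proven dead. A caller-side sketch, assuming memory_cleanse() is declared in support/cleanse.h as the include above suggests, and with an invented key buffer:

#include <support/cleanse.h> // assumed declaration of memory_cleanse()

#include <cstdint>
#include <cstring>

int main() {
    uint8_t key[32];                     // invented example of secret material
    std::memset(key, 0xAB, sizeof(key)); // pretend this is a freshly derived key
    // ... use key ...

    // A plain memset here could legally be removed by the optimizer, since the
    // buffer is never read again; memory_cleanse() adds the barrier (or, on
    // MSVC, calls SecureZeroMemory) so the wipe survives optimization.
    memory_cleanse(key, sizeof(key));
    return 0;
}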