diff --git a/src/test/DoS_tests.cpp b/src/test/DoS_tests.cpp
--- a/src/test/DoS_tests.cpp
+++ b/src/test/DoS_tests.cpp
@@ -97,10 +97,10 @@
     peerLogic->FinalizeNode(config, dummyNode1.GetId(), dummy);
 }
 
-void AddRandomOutboundPeer(const Config &config,
-                           std::vector<CNode *> &vNodes,
-                           PeerLogicValidation &peerLogic) {
-    CAddress addr(ip(GetRandInt(0xffffffff)), NODE_NONE);
+static void AddRandomOutboundPeer(const Config &config,
+                                  std::vector<CNode *> &vNodes,
+                                  PeerLogicValidation &peerLogic) {
+    CAddress addr(ip(insecure_rand_ctx.randbits(32)), NODE_NONE);
     vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK), 0,
                                   INVALID_SOCKET, addr, 0, 0, CAddress(), "",
                                   /*fInboundIn=*/false));
diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp
--- a/src/test/cuckoocache_tests.cpp
+++ b/src/test/cuckoocache_tests.cpp
@@ -24,38 +24,23 @@
  * expected behavior. For example improving the hit rate may cause some tests
  * using BOOST_CHECK_CLOSE to fail.
  */
-FastRandomContext local_rand_ctx(true);
-
 BOOST_AUTO_TEST_SUITE(cuckoocache_tests);
 
-/**
- * insecure_GetRandHash fills in a uint256 from local_rand_ctx
- */
-void insecure_GetRandHash(uint256 &t) {
-    uint32_t *ptr = (uint32_t *)t.begin();
-    for (uint8_t j = 0; j < 8; ++j) {
-        *(ptr++) = local_rand_ctx.rand32();
-    }
-}
-
 /**
  * Test that no values not inserted into the cache are read out of it.
  *
  * There are no repeats in the first 200000 insecure_GetRandHash calls
  */
 BOOST_AUTO_TEST_CASE(test_cuckoocache_no_fakes) {
-    local_rand_ctx = FastRandomContext(true);
+    SeedInsecureRand(true);
     CuckooCache::cache<uint256, SignatureCacheHasher> cc{};
     size_t megabytes = 4;
     cc.setup_bytes(megabytes << 20);
-    uint256 v;
     for (int x = 0; x < 100000; ++x) {
-        insecure_GetRandHash(v);
-        cc.insert(v);
+        cc.insert(InsecureRand256());
     }
     for (int x = 0; x < 100000; ++x) {
-        insecure_GetRandHash(v);
-        BOOST_CHECK(!cc.contains(v, false));
+        BOOST_CHECK(!cc.contains(InsecureRand256(), false));
     }
 };
@@ -64,7 +49,7 @@
  * inserted into a megabytes sized cache
  */
 template <typename Cache> double test_cache(size_t megabytes, double load) {
-    local_rand_ctx = FastRandomContext(true);
+    SeedInsecureRand(true);
     std::vector<uint256> hashes;
     Cache set{};
     size_t bytes = megabytes * (1 << 20);
@@ -74,7 +59,7 @@
     for (uint32_t i = 0; i < n_insert; ++i) {
         uint32_t *ptr = (uint32_t *)hashes[i].begin();
         for (uint8_t j = 0; j < 8; ++j) {
-            *(ptr++) = local_rand_ctx.rand32();
+            *(ptr++) = InsecureRand32();
         }
     }
     /**
@@ -137,7 +122,7 @@
  * that the hit rate of "fresher" keys is reasonable*/
 template <typename Cache> void test_cache_erase(size_t megabytes) {
     double load = 1;
-    local_rand_ctx = FastRandomContext(true);
+    SeedInsecureRand(true);
     std::vector<uint256> hashes;
     Cache set{};
     size_t bytes = megabytes * (1 << 20);
@@ -147,7 +132,7 @@
     for (uint32_t i = 0; i < n_insert; ++i) {
         uint32_t *ptr = (uint32_t *)hashes[i].begin();
         for (uint8_t j = 0; j < 8; ++j) {
-            *(ptr++) = local_rand_ctx.rand32();
+            *(ptr++) = InsecureRand32();
         }
     }
     /** We make a copy of the hashes because future optimizations of the
@@ -206,7 +191,7 @@
 template <typename Cache>
 void test_cache_erase_parallel(size_t megabytes) {
     double load = 1;
-    local_rand_ctx = FastRandomContext(true);
+    SeedInsecureRand(true);
     std::vector<uint256> hashes;
     Cache set{};
     size_t bytes = megabytes * (1 << 20);
@@ -216,7 +201,7 @@
     for (uint32_t i = 0; i < n_insert; ++i) {
         uint32_t *ptr = (uint32_t *)hashes[i].begin();
         for (uint8_t j = 0; j < 8; ++j) {
-            *(ptr++) = local_rand_ctx.rand32();
+            *(ptr++) = InsecureRand32();
         }
     }
     /** We make a copy of the hashes because future optimizations of the
@@ -315,7 +300,7 @@
     // iterations with non-deterministic values, so it isn't "overfit" to the
     // specific entropy in FastRandomContext(true) and implementation of the
     // cache.
-    local_rand_ctx = FastRandomContext(true);
+    SeedInsecureRand(true);
 
     // block_activity models a chunk of network activity. n_insert elements are
     // added to the cache. The first and last n/4 are stored for removal later
@@ -331,7 +316,7 @@
         for (uint32_t i = 0; i < n_insert; ++i) {
             uint32_t *ptr = (uint32_t *)inserts[i].begin();
             for (uint8_t j = 0; j < 8; ++j) {
-                *(ptr++) = local_rand_ctx.rand32();
+                *(ptr++) = InsecureRand32();
             }
         }
         for (uint32_t i = 0; i < n_insert / 4; ++i) {
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -110,8 +110,8 @@
         return;
     }
 
-    bool gen_invalid = GetRand(100) < invalid_rate;
-    bool gen_fork = GetRand(100) < branch_rate;
+    bool gen_invalid = InsecureRandRange(100) < invalid_rate;
+    bool gen_fork = InsecureRandRange(100) < branch_rate;
 
     const std::shared_ptr<const CBlock> pblock =
         gen_invalid ? BadBlock(config, root) : GoodBlock(config, root);
@@ -174,7 +174,7 @@
     threads.create_thread([&config, &blocks]() {
         bool tlignored;
         for (int j = 0; j < 1000; j++) {
-            auto block = blocks[GetRand(blocks.size() - 1)];
+            auto block = blocks[InsecureRandRange(blocks.size() - 1)];
             ProcessNewBlock(config, block, true, &tlignored);
         }
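Note: SeedInsecureRand, InsecureRand32, InsecureRand256, and InsecureRandRange are not defined by this patch; they come from the shared test framework (src/test/test_bitcoin.h in this tree). As a rough sketch, assuming those helpers are thin wrappers around one shared FastRandomContext named insecure_rand_ctx (a sketch of the idea, not a verbatim copy of the framework code):

    #include "random.h"  // FastRandomContext
    #include "uint256.h" // uint256

    extern FastRandomContext insecure_rand_ctx;

    // Re-seed the shared context. Passing true selects a fixed seed, so a
    // test that re-seeds at its start produces a reproducible sequence.
    static inline void SeedInsecureRand(bool fDeterministic = false) {
        insecure_rand_ctx = FastRandomContext(fDeterministic);
    }

    // Thin wrappers over the shared context, matching the call sites above.
    static inline uint32_t InsecureRand32() {
        return insecure_rand_ctx.rand32();
    }
    static inline uint256 InsecureRand256() {
        return insecure_rand_ctx.rand256();
    }
    static inline uint64_t InsecureRandRange(uint64_t range) {
        return insecure_rand_ctx.randrange(range);
    }

Sharing one context this way lets each test re-seed deterministically with SeedInsecureRand(true) instead of every test file keeping its own generator, which is what the removed local_rand_ctx in cuckoocache_tests.cpp did.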