diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -513,6 +513,7 @@ noui.cpp policy/fees.cpp policy/settings.cpp + pow/aserti32d.cpp pow/daa.cpp pow/eda.cpp pow/pow.cpp diff --git a/src/Makefile.am b/src/Makefile.am --- a/src/Makefile.am +++ b/src/Makefile.am @@ -199,6 +199,7 @@ policy/mempool.h \ policy/policy.h \ policy/settings.h \ + pow/aserti32d.h \ pow/daa.h \ pow/eda.h \ pow/pow.h \ @@ -336,6 +337,7 @@ noui.cpp \ policy/fees.cpp \ policy/settings.cpp \ + pow/aserti32d.cpp \ pow/daa.cpp \ pow/eda.cpp \ pow/pow.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -117,6 +117,7 @@ avalanche/test/peermanager_tests.cpp \ avalanche/test/processor_tests.cpp \ avalanche/test/proof_tests.cpp \ + pow/test/aserti32d_tests.cpp \ pow/test/daa_tests.cpp \ pow/test/eda_tests.cpp \ test/scriptnum10.h \ diff --git a/src/chainparams.cpp b/src/chainparams.cpp --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -97,6 +97,9 @@ consensus.fPowAllowMinDifficultyBlocks = false; consensus.fPowNoRetargeting = false; + // two days + consensus.nDAAHalfLife = 2 * 24 * 60 * 60; + // nPowTargetTimespan / nPowTargetSpacing consensus.nMinerConfirmationWindow = 2016; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = { @@ -292,6 +295,9 @@ consensus.fPowAllowMinDifficultyBlocks = true; consensus.fPowNoRetargeting = false; + // two days + consensus.nDAAHalfLife = 2 * 24 * 60 * 60; + // nPowTargetTimespan / nPowTargetSpacing consensus.nMinerConfirmationWindow = 2016; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = { @@ -442,6 +448,9 @@ consensus.fPowAllowMinDifficultyBlocks = true; consensus.fPowNoRetargeting = true; + // two days + consensus.nDAAHalfLife = 2 * 24 * 60 * 60; + // Faster than normal for regtest (144 instead of 2016) consensus.nMinerConfirmationWindow = 144; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY] = { diff 
--git a/src/consensus/params.h b/src/consensus/params.h --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -95,6 +95,7 @@ uint256 powLimit; bool fPowAllowMinDifficultyBlocks; bool fPowNoRetargeting; + int64_t nDAAHalfLife; int64_t nPowTargetSpacing; int64_t nPowTargetTimespan; int64_t DifficultyAdjustmentInterval() const { diff --git a/src/pow/aserti32d.h b/src/pow/aserti32d.h new file mode 100644 --- /dev/null +++ b/src/pow/aserti32d.h @@ -0,0 +1,35 @@ +// Copyright (c) 2020 The Bitcoin developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_POW_ASERTI32D_H +#define BITCOIN_POW_ASERTI32D_H + +#include + +#include + +class CBlockHeader; +class CBlockIndex; + +namespace Consensus { +struct Params; +} + +arith_uint256 CalculateASERT(const arith_uint256 refTarget, + const int64_t nPowTargetSpacing, + const int64_t nTimeDiff, const int64_t nHeightDiff, + const arith_uint256 powLimit, + const int64_t nHalfLife) noexcept; + +uint32_t GetNextASERTWorkRequired(const CBlockIndex *pindexPrev, + const CBlockHeader *pblock, + const Consensus::Params ¶ms) noexcept; + +uint32_t +GetNextASERTWorkRequired(const CBlockIndex *pindexPrev, + const CBlockHeader *pblock, + const Consensus::Params ¶ms, + const CBlockIndex *pindexReferenceBlock) noexcept; + +#endif // BITCOIN_POW_ASERTI32D_H diff --git a/src/pow/aserti32d.cpp b/src/pow/aserti32d.cpp new file mode 100644 --- /dev/null +++ b/src/pow/aserti32d.cpp @@ -0,0 +1,182 @@ +// Copyright (c) 2020 The Bitcoin developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include + +#include +#include +#include + +/** + * Return pointer to the reference block used for ASERT. + * As reference we use the Axion pre-fork block. + * Note: other reference blocks are conceivable, e.g. 
a block some time in the + * past before the Axion upgrade. For now the specification is not fixed on + * using the exact pre-fork block. This function is meant to be removed some + * time after the upgrade, once the reference block is deeply buried. + */ +static const CBlockIndex * +GetASERTReferenceBlock(const CBlockIndex *pindexPrev, + const Consensus::Params ¶ms) { + assert(pindexPrev != nullptr); + + // Walk back until we find the first block where ASERT isn't enabled, + // which is also the first block for which Axion rules aren't enabled. + const CBlockIndex *pindex = pindexPrev; + while (pindex->pprev && IsAxionEnabled(params, pindex)) { + pindex = pindex->pprev; + } + return pindex; +} + +uint32_t GetNextASERTWorkRequired(const CBlockIndex *pindexPrev, + const CBlockHeader *pblock, + const Consensus::Params ¶ms) noexcept { + return GetNextASERTWorkRequired(pindexPrev, pblock, params, + GetASERTReferenceBlock(pindexPrev, params)); +} + +/** + * Compute the next required proof of work using an absolutely scheduled + * exponentially weighted target (ASERT). + * + * With ASERT, we define an ideal schedule for block issuance (e.g. 1 block + * every 600 seconds), and we calculate the difficulty based on how far the most + * recent block's timestamp is ahead of or behind that schedule. We set our + * targets (difficulty) exponentially. For every [nHalfLife] seconds ahead of or + * behind schedule we get, we double or halve the difficulty. + */ +uint32_t +GetNextASERTWorkRequired(const CBlockIndex *pindexPrev, + const CBlockHeader *pblock, + const Consensus::Params ¶ms, + const CBlockIndex *pindexReferenceBlock) noexcept { + // This cannot handle the genesis block and early blocks in general. + assert(pindexPrev != nullptr); + + // Reference block is the block on which all ASERT scheduling calculations + // are based. It too must exist. 
+ assert(pindexReferenceBlock != nullptr); + + // We make no further assumptions other than the height of the prev block + // must be >= that of the activation block. + assert(pindexPrev->nHeight >= pindexReferenceBlock->nHeight); + + // Special difficulty rule for testnet + // If the new block's timestamp is more than 2* 10 minutes then allow + // mining of a min-difficulty block. + if (params.fPowAllowMinDifficultyBlocks && + (pblock->GetBlockTime() > + pindexPrev->GetBlockTime() + 2 * params.nPowTargetSpacing)) { + return UintToArith256(params.powLimit).GetCompact(); + } + + const int64_t nTimeDiff = + int64_t(pindexPrev->nTime) - + int64_t(pindexReferenceBlock->GetBlockHeader().nTime); + const int64_t nHeightDiff = + pindexPrev->nHeight - pindexReferenceBlock->nHeight; + + const arith_uint256 refBlockTarget = + arith_uint256().SetCompact(pindexReferenceBlock->nBits); + + static const arith_uint256 powLimit = UintToArith256(params.powLimit); + + // Refactored: do the actual target adaptation calculation in separate + // CalculateASERT() function + arith_uint256 nextTarget = + CalculateASERT(refBlockTarget, params.nPowTargetSpacing, nTimeDiff, + nHeightDiff, powLimit, params.nDAAHalfLife); + + // CalculateASERT() already clamps to powLimit. + return nextTarget.GetCompact(); +} + +// ASERT calculation function. +// Clamps to powLimit. +arith_uint256 CalculateASERT(const arith_uint256 refTarget, + const int64_t nPowTargetSpacing, + const int64_t nTimeDiff, const int64_t nHeightDiff, + const arith_uint256 powLimit, + const int64_t nHalfLife) noexcept { + // Input target must never be zero nor exceed powLimit. + assert(refTarget > 0 && refTarget <= powLimit); + + // Height diff should NOT be negative. + assert(nHeightDiff >= 0); + + // This algorithm uses fixed-point math.
The lowest rbits bits are after + // the radix, and represent the "decimal" (or binary) portion of the value + constexpr uint8_t rbits = 16; + static_assert(rbits > 0, ""); + + arith_uint256 nextTarget = refTarget; + // It will be helpful when reading what follows, to remember that + // nextTarget is adapted from reference block target value. + + // Ultimately, we want to approximate the following ASERT formula, using + // only integer (fixed-point) math: + // new_target = old_target * 2^((blocks_time - + // IDEAL_BLOCK_TIME*(height_diff)) / nHalfLife) + + // First, we'll calculate the exponent: + assert(llabs(nTimeDiff - nPowTargetSpacing * nHeightDiff) < + (1ll << (63 - rbits))); + int64_t exponent = + ((nTimeDiff - nPowTargetSpacing * nHeightDiff) << rbits) / nHalfLife; + + // Next, we use the 2^x = 2 * 2^(x-1) identity to shift our exponent into + // the [0, 1) interval. The truncated exponent tells us how many shifts we + // need to do. Note1: This needs to be a right shift. Right shift rounds + // downward (floored division), + // whereas integer division in C++ rounds towards zero (truncated + // division). + // Note2: This algorithm uses arithmetic shifts of negative numbers. This + // is unspecified but very common behavior for C++ compilers before + // C++20, and standard with C++20. We must check this behavior e.g. + // using static_assert. + static_assert(int64_t(-1) >> 1 == int64_t(-1), + "ASERT algorithm needs arithmetic shift support"); + + const int64_t shifts = exponent >> rbits; + + if (shifts < 0) { + nextTarget = nextTarget >> -shifts; + } else { + nextTarget = nextTarget << shifts; + } + // Remove everything but the decimal part from the exponent since we've + // accounted for that through shifting. + exponent -= (shifts << rbits); + // What is left then should now be in the fixed point range [0, 1). + assert(exponent >= 0 && exponent < 65536); + + // Check for overflow and underflow from shifting nextTarget.
Since it's a + // uint, both could result in a value of 0, so we'll need to clamp it if so. + // We can figure out which happened by looking at shifts's sign. + if (nextTarget == 0 || nextTarget > powLimit) { + if (shifts < 0) { + return arith_uint256(1); + } else { + return powLimit; + } + } + + // Now we compute an approximated target * 2^(exponent) + + // 2^x ~= (1 + 0.695502049*x + 0.2262698*x**2 + 0.0782318*x**3) for 0 <= x < + // 1 Error versus actual 2^x is less than 0.013%. + uint64_t factor = + (195766423245049ull * exponent + 971821376 * exponent * exponent + + 5127 * exponent * exponent * exponent + (1ull << 47)) >> + (rbits * 3); + nextTarget += (nextTarget * factor) >> rbits; + + // The last operation was strictly increasing, so it could have exceeded + // powLimit. Check and clamp again. + if (nextTarget > powLimit) { + return powLimit; + } + + return nextTarget; +} diff --git a/src/pow/pow.cpp b/src/pow/pow.cpp --- a/src/pow/pow.cpp +++ b/src/pow/pow.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -29,6 +30,10 @@ return pindexPrev->nBits; } + if (IsAxionEnabled(params, pindexPrev)) { + return GetNextASERTWorkRequired(pindexPrev, pblock, params); + } + if (IsDAAEnabled(params, pindexPrev)) { return GetNextDAAWorkRequired(pindexPrev, pblock, params); } diff --git a/src/pow/test/CMakeLists.txt b/src/pow/test/CMakeLists.txt --- a/src/pow/test/CMakeLists.txt +++ b/src/pow/test/CMakeLists.txt @@ -12,6 +12,7 @@ fixture.cpp TESTS + aserti32d_tests.cpp daa_tests.cpp eda_tests.cpp ) diff --git a/src/pow/test/aserti32d_tests.cpp b/src/pow/test/aserti32d_tests.cpp new file mode 100644 --- /dev/null +++ b/src/pow/test/aserti32d_tests.cpp @@ -0,0 +1,337 @@ +// Copyright (c) 2020 The Bitcoin Core developers +// Distributed under the MIT/X11 software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include + +#include +#include +#include + +#include + +#include + +#include + +BOOST_FIXTURE_TEST_SUITE(aserti32d_tests, BasicTestingSetup) + +static double TargetFromBits(const uint32_t nBits) { + return (nBits & 0xffffff) * pow(256, ((nBits & 0xff000000) >> 24) - 3); +} + +static CBlockIndex GetBlockIndex(CBlockIndex *pindexPrev, int64_t nTimeInterval, + uint32_t nBits) { + CBlockIndex block; + block.pprev = pindexPrev; + block.nHeight = pindexPrev->nHeight + 1; + block.nTime = pindexPrev->nTime + nTimeInterval; + block.nBits = nBits; + + block.nChainWork = pindexPrev->nChainWork + GetBlockProof(block); + return block; +} + +static double +GetASERTApproximationError(const CBlockIndex *pindexPrev, + const uint32_t finalBits, + const CBlockIndex *pindexReferenceBlock) { + const int64_t nHeightDiff = + pindexPrev->nHeight - pindexReferenceBlock->nHeight; + const int64_t nTimeDiff = pindexPrev->nTime - pindexReferenceBlock->nTime; + const uint32_t initialBits = pindexReferenceBlock->nBits; + + BOOST_CHECK(nHeightDiff >= 0); + double dInitialPow = TargetFromBits(initialBits); + double dFinalPow = TargetFromBits(finalBits); + + double dExponent = + double(nTimeDiff - nHeightDiff * 600) / double(2 * 24 * 3600); + double dTarget = dInitialPow * pow(2, dExponent); + + return (dFinalPow - dTarget) / dTarget; +} + +BOOST_AUTO_TEST_CASE(asert_difficulty_test) { + DummyConfig config(CBaseChainParams::MAIN); + + std::vector blocks(3000 + 2 * 24 * 3600); + + const Consensus::Params ¶ms = config.GetChainParams().GetConsensus(); + const arith_uint256 powLimit = UintToArith256(params.powLimit); + arith_uint256 currentPow = powLimit >> 3; + uint32_t initialBits = currentPow.GetCompact(); + double dMaxErr = 0.0001166792656486; + + // Genesis block, also ASERT reference block in this test case. 
+ blocks[0] = CBlockIndex(); + blocks[0].nHeight = 0; + blocks[0].nTime = 1269211443; + blocks[0].nBits = initialBits; + + blocks[0].nChainWork = GetBlockProof(blocks[0]); + + // Block counter. + size_t i; + + // Pile up some blocks every 10 mins to establish some history. + for (i = 1; i < 150; i++) { + blocks[i] = GetBlockIndex(&blocks[i - 1], 600, initialBits); + BOOST_CHECK_EQUAL(blocks[i].nBits, initialBits); + } + + CBlockHeader blkHeaderDummy; + uint32_t nBits = GetNextASERTWorkRequired(&blocks[i - 1], &blkHeaderDummy, + params, &blocks[0]); + + BOOST_CHECK_EQUAL(nBits, initialBits); + + // Difficulty stays the same as long as we produce a block every 10 mins. + for (size_t j = 0; j < 10; i++, j++) { + blocks[i] = GetBlockIndex(&blocks[i - 1], 600, nBits); + BOOST_CHECK_EQUAL(GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy, + params, &blocks[0]), + nBits); + } + + // If we add a two blocks whose solvetimes together add up to 1200s, + // then the next block's target should be the same as the one before these + // blocks (at this point, equal to initialBits). + blocks[i] = GetBlockIndex(&blocks[i - 1], 300, nBits); + nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params, + &blocks[0]); + BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits, + &blocks[0])) < dMaxErr); + blocks[i] = GetBlockIndex(&blocks[i - 1], 900, nBits); + nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params, + &blocks[0]); + BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits, + &blocks[0])) < dMaxErr); + BOOST_CHECK_EQUAL(nBits, initialBits); + BOOST_CHECK(nBits != blocks[i - 1].nBits); + + // Same in reverse - this time slower block first, followed by faster block. 
+ blocks[i] = GetBlockIndex(&blocks[i - 1], 900, nBits); + nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params, + &blocks[0]); + BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits, + &blocks[0])) < dMaxErr); + blocks[i] = GetBlockIndex(&blocks[i - 1], 300, nBits); + nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params, + &blocks[0]); + BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits, + &blocks[0])) < dMaxErr); + BOOST_CHECK_EQUAL(nBits, initialBits); + BOOST_CHECK(nBits != blocks[i - 1].nBits); + + // Jumping forward 2 days should double the target + blocks[i] = GetBlockIndex(&blocks[i - 1], 600 + 2 * 24 * 3600, nBits); + nBits = GetNextASERTWorkRequired(&blocks[i++], &blkHeaderDummy, params, + &blocks[0]); + BOOST_CHECK(fabs(GetASERTApproximationError(&blocks[i - 1], nBits, + &blocks[0])) < dMaxErr); + currentPow = arith_uint256().SetCompact(nBits) / 2; + BOOST_CHECK_EQUAL(currentPow.GetCompact(), initialBits); + + // Iterate over the entire -2*24*3600..+2*24*3600 range to check that our + // integer approximation: + // 1. Should be monotonic. + // 2. Should change target at least once every 8 seconds (worst-case: + // 15-bit precision on nBits). + // 3. Should never change target by more than XXXX per 1-second step. + // 4. Never exceeds dMaxError in absolute error vs a double float + // calculation? + // 5. Has almost exactly the dMax and dMin errors we expect for the + // formula. 
+ double dMin = 0; + double dMax = 0; + double dErr; + double dMaxStep = 0; + uint32_t nBitsRingBuffer[8]; + double dStep = 0; + blocks[i] = GetBlockIndex(&blocks[i - 1], -2 * 24 * 3600 - 30, nBits); + for (size_t j = 0; j < 4 * 24 * 3600 + 660; j++) { + blocks[i].nTime++; + nBits = GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy, params, + &blocks[0]); + + if (j > 2) { + // 1: Monotonic + BOOST_CHECK( + arith_uint256().SetCompact(nBits) >= + arith_uint256().SetCompact(nBitsRingBuffer[(j - 1) % 8])); + // 2: Changes at least once every 8 seconds (worst case: nBits = + // 1d008000 to 1d008001) + BOOST_CHECK(arith_uint256().SetCompact(nBits) > + arith_uint256().SetCompact(nBitsRingBuffer[j % 8])); + // 3: Check 1-sec step size + dStep = (TargetFromBits(nBits) - + TargetFromBits(nBitsRingBuffer[(j - 1) % 8])) / + TargetFromBits(nBits); + dMaxStep = std::max(dStep, dMaxStep); + // from nBits = 1d008000 to 1d008001 + BOOST_CHECK(dStep < 0.0000314812106363); + } + nBitsRingBuffer[j % 8] = nBits; + + // 4 and 5: check error vs double precision float calculation + dErr = GetASERTApproximationError(&blocks[i], nBits, &blocks[0]); + dMin = std::min(dErr, dMin); + dMax = std::max(dErr, dMax); + BOOST_CHECK(fabs(dErr) < dMaxErr); + } + BOOST_CHECK(dMin < -0.0001013168981059); + BOOST_CHECK(dMin > -0.0001013168981060); + BOOST_CHECK(dMax > 0.0001166792656485); + BOOST_CHECK(dMax < 0.0001166792656486); + + // Difficulty increases as long as we produce fast blocks + for (size_t j = 0; j < 100; i++, j++) { + uint32_t nextBits; + arith_uint256 currentTarget; + currentTarget.SetCompact(nBits); + + blocks[i] = GetBlockIndex(&blocks[i - 1], 500, nBits); + nextBits = GetNextASERTWorkRequired(&blocks[i], &blkHeaderDummy, params, + &blocks[0]); + arith_uint256 nextTarget; + nextTarget.SetCompact(nextBits); + + // Make sure that the target decreases (i.e. difficulty increases) + BOOST_CHECK(nextTarget <= currentTarget); + + nBits = nextBits; + } +} + +// Tests of the CalculateASERT function.
+BOOST_AUTO_TEST_CASE(calculate_asert_test) { + DummyConfig config(CBaseChainParams::MAIN); + const Consensus::Params &params = config.GetChainParams().GetConsensus(); + const int64_t nHalfLife = params.nDAAHalfLife; + + const arith_uint256 powLimit = UintToArith256(params.powLimit); + arith_uint256 initialTarget = powLimit >> 4; + int64_t height = 0; + + // Steady + arith_uint256 nextTarget = + CalculateASERT(initialTarget, params.nPowTargetSpacing, + 600 /* nTimeDiff */, ++height, powLimit, nHalfLife); + BOOST_CHECK(nextTarget == initialTarget); + + // A block that arrives in half the expected time + nextTarget = CalculateASERT(initialTarget, params.nPowTargetSpacing, + 600 + 300, ++height, powLimit, nHalfLife); + BOOST_CHECK(nextTarget < initialTarget); + + // A block that makes up for the shortfall of the previous one, restores the + // target to initial + arith_uint256 prevTarget = nextTarget; + nextTarget = CalculateASERT(initialTarget, params.nPowTargetSpacing, + 600 + 300 + 900, ++height, powLimit, nHalfLife); + BOOST_CHECK(nextTarget > prevTarget); + BOOST_CHECK(nextTarget == initialTarget); + + // Two days ahead of schedule should double the target + prevTarget = nextTarget; + nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing, + 288 * 1200, 288, powLimit, nHalfLife); + BOOST_CHECK(nextTarget == prevTarget * 2); + + // Two days behind schedule should halve the target + prevTarget = nextTarget; + nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing, 288 * 0, + 288, powLimit, nHalfLife); + BOOST_CHECK(nextTarget == prevTarget / 2); + BOOST_CHECK(nextTarget == initialTarget); + + // Ramp up from initialTarget to PowLimit - should only take 4 doublings...
+ uint32_t powLimit_nBits = powLimit.GetCompact(); + uint32_t next_nBits; + for (size_t k = 0; k < 3; k++) { + prevTarget = nextTarget; + nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing, + 288 * 1200, 288, powLimit, nHalfLife); + BOOST_CHECK(nextTarget == prevTarget * 2); + BOOST_CHECK(nextTarget < powLimit); + next_nBits = nextTarget.GetCompact(); + BOOST_CHECK(next_nBits != powLimit_nBits); + } + + prevTarget = nextTarget; + nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing, + 288 * 1200, 288, powLimit, nHalfLife); + next_nBits = nextTarget.GetCompact(); + BOOST_CHECK(nextTarget == prevTarget * 2); + BOOST_CHECK(next_nBits == powLimit_nBits); + + // Fast periods now cannot increase target beyond POW limit, even if we try + // to overflow nextTarget. prevTarget is a uint256, so 256*2 = 512 days + // would overflow nextTarget unless CalculateASERT correctly detects this + // error + nextTarget = CalculateASERT(prevTarget, params.nPowTargetSpacing, + 512 * 144 * 600, 0, powLimit, nHalfLife); + next_nBits = nextTarget.GetCompact(); + BOOST_CHECK(next_nBits == powLimit_nBits); + + // We also need to watch for underflows on nextTarget. We need to withstand + // an extra ~444 days worth of blocks. This should bring down a powLimit + // target to the a minimum target of 1. + nextTarget = CalculateASERT(powLimit, params.nPowTargetSpacing, 0, + 2 * (256 - 34) * 144 + 1, powLimit, nHalfLife); + next_nBits = nextTarget.GetCompact(); + BOOST_CHECK(next_nBits == arith_uint256(1).GetCompact()); + + // Define a structure holding parameters to pass to CalculateASERT. + // We are going to check some expected results against a vector of + // possible arguments. 
+ struct calc_params { + arith_uint256 refTarget; + int64_t targetSpacing; + int64_t timeDiff; + int64_t heightDiff; + arith_uint256 expectedTarget; + uint32_t expectednBits; + }; + + // Define some named input argument values + const arith_uint256 SINGLE_300_TARGET{ + "00000000ffb1fffffffffffffffffffffffffffffffffffffffffffffffffffe"}; + + // Define our expected input and output values. + const std::vector calculate_args = { + + /* refTarget, targetSpacing, timeDiff, heightDiff, expectedTarget, + expectednBits */ + + {powLimit, 600, 0, 2 * 144, powLimit >> 1, 0x1c7fffff}, + {powLimit, 600, 0, 4 * 144, powLimit >> 2, 0x1c3fffff}, + {powLimit >> 1, 600, 0, 2 * 144, powLimit >> 2, 0x1c3fffff}, + {powLimit >> 2, 600, 0, 2 * 144, powLimit >> 3, 0x1c1fffff}, + {powLimit >> 3, 600, 0, 2 * 144, powLimit >> 4, 0x1c0fffff}, + {powLimit, 600, 0, 2 * (256 - 34) * 144, 3, 0x01030000}, + {powLimit, 600, 0, 2 * (256 - 34) * 144 + 1, 1, 0x01010000}, + // 1 bit less since we do not need to shift to 0 + {powLimit, 600, 0, 2 * (256 - 33) * 144, 1, 0x01010000}, + // more will not decrease below 1 + {powLimit, 600, 0, 2 * (256 - 32) * 144, 1, 0x01010000}, + {1, 600, 0, 2 * (256 - 32) * 144, 1, 0x01010000}, + {powLimit, 600, 2 * (512 - 32) * 144, 0, powLimit, powLimit_nBits}, + {1, 600, (512 - 64) * 144 * 600, 0, powLimit, powLimit_nBits}, + // clamps to powLimit + {powLimit, 600, 300, 1, SINGLE_300_TARGET, 0x1d00ffb1}, + }; + + for (auto &v : calculate_args) { + nextTarget = CalculateASERT(v.refTarget, v.targetSpacing, v.timeDiff, + v.heightDiff, powLimit, nHalfLife); + next_nBits = nextTarget.GetCompact(); + if (nextTarget != v.expectedTarget || next_nBits != v.expectednBits) { + BOOST_CHECK(nextTarget == v.expectedTarget); + BOOST_CHECK(next_nBits == v.expectednBits); + } + } +} + +BOOST_AUTO_TEST_SUITE_END()