diff --git a/.arclint b/.arclint index e83b697b2..5d137f199 100644 --- a/.arclint +++ b/.arclint @@ -1,341 +1,341 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": [ "clang-format-12", "clang-format" ], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^test/functional/.*\\.py$)" + "(^test/functional/)" ], "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "black": { "type": "black", "version": ">=23.0.0", "include": [ - "(^test/functional/.*\\.py$)" + "(^test/functional/)" ], "flags": [ "--preview" ] }, "flake8": { "type": "flake8", "version": ">=5.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--ignore=A003,E203,E303,E305,E501,E704,W503,W504", "--require-plugins=flake8-comprehensions,flake8-builtins" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/assumptions.h$)" ] }, "lint-source-filename": { "type": 
"lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": "lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { "type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.910", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", "(^contrib/macdeploy/)" ], "flags": [ "--ignore-missing-imports", "--install-types", "--non-interactive" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version": ">=2.6.0", "include": [ "(^cashtab/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)" ], "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": "eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", "(apps/alias-server/.*\\.js$)", "(modules/ecashaddrjs/.*\\.js$)", 
"(apps/ecash-herald/.*\\.js$)", "(modules/chronik-client/.*\\.(js|jsx|ts|tsx)$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 14f8103d9..cc6e876ef 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -1,262 +1,246 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Templates for constructing various sorts of invalid transactions. These templates (or an iterator over all of them) can be reused in different contexts to test using a number of invalid transaction types. Hopefully this makes it easier to get coverage of a full variety of tx validation checks through different interfaces (AcceptBlock, AcceptToMemPool, etc.) without repeating ourselves. Invalid tx cases not covered here can be found by running: $ diff \ <(grep -IREho "bad-txns[a-zA-Z-]+" src | sort -u) \ <(grep -IEho "bad-txns[a-zA-Z-]+" test/functional/data/invalid_txs.py | sort -u) """ import abc from typing import Optional from test_framework import script as sc from test_framework.blocktools import create_tx_with_script from test_framework.messages import MAX_MONEY, COutPoint, CTransaction, CTxIn, CTxOut from test_framework.script import ( OP_2DIV, OP_2MUL, OP_INVERT, OP_LSHIFT, OP_MUL, OP_RSHIFT, CScript, ) from test_framework.txtools import pad_tx -basic_p2sh = sc.CScript( - [sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL]) +basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL]) class BadTxTemplate: """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed.""" + __metaclass__ = abc.ABCMeta # The expected error code given by bitcoind upon submission of the tx. reject_reason: Optional[str] = "" # Only specified if it differs from mempool acceptance error. block_reject_reason = "" # Do we expect to be disconnected after submitting this tx? expect_disconnect = False # Is this tx considered valid when included in a block, but not for acceptance into # the mempool (i.e. does it violate policy but not consensus)? 
valid_in_block = False def __init__(self, *, spend_tx=None, spend_block=None): self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx self.spend_avail = sum(o.nValue for o in self.spend_tx.vout) - self.valid_txin = CTxIn( - COutPoint( - self.spend_tx.sha256, - 0), - b"", - 0xffffffff) + self.valid_txin = CTxIn(COutPoint(self.spend_tx.sha256, 0), b"", 0xFFFFFFFF) @abc.abstractmethod def get_tx(self, *args, **kwargs): """Return a CTransaction that is invalid per the subclass.""" pass class OutputMissing(BadTxTemplate): reject_reason = "bad-txns-vout-empty" expect_disconnect = True def get_tx(self): tx = CTransaction() tx.vin.append(self.valid_txin) tx.calc_sha256() return tx class InputMissing(BadTxTemplate): reject_reason = "bad-txns-vin-empty" expect_disconnect = True def get_tx(self): tx = CTransaction() tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE] * 100))) tx.calc_sha256() return tx class SizeTooSmall(BadTxTemplate): reject_reason = "bad-txns-undersize" expect_disconnect = False valid_in_block = True def get_tx(self): tx = CTransaction() tx.vin.append(self.valid_txin) tx.vout.append(CTxOut(0, sc.CScript([sc.OP_TRUE]))) tx.calc_sha256() return tx class BadInputOutpointIndex(BadTxTemplate): # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins # database can't distinguish between spent outpoints and outpoints which # never existed. reject_reason = None expect_disconnect = False def get_tx(self): num_indices = len(self.spend_tx.vin) bad_idx = num_indices + 100 tx = CTransaction() - tx.vin.append( - CTxIn( - COutPoint( - self.spend_tx.sha256, - bad_idx), - b"", - 0xffffffff)) + tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256, bad_idx), b"", 0xFFFFFFFF)) tx.vout.append(CTxOut(0, basic_p2sh)) tx.calc_sha256() return tx class DuplicateInput(BadTxTemplate): - reject_reason = 'bad-txns-inputs-duplicate' + reject_reason = "bad-txns-inputs-duplicate" expect_disconnect = True def get_tx(self): tx = CTransaction() tx.vin.append(self.valid_txin) tx.vin.append(self.valid_txin) tx.vout.append(CTxOut(1, basic_p2sh)) tx.calc_sha256() return tx class PrevoutNullInput(BadTxTemplate): - reject_reason = 'bad-txns-prevout-null' + reject_reason = "bad-txns-prevout-null" expect_disconnect = True def get_tx(self): tx = CTransaction() tx.vin.append(self.valid_txin) - tx.vin.append(CTxIn(COutPoint(txid=0, n=0xffffffff))) + tx.vin.append(CTxIn(COutPoint(txid=0, n=0xFFFFFFFF))) tx.vout.append(CTxOut(1, basic_p2sh)) tx.calc_sha256() return tx class NonexistentInput(BadTxTemplate): # Added as an orphan tx. 
reject_reason = None expect_disconnect = False def get_tx(self): tx = CTransaction() - tx.vin.append( - CTxIn( - COutPoint( - self.spend_tx.sha256 + - 1, - 0), - b"", - 0xffffffff)) + tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256 + 1, 0), b"", 0xFFFFFFFF)) tx.vin.append(self.valid_txin) tx.vout.append(CTxOut(1, basic_p2sh)) tx.calc_sha256() return tx class SpendTooMuch(BadTxTemplate): - reject_reason = 'bad-txns-in-belowout' + reject_reason = "bad-txns-in-belowout" expect_disconnect = True def get_tx(self): return create_tx_with_script( - self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1)) + self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1) + ) class CreateNegative(BadTxTemplate): - reject_reason = 'bad-txns-vout-negative' + reject_reason = "bad-txns-vout-negative" expect_disconnect = True def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=-1) class CreateTooLarge(BadTxTemplate): - reject_reason = 'bad-txns-vout-toolarge' + reject_reason = "bad-txns-vout-toolarge" expect_disconnect = True def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1) class CreateSumTooLarge(BadTxTemplate): - reject_reason = 'bad-txns-txouttotal-toolarge' + reject_reason = "bad-txns-txouttotal-toolarge" expect_disconnect = True def get_tx(self): tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY) tx.vout = [tx.vout[0]] * 2 tx.calc_sha256() return tx class InvalidOPIFConstruction(BadTxTemplate): reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" expect_disconnect = True valid_in_block = True def get_tx(self): return create_tx_with_script( - self.spend_tx, 0, script_sig=b'\x64' * 35, - amount=(self.spend_avail // 2)) + self.spend_tx, 0, script_sig=b"\x64" * 35, amount=(self.spend_avail // 2) + ) def getDisabledOpcodeTemplate(opcode): - """ Creates disabled opcode tx template class""" + """Creates disabled opcode tx template class""" def get_tx(self): tx = CTransaction() vin = self.valid_txin vin.scriptSig = CScript([opcode]) tx.vin.append(vin) tx.vout.append(CTxOut(1, basic_p2sh)) pad_tx(tx) tx.calc_sha256() return tx - return type(f"DisabledOpcode_{str(opcode)}", (BadTxTemplate,), { - 'reject_reason': "disabled opcode", - 'expect_disconnect': True, - 'get_tx': get_tx, - 'valid_in_block': True - }) + return type( + f"DisabledOpcode_{str(opcode)}", + (BadTxTemplate,), + { + "reject_reason": "disabled opcode", + "expect_disconnect": True, + "get_tx": get_tx, + "valid_in_block": True, + }, + ) # Disabled opcode tx templates (CVE-2010-5137) -DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [ - OP_INVERT, - OP_2MUL, - OP_2DIV, - OP_MUL, - OP_LSHIFT, - OP_RSHIFT]] +DisabledOpcodeTemplates = [ + getDisabledOpcodeTemplate(opcode) + for opcode in [OP_INVERT, OP_2MUL, OP_2DIV, OP_MUL, OP_LSHIFT, OP_RSHIFT] +] def iter_all_templates(): """Iterate through all bad transaction template types.""" return BadTxTemplate.__subclasses__() diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 0f801e32c..c75e2d981 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -1,137 +1,128 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
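# Illustrative usage sketch (not part of this changeset) showing how the
# BadTxTemplate subclasses above are typically consumed by a functional
# test; the import path and the confirmed_tx parameter are assumptions.
from data import invalid_txs

def check_all_templates(node, confirmed_tx):
    # confirmed_tx: any mined CTransaction whose outputs the templates can spend
    for template_cls in invalid_txs.iter_all_templates():
        template = template_cls(spend_tx=confirmed_tx)
        bad_tx = template.get_tx()
        # submit bad_tx via P2P or RPC and assert on template.reject_reason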
"""Encode and decode BASE58, P2PKH and P2SH addresses.""" import unittest from .script import OP_TRUE, CScript, CScriptOp, hash160, hash256 from .util import assert_equal -ADDRESS_ECREG_UNSPENDABLE = 'ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt' -ADDRESS_ECREG_UNSPENDABLE_DESCRIPTOR = 'addr(ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt)#u6xx93xc' +ADDRESS_ECREG_UNSPENDABLE = "ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt" +ADDRESS_ECREG_UNSPENDABLE_DESCRIPTOR = ( + "addr(ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt)#u6xx93xc" +) # Coins sent to this address can be spent with a scriptSig of just OP_TRUE -ADDRESS_ECREG_P2SH_OP_TRUE = 'ecregtest:prdpw30fk4ym6zl6rftfjuw806arpn26fvkgfu97xt' -P2SH_OP_TRUE = CScript.fromhex( - 'a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87') +ADDRESS_ECREG_P2SH_OP_TRUE = "ecregtest:prdpw30fk4ym6zl6rftfjuw806arpn26fvkgfu97xt" +P2SH_OP_TRUE = CScript.fromhex("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87") SCRIPTSIG_OP_TRUE = CScriptOp.encode_op_pushdata(CScript([OP_TRUE])) -chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' +chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" def byte_to_base58(b, version): - result = '' + result = "" # prepend version b = bytes([version]) + b # append checksum b += hash256(b)[:4] - value = int.from_bytes(b, 'big') + value = int.from_bytes(b, "big") while value > 0: result = chars[value % 58] + result value //= 58 while b[0] == 0: result = chars[0] + result b = b[1:] return result def base58_to_byte(s): """Converts a base58-encoded string to its data and version. Throws if the base58 checksum is invalid.""" if not s: - return b'' + return b"" n = 0 for c in s: n *= 58 assert c in chars digit = chars.index(c) n += digit - h = f'{n:x}' + h = f"{n:x}" if len(h) % 2: h = f"0{h}" - res = n.to_bytes((n.bit_length() + 7) // 8, 'big') + res = n.to_bytes((n.bit_length() + 7) // 8, "big") pad = 0 for c in s: if c == chars[0]: pad += 1 else: break - res = b'\x00' * pad + res + res = b"\x00" * pad + res # Assert if the checksum is invalid assert_equal(hash256(res[:-4])[:4], res[-4:]) return res[1:-4], int(res[0]) def keyhash_to_p2pkh(keyhash, main=False): - assert (len(keyhash) == 20) + assert len(keyhash) == 20 version = 0 if main else 111 return byte_to_base58(keyhash, version) def scripthash_to_p2sh(scripthash, main=False): - assert (len(scripthash) == 20) + assert len(scripthash) == 20 version = 5 if main else 196 return byte_to_base58(scripthash, version) def key_to_p2pkh(key, main=False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main=False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def check_key(key): - if (isinstance(key, str)): + if isinstance(key, str): key = bytes.fromhex(key) # Assuming this is hex string - if (isinstance(key, bytes) and (len(key) == 33 or len(key) == 65)): + if isinstance(key, bytes) and (len(key) == 33 or len(key) == 65): return key assert False def check_script(script): - if (isinstance(script, str)): + if isinstance(script, str): script = bytes.fromhex(script) # Assuming this is hex string - if (isinstance(script, bytes) or isinstance(script, CScript)): + if isinstance(script, bytes) or isinstance(script, CScript): return script assert False class TestFrameworkScript(unittest.TestCase): def test_base58encodedecode(self): def check_base58(data, version): self.assertEqual( - base58_to_byte(byte_to_base58(data, version)), - (data, version)) - - check_base58( - 
bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111) - check_base58( - bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111) - check_base58( - bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111) - check_base58( - bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111) - check_base58( - bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111) - check_base58( - bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111) - check_base58( - bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0) - check_base58( - bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0) - check_base58( - bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0) - check_base58( - bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0) - check_base58( - bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0) + base58_to_byte(byte_to_base58(data, version)), (data, version) + ) + + check_base58(bytes.fromhex("1f8ea1702a7bd4941bca0941b852c4bbfedb2e05"), 111) + check_base58(bytes.fromhex("3a0b05f4d7f66c3ba7009f453530296c845cc9cf"), 111) + check_base58(bytes.fromhex("41c1eaf111802559bad61b60d62b1f897c63928a"), 111) + check_base58(bytes.fromhex("0041c1eaf111802559bad61b60d62b1f897c63928a"), 111) + check_base58(bytes.fromhex("000041c1eaf111802559bad61b60d62b1f897c63928a"), 111) check_base58( - bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0) + bytes.fromhex("00000041c1eaf111802559bad61b60d62b1f897c63928a"), 111 + ) + check_base58(bytes.fromhex("1f8ea1702a7bd4941bca0941b852c4bbfedb2e05"), 0) + check_base58(bytes.fromhex("3a0b05f4d7f66c3ba7009f453530296c845cc9cf"), 0) + check_base58(bytes.fromhex("41c1eaf111802559bad61b60d62b1f897c63928a"), 0) + check_base58(bytes.fromhex("0041c1eaf111802559bad61b60d62b1f897c63928a"), 0) + check_base58(bytes.fromhex("000041c1eaf111802559bad61b60d62b1f897c63928a"), 0) + check_base58(bytes.fromhex("00000041c1eaf111802559bad61b60d62b1f897c63928a"), 0) diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index ac1452c2d..0216f4a4a 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -1,231 +1,285 @@ #!/usr/bin/env python3 # Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to bitcoind. 
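# A minimal round-trip sketch for the base58 helpers above, reusing a test
# vector from TestFrameworkScript; version byte 111 is the testnet/regtest
# P2PKH prefix already used by keyhash_to_p2pkh.
payload = bytes.fromhex("1f8ea1702a7bd4941bca0941b852c4bbfedb2e05")
addr = byte_to_base58(payload, 111)
assert base58_to_byte(addr) == (payload, 111)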
AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging import os import socket import time import urllib.parse from http import HTTPStatus HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error, http_status=None): try: errmsg = f"{rpc_error['message']} ({rpc_error['code']})" except (KeyError, TypeError): - errmsg = '' + errmsg = "" super().__init__(errmsg) self.error = rpc_error self.http_status = http_status def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(f"{repr(o)} is not JSON serializable") class AuthServiceProxy: __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps - def __init__(self, service_url, service_name=None, - timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): + def __init__( + self, + service_url, + service_name=None, + timeout=HTTP_TIMEOUT, + connection=None, + ensure_ascii=True, + ): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) - user = None if self.__url.username is None else self.__url.username.encode( - 'utf8') - passwd = None if self.__url.password is None else self.__url.password.encode( - 'utf8') - authpair = user + b':' + passwd - self.__auth_header = b'Basic ' + base64.b64encode(authpair) + user = ( + None if self.__url.username is None else self.__url.username.encode("utf8") + ) + passwd = ( + None if self.__url.password is None else self.__url.password.encode("utf8") + ) + authpair = user + b":" + passwd + self.__auth_header = b"Basic " + base64.b64encode(authpair) self.timeout = timeout self._set_conn(connection) def __getattr__(self, name): - if name.startswith('__') and name.endswith('__'): + if name.startswith("__") and name.endswith("__"): # Python internal stuff raise AttributeError if self._service_name is not None: name = f"{self._service_name}.{name}" - return AuthServiceProxy( - self.__service_url, name, connection=self.__conn) + return AuthServiceProxy(self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): - ''' + """ Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
- ''' - headers = {'Host': self.__url.hostname, - 'User-Agent': USER_AGENT, - 'Authorization': self.__auth_header, - 'Content-type': 'application/json'} - if os.name == 'nt': + """ + headers = { + "Host": self.__url.hostname, + "User-Agent": USER_AGENT, + "Authorization": self.__auth_header, + "Content-type": "application/json", + } + if os.name == "nt": # Windows somehow does not like to re-use connections # TODO: Find out why the connection would disconnect occasionally # and make it reusable on Windows # Avoid "ConnectionAbortedError: [WinError 10053] An established # connection was aborted by the software in your host machine" self._set_conn() try: self.__conn.request(method, path, postdata, headers) return self._get_response() except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError when the connection was reset # ConnectionResetError happens on FreeBSD self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() except OSError as e: retry = ( - '[WinError 10053] An established connection was aborted by the software in your host machine' in str(e)) + "[WinError 10053] An established connection was aborted by the software" + " in your host machine" + in str(e) + ) # Workaround for a bug on macOS. See # https://bugs.python.org/issue33450 - retry = retry or ( - '[Errno 41] Protocol wrong type for socket' in str(e)) + retry = retry or ("[Errno 41] Protocol wrong type for socket" in str(e)) if retry: self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 - log.debug("-{}-> {} {}".format( - AuthServiceProxy.__id_count, - self._service_name, - json.dumps( - args or argsn, - default=EncodeDecimal, - ensure_ascii=self.ensure_ascii), - )) + log.debug( + "-{}-> {} {}".format( + AuthServiceProxy.__id_count, + self._service_name, + json.dumps( + args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii + ), + ) + ) if args and argsn: - raise ValueError( - 'Cannot handle both named and positional arguments') - return {'version': '1.1', - 'method': self._service_name, - 'params': args or argsn, - 'id': AuthServiceProxy.__id_count} + raise ValueError("Cannot handle both named and positional arguments") + return { + "version": "1.1", + "method": self._service_name, + "params": args or argsn, + "id": AuthServiceProxy.__id_count, + } def __call__(self, *args, **argsn): - postdata = json.dumps(self.get_request( - *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) + postdata = json.dumps( + self.get_request(*args, **argsn), + default=EncodeDecimal, + ensure_ascii=self.ensure_ascii, + ) response, status = self._request( - 'POST', self.__url.path, postdata.encode('utf-8')) - if response['error'] is not None: - raise JSONRPCException(response['error'], status) - elif 'result' not in response: - raise JSONRPCException({ - 'code': -343, 'message': 'missing JSON-RPC result'}, status) + "POST", self.__url.path, postdata.encode("utf-8") + ) + if response["error"] is not None: + raise JSONRPCException(response["error"], status) + elif "result" not in response: + raise JSONRPCException( + {"code": -343, "message": "missing JSON-RPC result"}, status + ) elif status != HTTPStatus.OK: - raise JSONRPCException({ - 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) + raise JSONRPCException( + { + "code": -342, + "message": "non-200 HTTP status code but no 
JSON-RPC error", + }, + status, + ) else: - return response['result'] + return response["result"] def batch(self, rpc_call_list): postdata = json.dumps( - list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) + list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii + ) log.debug(f"--> {postdata}") response, status = self._request( - 'POST', self.__url.path, postdata.encode('utf-8')) + "POST", self.__url.path, postdata.encode("utf-8") + ) if status != HTTPStatus.OK: - raise JSONRPCException({ - 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) + raise JSONRPCException( + { + "code": -342, + "message": "non-200 HTTP status code but no JSON-RPC error", + }, + status, + ) return response def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout: - raise JSONRPCException({ - 'code': -344, - 'message': - f'{self._service_name!r} RPC took longer than ' - f'{self.__conn.timeout} seconds. Consider using larger ' - 'timeout for calls that take longer to return.'}) + raise JSONRPCException( + { + "code": -344, + "message": ( + f"{self._service_name!r} RPC took longer than " + f"{self.__conn.timeout} seconds. Consider using larger " + "timeout for calls that take longer to return." + ), + } + ) if http_response is None: - raise JSONRPCException({ - 'code': -342, 'message': 'missing HTTP response from server'}) + raise JSONRPCException( + {"code": -342, "message": "missing HTTP response from server"} + ) - content_type = http_response.getheader('Content-Type') - if content_type != 'application/json': + content_type = http_response.getheader("Content-Type") + if content_type != "application/json": raise JSONRPCException( - {'code': -342, - 'message': f'non-JSON HTTP response with \'{http_response.status} ' - f'{http_response.reason}\' from server'}, - http_response.status) + { + "code": -342, + "message": ( + f"non-JSON HTTP response with '{http_response.status} " + f"{http_response.reason}' from server" + ), + }, + http_response.status, + ) - responsedata = http_response.read().decode('utf8') + responsedata = http_response.read().decode("utf8") response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: - log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps( - response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + log.debug( + "<-{}- [{:.6f}] {}".format( + response["id"], + elapsed, + json.dumps( + response["result"], + default=EncodeDecimal, + ensure_ascii=self.ensure_ascii, + ), + ) + ) else: log.debug(f"<-- [{elapsed:.6f}] {responsedata}") return response, http_response.status def __truediv__(self, relative_uri): - return AuthServiceProxy(f"{self.__service_url}/{relative_uri}", - self._service_name, connection=self.__conn) + return AuthServiceProxy( + f"{self.__service_url}/{relative_uri}", + self._service_name, + connection=self.__conn, + ) def _set_conn(self, connection=None): port = 80 if self.__url.port is None else self.__url.port if connection: self.__conn = connection self.timeout = connection.timeout - elif self.__url.scheme == 'https': + elif self.__url.scheme == "https": self.__conn = http.client.HTTPSConnection( - self.__url.hostname, port, timeout=self.timeout) + self.__url.hostname, port, timeout=self.timeout + ) else: self.__conn = http.client.HTTPConnection( - self.__url.hostname, port, 
timeout=self.timeout) + self.__url.hostname, port, timeout=self.timeout + ) diff --git a/test/functional/test_framework/avatools.py b/test/functional/test_framework/avatools.py index 86e34d216..a1dfe249c 100644 --- a/test/functional/test_framework/avatools.py +++ b/test/functional/test_framework/avatools.py @@ -1,459 +1,479 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin ABC developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for avalanche tests.""" import random import struct from typing import TYPE_CHECKING, Any, Dict, List, Optional from .authproxy import JSONRPCException from .key import ECKey from .messages import ( MSG_AVA_PROOF, MSG_BLOCK, NODE_AVALANCHE, NODE_NETWORK, AvalancheDelegation, AvalancheProof, AvalancheResponse, AvalancheVote, AvalancheVoteError, CInv, CTransaction, FromHex, TCPAvalancheResponse, ToHex, calculate_shortid, hash256, msg_avahello, msg_avapoll, msg_avaproof, msg_avaproofs, msg_notfound, msg_tcpavaresponse, ) from .p2p import P2PInterface, p2p_lock if TYPE_CHECKING: from .test_framework import BitcoinTestFramework from .test_node import TestNode from .util import satoshi_round, uint256_hex, wait_until_helper from .wallet_util import bytes_to_wif def avalanche_proof_from_hex(proof_hex: str) -> AvalancheProof: return FromHex(AvalancheProof(), proof_hex) def create_coinbase_stakes( - node: TestNode, - blockhashes: List[str], - priv_key: str, - amount: Optional[str] = None) -> List[Dict[str, Any]]: + node: TestNode, blockhashes: List[str], priv_key: str, amount: Optional[str] = None +) -> List[Dict[str, Any]]: """Returns a list of dictionaries representing stakes, in a format compatible with the buildavalancheproof RPC, using only coinbase transactions. :param node: Test node used to get the block and coinbase data. :param blockhashes: List of block hashes, whose coinbase tx will be used as a stake. :param priv_key: Private key controlling the coinbase UTXO :param amount: If specified, this overwrites the amount information in the coinbase dicts. """ blocks = [node.getblock(h, 2) for h in blockhashes] coinbases = [ { - 'height': b['height'], - 'txid': b['tx'][0]['txid'], - 'n': 0, - 'value': b['tx'][0]['vout'][0]['value'], - } for b in blocks + "height": b["height"], + "txid": b["tx"][0]["txid"], + "n": 0, + "value": b["tx"][0]["vout"][0]["value"], + } + for b in blocks ] - return [{ - 'txid': coinbase['txid'], - 'vout': coinbase['n'], - 'amount': amount or coinbase['value'], - 'height': coinbase['height'], - 'iscoinbase': True, - 'privatekey': priv_key, - } for coinbase in coinbases] + return [ + { + "txid": coinbase["txid"], + "vout": coinbase["n"], + "amount": amount or coinbase["value"], + "height": coinbase["height"], + "iscoinbase": True, + "privatekey": priv_key, + } + for coinbase in coinbases + ] def get_utxos_in_blocks(node: TestNode, blockhashes: List[str]) -> List[Dict]: - """Return all UTXOs in the specified list of blocks. 
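# Hedged usage sketch for the AuthServiceProxy class above; the URL,
# credentials and wallet name are placeholders. Attribute access builds the
# JSON-RPC method name, calling the proxy performs the HTTP POST, and the
# `/` operator (__truediv__) appends a path to the service URL.
proxy = AuthServiceProxy("http://user:pass@127.0.0.1:18443")
block_count = proxy.getblockcount()
wallet_proxy = proxy / "wallet/default"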
- """ + """Return all UTXOs in the specified list of blocks.""" utxos = filter( lambda u: node.gettransaction(u["txid"])["blockhash"] in blockhashes, - node.listunspent()) + node.listunspent(), + ) return list(utxos) def create_stakes( - test_framework: 'BitcoinTestFramework', - node: TestNode, - blockhashes: List[str], - count: int, - sync_fun=None,) -> List[Dict[str, Any]]: + test_framework: "BitcoinTestFramework", + node: TestNode, + blockhashes: List[str], + count: int, + sync_fun=None, +) -> List[Dict[str, Any]]: """ Create a list of stakes by splitting existing UTXOs from a specified list of blocks into 10 new coins. This function can generate more valid stakes than `get_coinbase_stakes` does, because on the regtest chain halving happens every 150 blocks so the coinbase amount is below the dust threshold after only 900 blocks. :param node: Test node used to generate blocks and send transactions :param blockhashes: List of block hashes whose UTXOs will be split. :param count: Number of stakes to return. """ assert 10 * len(blockhashes) >= count utxos = get_utxos_in_blocks(node, blockhashes) addresses = [node.getnewaddress() for _ in range(10)] private_keys = {addr: node.dumpprivkey(addr) for addr in addresses} for u in utxos: inputs = [{"txid": u["txid"], "vout": u["vout"]}] - outputs = { - addr: satoshi_round(u['amount'] / 10) for addr in addresses} + outputs = {addr: satoshi_round(u["amount"] / 10) for addr in addresses} raw_tx = node.createrawtransaction(inputs, outputs) ctx = FromHex(CTransaction(), raw_tx) ctx.vout[0].nValue -= node.calculate_fee(ctx) signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"] node.sendrawtransaction(signed_tx) # confirm the transactions new_blocks = [] - while node.getmempoolinfo()['size'] > 0: + while node.getmempoolinfo()["size"] > 0: new_blocks += test_framework.generate( - node, 1, sync_fun=test_framework.no_op if sync_fun is None else sync_fun) + node, 1, sync_fun=test_framework.no_op if sync_fun is None else sync_fun + ) utxos = get_utxos_in_blocks(node, new_blocks) stakes = [] # cache block heights heights = {} for utxo in utxos[:count]: blockhash = node.gettransaction(utxo["txid"])["blockhash"] if blockhash not in heights: heights[blockhash] = node.getblock(blockhash, 1)["height"] - stakes.append({ - 'txid': utxo['txid'], - 'vout': utxo['vout'], - 'amount': utxo['amount'], - 'iscoinbase': utxo['label'] == "coinbase", - 'height': heights[blockhash], - 'privatekey': private_keys[utxo["address"]], - }) + stakes.append( + { + "txid": utxo["txid"], + "vout": utxo["vout"], + "amount": utxo["amount"], + "iscoinbase": utxo["label"] == "coinbase", + "height": heights[blockhash], + "privatekey": private_keys[utxo["address"]], + } + ) return stakes def get_proof_ids(node): - return [int(peer['proofid'], 16) for peer in node.getavalanchepeerinfo()] + return [int(peer["proofid"], 16) for peer in node.getavalanchepeerinfo()] def wait_for_proof(node, proofid_hex, expect_status="boundToPeer", timeout=60): """ Wait for the proof to be known by the node. The expect_status is checked once after the proof is found and can be one of the following: "immature", "boundToPeer", "conflicting" or "finalized". 
""" ret = {} def proof_found(): nonlocal ret try: ret = node.getrawavalancheproof(proofid_hex) return True except JSONRPCException: return False + wait_until_helper(proof_found, timeout=timeout) assert ret.get(expect_status, False) is True class NoHandshakeAvaP2PInterface(P2PInterface): """P2PInterface with avalanche capabilities""" def __init__(self): self.round = 0 self.avahello = None self.avaresponses = [] self.avapolls = [] self.nodeid: Optional[int] = None super().__init__() def peer_connect(self, *args, **kwargs): create_conn = super().peer_connect(*args, **kwargs) # Save the nonce and extra entropy so they can be reused later. self.local_nonce = self.on_connection_send_msg.nNonce self.local_extra_entropy = self.on_connection_send_msg.nExtraEntropy return create_conn def peer_accept_connection(self, *args, **kwargs): create_conn = super().peer_accept_connection(*args, **kwargs) # Save the nonce and extra entropy so they can be reused later. self.local_nonce = self.on_connection_send_msg.nNonce self.local_extra_entropy = self.on_connection_send_msg.nExtraEntropy return create_conn def on_version(self, message): super().on_version(message) # Save the nonce and extra entropy so they can be reused later. self.remote_nonce = message.nNonce self.remote_extra_entropy = message.nExtraEntropy def on_avaresponse(self, message): self.avaresponses.append(message.response) def on_avapoll(self, message): self.avapolls.append(message.poll) def on_avahello(self, message): assert self.avahello is None self.avahello = message def send_avaresponse(self, avaround, votes, privkey): response = AvalancheResponse(avaround, 0, votes) sig = privkey.sign_schnorr(response.get_hash()) msg = msg_tcpavaresponse() msg.response = TCPAvalancheResponse(response, sig) self.send_message(msg) def wait_for_avaresponse(self, timeout=5): - self.wait_until( - lambda: len(self.avaresponses) > 0, - timeout=timeout) + self.wait_until(lambda: len(self.avaresponses) > 0, timeout=timeout) with p2p_lock: return self.avaresponses.pop(0) def send_poll(self, hashes, inv_type=MSG_BLOCK): msg = msg_avapoll() msg.poll.round = self.round self.round += 1 for h in hashes: msg.poll.invs.append(CInv(inv_type, h)) self.send_message(msg) def send_proof(self, proof): msg = msg_avaproof() msg.proof = proof self.send_message(msg) def get_avapoll_if_available(self): with p2p_lock: return self.avapolls.pop(0) if len(self.avapolls) > 0 else None def wait_for_avahello(self, timeout=5): - self.wait_until( - lambda: self.avahello is not None, - timeout=timeout) + self.wait_until(lambda: self.avahello is not None, timeout=timeout) with p2p_lock: return self.avahello - def build_avahello(self, delegation: AvalancheDelegation, - delegated_privkey: ECKey) -> msg_avahello: + def build_avahello( + self, delegation: AvalancheDelegation, delegated_privkey: ECKey + ) -> msg_avahello: local_sighash = hash256( - delegation.getid() + - struct.pack(" 0: self.send_message(msg_notfound(not_found)) def get_ava_p2p_interface_no_handshake( - node: TestNode, - services=NODE_NETWORK | NODE_AVALANCHE) -> NoHandshakeAvaP2PInterface: + node: TestNode, services=NODE_NETWORK | NODE_AVALANCHE +) -> NoHandshakeAvaP2PInterface: """Build and return a NoHandshakeAvaP2PInterface connected to the specified TestNode. 
""" n = NoHandshakeAvaP2PInterface() - node.add_p2p_connection( - n, services=services) + node.add_p2p_connection(n, services=services) n.wait_for_verack() - n.nodeid = node.getpeerinfo()[-1]['id'] + n.nodeid = node.getpeerinfo()[-1]["id"] return n def get_ava_p2p_interface( - test_framework: 'BitcoinTestFramework', - node: TestNode, - services=NODE_NETWORK | NODE_AVALANCHE, - stake_utxo_confirmations=1, - sync_fun=None,) -> AvaP2PInterface: - """Build and return an AvaP2PInterface connected to the specified TestNode. - """ + test_framework: "BitcoinTestFramework", + node: TestNode, + services=NODE_NETWORK | NODE_AVALANCHE, + stake_utxo_confirmations=1, + sync_fun=None, +) -> AvaP2PInterface: + """Build and return an AvaP2PInterface connected to the specified TestNode.""" n = AvaP2PInterface(test_framework, node) # Make sure the proof utxos are mature if stake_utxo_confirmations > 1: test_framework.generate( node, stake_utxo_confirmations - 1, - sync_fun=test_framework.no_op if sync_fun is None else sync_fun) + sync_fun=test_framework.no_op if sync_fun is None else sync_fun, + ) assert node.verifyavalancheproof(n.proof.serialize().hex()) proofid_hex = uint256_hex(n.proof.proofid) node.add_p2p_connection(n, services=services) - n.nodeid = node.getpeerinfo()[-1]['id'] + n.nodeid = node.getpeerinfo()[-1]["id"] def avapeer_connected(): node_list = [] try: - node_list = node.getavalanchepeerinfo(proofid_hex)[0]['node_list'] + node_list = node.getavalanchepeerinfo(proofid_hex)[0]["node_list"] except BaseException: pass return n.nodeid in node_list wait_until_helper(avapeer_connected, timeout=5) return n def gen_proof(test_framework, node, coinbase_utxos=1, expiry=0, sync_fun=None): blockhashes = test_framework.generate( node, coinbase_utxos, - sync_fun=test_framework.no_op if sync_fun is None else sync_fun) + sync_fun=test_framework.no_op if sync_fun is None else sync_fun, + ) privkey = ECKey() privkey.generate() stakes = create_coinbase_stakes( - node, blockhashes, node.get_deterministic_priv_key().key) + node, blockhashes, node.get_deterministic_priv_key().key + ) proof_hex = node.buildavalancheproof( - 42, expiry, bytes_to_wif(privkey.get_bytes()), stakes) + 42, expiry, bytes_to_wif(privkey.get_bytes()), stakes + ) return privkey, avalanche_proof_from_hex(proof_hex) -def build_msg_avaproofs(proofs: List[AvalancheProof], prefilled_proofs: Optional[List[AvalancheProof]] - = None, key_pair: Optional[List[int]] = None) -> msg_avaproofs: +def build_msg_avaproofs( + proofs: List[AvalancheProof], + prefilled_proofs: Optional[List[AvalancheProof]] = None, + key_pair: Optional[List[int]] = None, +) -> msg_avaproofs: if key_pair is None: key_pair = [random.randint(0, 2**64 - 1)] * 2 msg = msg_avaproofs() msg.key0 = key_pair[0] msg.key1 = key_pair[1] msg.prefilled_proofs = prefilled_proofs or [] msg.shortids = [ - calculate_shortid( - msg.key0, - msg.key1, - proof.proofid) for proof in proofs] + calculate_shortid(msg.key0, msg.key1, proof.proofid) for proof in proofs + ] return msg def can_find_inv_in_poll(quorum, inv_hash, response=AvalancheVoteError.ACCEPTED): found_hash = False for n in quorum: poll = n.get_avapoll_if_available() # That node has not received a poll if poll is None: continue # We got a poll, check for the hash and repond votes = [] for inv in poll.invs: # Vote yes to everything r = AvalancheVoteError.ACCEPTED # Look for what we expect if inv.hash == inv_hash: r = response found_hash = True votes.append(AvalancheVote(r, inv.hash)) n.send_avaresponse(poll.round, votes, 
n.delegated_privkey) return found_hash diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 39c294403..aa556c71a 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -1,245 +1,251 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for manipulating blocks and transactions.""" import struct import time import unittest from typing import Optional from .messages import ( XEC, CBlock, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from .script import ( OP_1, OP_CHECKSIG, OP_DUP, OP_EQUALVERIFY, OP_HASH160, OP_RETURN, OP_TRUE, CScript, CScriptNum, CScriptOp, ) from .txtools import pad_tx from .util import assert_equal, satoshi_round # Genesis block data (regtest) TIME_GENESIS_BLOCK = 1296688602 -GENESIS_BLOCK_HASH = '0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206' -GENESIS_CB_TXID = '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b' +GENESIS_BLOCK_HASH = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206" +GENESIS_CB_TXID = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b" GENESIS_CB_PK = ( - '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38' - 'c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f' + "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38" + "c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f" ) -GENESIS_CB_SCRIPT_PUBKEY = CScript.fromhex(f'41{GENESIS_CB_PK}ac') +GENESIS_CB_SCRIPT_PUBKEY = CScript.fromhex(f"41{GENESIS_CB_PK}ac") GENESIS_CB_SCRIPT_SIG = CScript( - b'\x04\xff\xff\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink of ' - b'second bailout for banks' + b"\x04\xff\xff\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink of " + b"second bailout for banks" ) MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60 def create_block( - hashprev: Optional[int] = None, - coinbase: Optional[CTransaction] = None, - ntime: Optional[int] = None, - *, - version: Optional[int] = None, - tmpl: Optional[dict] = None) -> CBlock: + hashprev: Optional[int] = None, + coinbase: Optional[CTransaction] = None, + ntime: Optional[int] = None, + *, + version: Optional[int] = None, + tmpl: Optional[dict] = None, +) -> CBlock: """Create a block (with regtest difficulty).""" block = CBlock() if tmpl is None: tmpl = {} - block.nVersion = version or tmpl.get('version', 1) - block.nTime = ntime or tmpl.get('curtime', int(time.time() + 600)) - block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10) - if tmpl.get('bits') is not None: - block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0] + block.nVersion = version or tmpl.get("version", 1) + block.nTime = ntime or tmpl.get("curtime", int(time.time() + 600)) + block.hashPrevBlock = hashprev or int(tmpl["previousblockhash"], 0x10) + if tmpl.get("bits") is not None: + block.nBits = struct.unpack(">I", bytes.fromhex(tmpl["bits"]))[0] else: # difficulty retargeting is disabled in REGTEST chainparams - block.nBits = 0x207fffff - block.vtx.append(coinbase or create_coinbase(height=tmpl['height'])) + block.nBits = 0x207FFFFF + block.vtx.append(coinbase or create_coinbase(height=tmpl["height"])) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block def make_conform_to_ctor(block: CBlock): for 
tx in block.vtx: tx.rehash() - block.vtx = [block.vtx[0]] + \ - sorted(block.vtx[1:], key=lambda tx: tx.get_id()) + block.vtx = [block.vtx[0]] + sorted(block.vtx[1:], key=lambda tx: tx.get_id()) def script_BIP34_coinbase_height(height: int) -> CScript: if height <= 16: res = CScriptOp.encode_op_n(height) # Append dummy to increase scriptSig size above 2 # (see bad-cb-length consensus rule) return CScript([res, OP_1]) return CScript([CScriptNum(height)]) def create_coinbase( - height: int, pubkey: Optional[bytes] = None, - nValue: int = 50_000_000) -> CTransaction: + height: int, pubkey: Optional[bytes] = None, nValue: int = 50_000_000 +) -> CTransaction: """Create a coinbase transaction, assuming no miner fees. If pubkey is passed in, the coinbase output will be a P2PK output; otherwise an anyone-can-spend output.""" coinbase = CTransaction() - coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), - script_BIP34_coinbase_height(height), - 0xffffffff)) + coinbase.vin.append( + CTxIn( + COutPoint(0, 0xFFFFFFFF), script_BIP34_coinbase_height(height), 0xFFFFFFFF + ) + ) coinbaseoutput = CTxOut() coinbaseoutput.nValue = nValue * XEC if nValue == 50_000_000: halvings = int(height / 150) # regtest coinbaseoutput.nValue >>= halvings if pubkey is not None: coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [coinbaseoutput] # Make sure the coinbase is at least 100 bytes pad_tx(coinbase) coinbase.calc_sha256() return coinbase -def create_tx_with_script(prevtx, n, script_sig=b"", *, - amount, script_pub_key=CScript()): +def create_tx_with_script( + prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript() +): """Return one-input, one-output transaction object - spending the prevtx's n-th output with the given amount. + spending the prevtx's n-th output with the given amount. - Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output. + Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output. """ tx = CTransaction() assert n < len(prevtx.vout) - tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff)) + tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xFFFFFFFF)) tx.vout.append(CTxOut(amount, script_pub_key)) pad_tx(tx) tx.calc_sha256() return tx def create_transaction(node, txid, to_address, *, amount): - """ Return signed transaction spending the first output of the - input txid. Note that the node must be able to sign for the - output that is being spent, and the node must not be running - multiple wallets. + """Return signed transaction spending the first output of the + input txid. Note that the node must be able to sign for the + output that is being spent, and the node must not be running + multiple wallets. """ raw_tx = create_raw_transaction(node, txid, to_address, amount=amount) tx = FromHex(CTransaction(), raw_tx) return tx def create_raw_transaction(node, txid, to_address, *, amount): - """ Return raw signed transaction spending the first output of the - input txid. Note that the node must be able to sign for the - output that is being spent, and the node must not be running - multiple wallets. + """Return raw signed transaction spending the first output of the + input txid. Note that the node must be able to sign for the + output that is being spent, and the node must not be running + multiple wallets. 
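    A sketched call, for illustration only (txid must refer to a
    transaction whose first output the node's wallet can sign for):

        raw_hex = create_raw_transaction(
            node, txid, node.getnewaddress(), amount=1_000_000)
        node.sendrawtransaction(raw_hex)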
""" rawtx = node.createrawtransaction( - inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount}) + inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount} + ) signresult = node.signrawtransactionwithwallet(rawtx) assert_equal(signresult["complete"], True) - return signresult['hex'] + return signresult["hex"] def create_confirmed_utxos(test_framework, node, count, age=101, **kwargs): """ Helper to create at least "count" utxos """ to_generate = int(0.5 * count) + age while to_generate > 0: test_framework.generate(node, min(25, to_generate), **kwargs) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} - outputs[addr1] = satoshi_round(t['amount'] / 2) - outputs[addr2] = satoshi_round(t['amount'] / 2) + outputs[addr1] = satoshi_round(t["amount"] / 2) + outputs[addr2] = satoshi_round(t["amount"] / 2) raw_tx = node.createrawtransaction(inputs, outputs) ctx = FromHex(CTransaction(), raw_tx) fee = node.calculate_fee(ctx) // 2 ctx.vout[0].nValue -= fee # Due to possible truncation, we go ahead and take another satoshi in # fees to ensure the transaction gets through ctx.vout[1].nValue -= fee + 1 signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"] node.sendrawtransaction(signed_tx) - while (node.getmempoolinfo()['size'] > 0): + while node.getmempoolinfo()["size"] > 0: test_framework.generate(node, 1, **kwargs) utxos = node.listunspent() assert len(utxos) >= count return utxos def mine_big_block(test_framework, node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 utxos = utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) send_big_transactions(node, utxos, num, 100) test_framework.generate(node, 1) def send_big_transactions(node, utxos, num, fee_multiplier): from .cashaddr import decode + txids = [] padding = "1" * 512 addrHash = decode(node.getnewaddress())[2] for _ in range(num): ctx = CTransaction() utxo = utxos.pop() - txid = int(utxo['txid'], 16) + txid = int(utxo["txid"], 16) ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b"")) ctx.vout.append( - CTxOut(int(satoshi_round(utxo['amount'] * XEC)), - CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]))) + CTxOut( + int(satoshi_round(utxo["amount"] * XEC)), + CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]), + ) + ) for i in range(0, 127): - ctx.vout.append(CTxOut(0, CScript( - [OP_RETURN, bytes(padding, 'utf-8')]))) + ctx.vout.append(CTxOut(0, CScript([OP_RETURN, bytes(padding, "utf-8")]))) # Create a proper fee for the transaction to be mined ctx.vout[0].nValue -= int(fee_multiplier * node.calculate_fee(ctx)) - signresult = node.signrawtransactionwithwallet( - ToHex(ctx), None, "NONE|FORKID") + signresult = node.signrawtransactionwithwallet(ToHex(ctx), None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], 0) txids.append(txid) return txids class TestFrameworkBlockTools(unittest.TestCase): def test_create_coinbase(self): height = 20 coinbase_tx = create_coinbase(height=height) assert_equal(CScriptNum.decode(coinbase_tx.vin[0].scriptSig), height) diff --git a/test/functional/test_framework/cashaddr.py b/test/functional/test_framework/cashaddr.py index 60f56efdf..16e07e6a0 100644 --- 
a/test/functional/test_framework/cashaddr.py +++ b/test/functional/test_framework/cashaddr.py @@ -1,194 +1,195 @@ #!/usr/bin/env python3 # Copyright (c) 2017 Pieter Wuille, Shammah Chancellor, Neil Booth # # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Originally taken from Electron-Cash: # https://raw.githubusercontent.com/fyookball/electrum/master/lib/cashaddr.py # _CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" def _polymod(values): """Internal function that computes the cashaddr checksum.""" c = 1 for d in values: c0 = c >> 35 - c = ((c & 0x07ffffffff) << 5) ^ d - if (c0 & 0x01): - c ^= 0x98f2bc8e61 - if (c0 & 0x02): - c ^= 0x79b76d99e2 - if (c0 & 0x04): - c ^= 0xf33e5fb3c4 - if (c0 & 0x08): - c ^= 0xae2eabe2a8 - if (c0 & 0x10): - c ^= 0x1e4f43e470 + c = ((c & 0x07FFFFFFFF) << 5) ^ d + if c0 & 0x01: + c ^= 0x98F2BC8E61 + if c0 & 0x02: + c ^= 0x79B76D99E2 + if c0 & 0x04: + c ^= 0xF33E5FB3C4 + if c0 & 0x08: + c ^= 0xAE2EABE2A8 + if c0 & 0x10: + c ^= 0x1E4F43E470 retval = c ^ 1 return retval def _prefix_expand(prefix): """Expand the prefix into values for checksum computation.""" - retval = bytearray(ord(x) & 0x1f for x in prefix) + retval = bytearray(ord(x) & 0x1F for x in prefix) # Append null separator retval.append(0) return retval def _create_checksum(prefix, data): """Compute the checksum values given prefix and data.""" values = _prefix_expand(prefix) + data + bytes(8) polymod = _polymod(values) # Return the polymod expanded into eight 5-bit elements return bytes((polymod >> 5 * (7 - i)) & 31 for i in range(8)) def _convertbits(data, frombits, tobits, pad=True): """General power-of-2 base conversion.""" acc = 0 bits = 0 ret = bytearray() maxv = (1 << tobits) - 1 max_acc = (1 << (frombits + tobits - 1)) - 1 for value in data: acc = ((acc << frombits) | value) & max_acc bits += frombits while bits >= tobits: bits -= tobits ret.append((acc >> bits) & maxv) if pad and bits: ret.append((acc << (tobits - bits)) & maxv) return ret def _pack_addr_data(kind, addr_hash): """Pack addr data with version byte""" version_byte = kind << 3 offset = 1 encoded_size = 0 if len(addr_hash) >= 40: offset = 2 encoded_size |= 0x04 encoded_size |= (len(addr_hash) - 20 * offset) // (4 * offset) # invalid size? - if ((len(addr_hash) - 20 * offset) % (4 * offset) != 0 - or not 0 <= encoded_size <= 7): - raise ValueError(f'invalid address hash size {addr_hash}') + if (len(addr_hash) - 20 * offset) % (4 * offset) != 0 or not 0 <= encoded_size <= 7: + raise ValueError(f"invalid address hash size {addr_hash}") version_byte |= encoded_size data = bytes([version_byte]) + addr_hash return _convertbits(data, 8, 5, True) def _decode_payload(addr): """Validate a cashaddr string. Throws CashAddr.Error if it is invalid, otherwise returns the tuple (prefix, payload) without the checksum. 
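    A sketched call, for illustration only (the address is the unspendable
    regtest address defined in address.py above):

        prefix, data = _decode_payload(
            "ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt")
        # prefix == "ecregtest"; data holds 5-bit symbols, checksum dropped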
""" lower = addr.lower() if lower != addr and addr.upper() != addr: - raise ValueError(f'mixed case in address: {addr}') + raise ValueError(f"mixed case in address: {addr}") - parts = lower.split(':', 1) + parts = lower.split(":", 1) if len(parts) != 2: raise ValueError(f"address missing ':' separator: {addr}") prefix, payload = parts if not prefix: - raise ValueError(f'address prefix is missing: {addr}') + raise ValueError(f"address prefix is missing: {addr}") if not all(33 <= ord(x) <= 126 for x in prefix): - raise ValueError(f'invalid address prefix: {prefix}') + raise ValueError(f"invalid address prefix: {prefix}") if not (8 <= len(payload) <= 124): - raise ValueError(f'address payload has invalid length: {len(addr)}') + raise ValueError(f"address payload has invalid length: {len(addr)}") try: data = bytes(_CHARSET.find(x) for x in payload) except ValueError: - raise ValueError(f'invalid characters in address: {payload}') + raise ValueError(f"invalid characters in address: {payload}") if _polymod(_prefix_expand(prefix) + data): - raise ValueError(f'invalid checksum in address: {addr}') + raise ValueError(f"invalid checksum in address: {addr}") if lower != addr: prefix = prefix.upper() # Drop the 40 bit checksum return prefix, data[:-8] + # # External Interface # PUBKEY_TYPE = 0 SCRIPT_TYPE = 1 def decode(address): - '''Given a cashaddr address, return a tuple + """Given a cashaddr address, return a tuple - (prefix, kind, hash) - ''' + (prefix, kind, hash) + """ if not isinstance(address, str): - raise TypeError('address must be a string') + raise TypeError("address must be a string") prefix, payload = _decode_payload(address) # Ensure there isn't extra padding extrabits = len(payload) * 5 % 8 if extrabits >= 5: - raise ValueError(f'excess padding in address {address}') + raise ValueError(f"excess padding in address {address}") # Ensure extrabits are zeros if payload[-1] & ((1 << extrabits) - 1): - raise ValueError(f'non-zero padding in address {address}') + raise ValueError(f"non-zero padding in address {address}") decoded = _convertbits(payload, 5, 8, False) version = decoded[0] addr_hash = bytes(decoded[1:]) size = (version & 0x03) * 4 + 20 # Double the size, if the 3rd bit is on. 
if version & 0x04: size <<= 1 if size != len(addr_hash): raise ValueError( - f'address hash has length {len(addr_hash)} but expected {size}') + f"address hash has length {len(addr_hash)} but expected {size}" + ) kind = version >> 3 if kind not in (SCRIPT_TYPE, PUBKEY_TYPE): - raise ValueError(f'unrecognised address type {kind}') + raise ValueError(f"unrecognised address type {kind}") return prefix, kind, addr_hash def encode(prefix, kind, addr_hash): """Encode a cashaddr address without prefix and separator.""" if not isinstance(prefix, str): - raise TypeError('prefix must be a string') + raise TypeError("prefix must be a string") if not isinstance(addr_hash, (bytes, bytearray)): - raise TypeError('addr_hash must be binary bytes') + raise TypeError("addr_hash must be binary bytes") if kind not in (SCRIPT_TYPE, PUBKEY_TYPE): - raise ValueError(f'unrecognised address type {kind}') + raise ValueError(f"unrecognised address type {kind}") payload = _pack_addr_data(kind, addr_hash) checksum = _create_checksum(prefix, payload) - return ''.join([_CHARSET[d] for d in (payload + checksum)]) + return "".join([_CHARSET[d] for d in (payload + checksum)]) def encode_full(prefix, kind, addr_hash): """Encode a full cashaddr address, with prefix and separator.""" - return ':'.join([prefix, encode(prefix, kind, addr_hash)]) + return ":".join([prefix, encode(prefix, kind, addr_hash)]) diff --git a/test/functional/test_framework/cdefs.py b/test/functional/test_framework/cdefs.py index 534fc2873..83f1006cd 100644 --- a/test/functional/test_framework/cdefs.py +++ b/test/functional/test_framework/cdefs.py @@ -1,91 +1,96 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Imports some application default values from source files outside the test framework, and defines equivalents of consensus parameters for the test framework. """ import os import re def get_srcdir(): """ Try to find out the base folder containing the 'src' folder. If SRCDIR is set it does a sanity check and returns that. Otherwise it goes on a search and rescue mission. Returns None if it cannot find a suitable folder. """ + def contains_src(path_to_check): if not path_to_check: return False else: - cand_path = os.path.join(path_to_check, 'src') + cand_path = os.path.join(path_to_check, "src") return os.path.exists(cand_path) and os.path.isdir(cand_path) - srcdir = os.environ.get('SRCDIR', '') + srcdir = os.environ.get("SRCDIR", "") if contains_src(srcdir): return srcdir # Try to work it based out on main module import sys - mainmod = sys.modules['__main__'] - mainmod_path = getattr(mainmod, '__file__', '') - if mainmod_path and mainmod_path.endswith('.py'): + + mainmod = sys.modules["__main__"] + mainmod_path = getattr(mainmod, "__file__", "") + if mainmod_path and mainmod_path.endswith(".py"): maybe_top = mainmod_path - while maybe_top != '/': + while maybe_top != "/": maybe_top = os.path.abspath(os.path.dirname(maybe_top)) if contains_src(maybe_top): return maybe_top # No luck, give up. 
return None # Slurp in consensus.h contents -_consensus_h_fh = open(os.path.join(get_srcdir(), 'src', 'consensus', - 'consensus.h'), 'rt', encoding='utf-8') +_consensus_h_fh = open( + os.path.join(get_srcdir(), "src", "consensus", "consensus.h"), + "rt", + encoding="utf-8", +) _consensus_h_contents = _consensus_h_fh.read() _consensus_h_fh.close() # This constant is currently needed to evaluate some that are formulas ONE_MEGABYTE = 1000000 # Extract relevant default values parameters # The maximum allowed block size before the fork LEGACY_MAX_BLOCK_SIZE = ONE_MEGABYTE # Default setting for maximum allowed size for a block, in bytes -match = re.search(r'DEFAULT_MAX_BLOCK_SIZE = (.+);', _consensus_h_contents) +match = re.search(r"DEFAULT_MAX_BLOCK_SIZE = (.+);", _consensus_h_contents) if match is None: raise RuntimeError("DEFAULT_MAX_BLOCK_SIZE value not found in consensus.h") DEFAULT_MAX_BLOCK_SIZE = eval(match.group(1)) # The following consensus parameters should not be automatically imported. # They *should* cause test failures if application code is changed in ways # that violate current consensus. # The minimum number of max_block_size bytes required per executed signature # check operation in a block. I.e. maximum_block_sigchecks = maximum_block_size # / BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO (network rule). BLOCK_MAXBYTES_MAXSIGCHECKS_RATIO = 141 # Coinbase transaction outputs can only be spent after this number of new # blocks (network rule) COINBASE_MATURITY = 100 # Minimum size a transaction can have. MIN_TX_SIZE = 100 # Maximum bytes in a TxOut pubkey script MAX_TXOUT_PUBKEY_SCRIPT = 10000 if __name__ == "__main__": # Output values if run standalone to verify print(f"DEFAULT_MAX_BLOCK_SIZE = {DEFAULT_MAX_BLOCK_SIZE} (bytes)") print(f"COINBASE_MATURITY = {COINBASE_MATURITY} (blocks)") diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index 8f4fcae4c..95ec7e652 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -1,110 +1,110 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for doing coverage analysis on the RPC interface. Provides a way to track which RPC commands are exercised during testing. """ import os -REFERENCE_FILENAME = 'rpc_interface.txt' +REFERENCE_FILENAME = "rpc_interface.txt" class AuthServiceProxyWrapper: """ An object that wraps AuthServiceProxy to record specific RPC calls. """ def __init__(self, auth_service_proxy_instance, coverage_logfile=None): """ Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. coverage_logfile (str): if specified, write each service_name out to a file when called. """ self.auth_service_proxy_instance = auth_service_proxy_instance self.coverage_logfile = coverage_logfile def __getattr__(self, name): return_val = getattr(self.auth_service_proxy_instance, name) if not isinstance(return_val, type(self.auth_service_proxy_instance)): # If proxy getattr returned an unwrapped value, do the same here. return return_val return AuthServiceProxyWrapper(return_val, self.coverage_logfile) def __call__(self, *args, **kwargs): """ Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. 
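The DEFAULT_MAX_BLOCK_SIZE extraction above is a plain regex capture plus eval(); a self-contained sketch of the same idea (the header line here is illustrative, not the real consensus.h):

import re

ONE_MEGABYTE = 1000000  # mirrors the constant defined in cdefs.py
header_line = "static const uint64_t DEFAULT_MAX_BLOCK_SIZE = 32 * ONE_MEGABYTE;"

match = re.search(r"DEFAULT_MAX_BLOCK_SIZE = (.+);", header_line)
assert match is not None
# eval() is used (rather than int()) so the captured C++ expression may
# reference constants such as ONE_MEGABYTE defined on the Python side.
assert eval(match.group(1)) == 32000000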
""" return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) self._log_call() return return_val def _log_call(self): rpc_method = self.auth_service_proxy_instance._service_name if self.coverage_logfile: - with open(self.coverage_logfile, 'a+', encoding='utf8') as f: + with open(self.coverage_logfile, "a+", encoding="utf8") as f: f.write(f"{rpc_method}\n") def __truediv__(self, relative_uri): - return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri, - self.coverage_logfile) + return AuthServiceProxyWrapper( + self.auth_service_proxy_instance / relative_uri, self.coverage_logfile + ) def get_request(self, *args, **kwargs): self._log_call() return self.auth_service_proxy_instance.get_request(*args, **kwargs) def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. """ pid = str(os.getpid()) - return os.path.join( - dirname, f"coverage.pid{pid}.node{str(n_node)}.txt") + return os.path.join(dirname, f"coverage.pid{pid}.node{str(n_node)}.txt") def write_all_rpc_commands(dirname, node): """ Write out a list of all RPC functions available in `bitcoin-cli` for coverage comparison. This will only happen once per coverage directory. Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) if os.path.isfile(filename): return False - help_output = node.help().split('\n') + help_output = node.help().split("\n") commands = set() for line in help_output: line = line.strip() # Ignore blanks and headers - if line and not line.startswith('='): + if line and not line.startswith("="): commands.add(f"{line.split()[0]}\n") - with open(filename, 'w', encoding='utf8') as f: + with open(filename, "w", encoding="utf8") as f: f.writelines(list(commands)) return True diff --git a/test/functional/test_framework/descriptors.py b/test/functional/test_framework/descriptors.py index f88ffc30d..76ab1582f 100644 --- a/test/functional/test_framework/descriptors.py +++ b/test/functional/test_framework/descriptors.py @@ -1,77 +1,75 @@ #!/usr/bin/env python3 # Copyright (c) 2019 Pieter Wuille # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Utility functions related to output descriptors""" import re -INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ " +INPUT_CHARSET = ( + "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ " +) CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l" -GENERATOR = [ - 0xf5dee51989, - 0xa9fdca3312, - 0x1bab10e32d, - 0x3706b1677a, - 0x644d626ffd] +GENERATOR = [0xF5DEE51989, 0xA9FDCA3312, 0x1BAB10E32D, 0x3706B1677A, 0x644D626FFD] def descsum_polymod(symbols): """Internal function that computes the descriptor checksum.""" chk = 1 for value in symbols: top = chk >> 35 - chk = (chk & 0x7ffffffff) << 5 ^ value + chk = (chk & 0x7FFFFFFFF) << 5 ^ value for i in range(5): chk ^= GENERATOR[i] if ((top >> i) & 1) else 0 return chk def descsum_expand(s): """Internal function that does the character to symbol expansion""" groups = [] symbols = [] for c in s: if c not in INPUT_CHARSET: return None v = INPUT_CHARSET.find(c) symbols.append(v & 31) groups.append(v >> 5) if len(groups) == 3: symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2]) groups = [] if len(groups) == 1: symbols.append(groups[0]) elif len(groups) == 2: symbols.append(groups[0] * 3 + groups[1]) return symbols def descsum_create(s): """Add a checksum to a descriptor without""" symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0] checksum = descsum_polymod(symbols) ^ 1 - return s + '#' + \ - ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] - for i in range(8)) + return ( + s + + "#" + + "".join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8)) + ) def descsum_check(s, require=True): """Verify that the checksum is correct in a descriptor""" - if '#' not in s: + if "#" not in s: return not require - if s[-9] != '#': + if s[-9] != "#": return False if not all(x in CHECKSUM_CHARSET for x in s[-8:]): return False - symbols = descsum_expand( - s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]] + symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]] return descsum_polymod(symbols) == 1 def drop_origins(s): - '''Drop the key origins from a descriptor''' - desc = re.sub(r'\[.+?\]', '', s) - if '#' in s: - desc = desc[:desc.index('#')] + """Drop the key origins from a descriptor""" + desc = re.sub(r"\[.+?\]", "", s) + if "#" in s: + desc = desc[: desc.index("#")] return descsum_create(desc) diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 98b0659d4..b9d3a7eb4 100755 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,407 +1,411 @@ #!/usr/bin/env python3 # Copyright (c) 2019 Pieter Wuille # Copyright (c) 2019-2020 The Bitcoin developers """Test-only secp256k1 elliptic curve implementation WARNING: This code is slow, uses bad randomness, does not properly protect keys, and is trivially vulnerable to side channel attacks. Do not use for anything but tests. 
""" import hashlib import random from .util import modinv def jacobi_symbol(n, k): """Compute the Jacobi symbol of n modulo k See http://en.wikipedia.org/wiki/Jacobi_symbol """ assert k > 0 and k & 1 n %= k t = 0 while n != 0: while n & 1 == 0: n >>= 1 r = k & 7 - t ^= (r == 3 or r == 5) + t ^= r == 3 or r == 5 n, k = k, n - t ^= (n & k & 3 == 3) + t ^= n & k & 3 == 3 n = n % k if k == 1: return -1 if t else 1 return 0 def modsqrt(a, p): """Compute the square root of a modulo p For p = 3 mod 4, if a square root exists, it is equal to a**((p+1)/4) mod p. """ assert p % 4 == 3 # Only p = 3 mod 4 is implemented sqrt = pow(a, (p + 1) // 4, p) if pow(sqrt, 2, p) == a % p: return sqrt return None class EllipticCurve: def __init__(self, p, a, b): """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p).""" self.p = p self.a = a % p self.b = b % p def affine(self, p1): """Convert a Jacobian point tuple p1 to affine form, or None if at infinity.""" x1, y1, z1 = p1 if z1 == 0: return None inv = modinv(z1, self.p) inv_2 = (inv**2) % self.p inv_3 = (inv_2 * inv) % self.p return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1) def negate(self, p1): """Negate a Jacobian point tuple p1.""" x1, y1, z1 = p1 return (x1, (self.p - y1) % self.p, z1) def on_curve(self, p1): """Determine whether a Jacobian tuple p is on the curve (and not infinity)""" x1, y1, z1 = p1 z2 = pow(z1, 2, self.p) z4 = pow(z2, 2, self.p) - return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * - z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0 + return ( + z1 != 0 + and ( + pow(x1, 3, self.p) + + self.a * x1 * z4 + + self.b * z2 * z4 + - pow(y1, 2, self.p) + ) + % self.p + == 0 + ) def is_x_coord(self, x): """Test whether x is a valid X coordinate on the curve.""" x_3 = pow(x, 3, self.p) return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1 def lift_x(self, x): """Given an X coordinate on the curve, return a corresponding affine point.""" x_3 = pow(x, 3, self.p) v = x_3 + self.a * x + self.b y = modsqrt(v, self.p) if y is None: return None return (x, y, 1) def double(self, p1): """Double a Jacobian tuple p1""" x1, y1, z1 = p1 if z1 == 0: return (0, 1, 0) y1_2 = (y1**2) % self.p y1_4 = (y1_2**2) % self.p x1_2 = (x1**2) % self.p s = (4 * x1 * y1_2) % self.p m = 3 * x1_2 if self.a: m += self.a * pow(z1, 4, self.p) m = m % self.p x2 = (m**2 - 2 * s) % self.p y2 = (m * (s - x2) - 8 * y1_4) % self.p z2 = (2 * y1 * z1) % self.p return (x2, y2, z2) def add_mixed(self, p1, p2): """Add a Jacobian tuple p1 and an affine tuple p2""" x1, y1, z1 = p1 x2, y2, z2 = p2 assert z2 == 1 if z1 == 0: return p2 z1_2 = (z1**2) % self.p z1_3 = (z1_2 * z1) % self.p u2 = (x2 * z1_2) % self.p s2 = (y2 * z1_3) % self.p if x1 == u2: - if (y1 != s2): + if y1 != s2: return (0, 1, 0) return self.double(p1) h = u2 - x1 r = s2 - y1 h_2 = (h**2) % self.p h_3 = (h_2 * h) % self.p u1_h_2 = (x1 * h_2) % self.p x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p y3 = (r * (u1_h_2 - x3) - y1 * h_3) % self.p z3 = (h * z1) % self.p return (x3, y3, z3) def add(self, p1, p2): """Add two Jacobian tuples p1 and p2""" x1, y1, z1 = p1 x2, y2, z2 = p2 if z1 == 0: return p2 if z2 == 0: return p1 if z1 == 1: return self.add_mixed(p2, p1) if z2 == 1: return self.add_mixed(p1, p2) z1_2 = (z1**2) % self.p z1_3 = (z1_2 * z1) % self.p z2_2 = (z2**2) % self.p z2_3 = (z2_2 * z2) % self.p u1 = (x1 * z2_2) % self.p u2 = (x2 * z1_2) % self.p s1 = (y1 * z2_3) % self.p s2 = (y2 * z1_3) % self.p if u1 == u2: - if (s1 != s2): + if s1 != s2: return (0, 1, 0) return self.double(p1) h = u2 - 
u1 r = s2 - s1 h_2 = (h**2) % self.p h_3 = (h_2 * h) % self.p u1_h_2 = (u1 * h_2) % self.p x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p y3 = (r * (u1_h_2 - x3) - s1 * h_3) % self.p z3 = (h * z1 * z2) % self.p return (x3, y3, z3) def mul(self, ps): """Compute a (multi) point multiplication ps is a list of (Jacobian tuple, scalar) pairs. """ r = (0, 1, 0) for i in range(255, -1, -1): r = self.double(r) - for (p, n) in ps: - if ((n >> i) & 1): + for p, n in ps: + if (n >> i) & 1: r = self.add(r, p) return r SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7) SECP256K1_G = ( 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, - 1) + 1, +) SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 class ECPubKey: """A secp256k1 public key""" def __init__(self): """Construct an uninitialized public key""" self.valid = False def set(self, data): """Construct a public key from a serialization in compressed or uncompressed format""" - if (len(data) == 65 and data[0] == 0x04): - p = (int.from_bytes(data[1:33], 'big'), - int.from_bytes(data[33:65], 'big'), 1) + if len(data) == 65 and data[0] == 0x04: + p = ( + int.from_bytes(data[1:33], "big"), + int.from_bytes(data[33:65], "big"), + 1, + ) self.valid = SECP256K1.on_curve(p) if self.valid: self.p = p self.compressed = False - elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)): - x = int.from_bytes(data[1:33], 'big') + elif len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03): + x = int.from_bytes(data[1:33], "big") if SECP256K1.is_x_coord(x): p = SECP256K1.lift_x(x) if (p[1] & 1) != (data[0] & 1): p = SECP256K1.negate(p) self.p = p self.valid = True self.compressed = True else: self.valid = False else: self.valid = False @property def is_compressed(self): return self.compressed @property def is_valid(self): return self.valid def get_bytes(self): assert self.valid p = SECP256K1.affine(self.p) if p is None: return None if self.compressed: - return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big') + return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, "big") else: - return bytes([0x04]) + p[0].to_bytes(32, 'big') + \ - p[1].to_bytes(32, 'big') + return bytes([0x04]) + p[0].to_bytes(32, "big") + p[1].to_bytes(32, "big") def verify_ecdsa(self, sig, msg, low_s=True): """Verify a strictly DER-encoded ECDSA signature against this pubkey.""" assert self.valid - if (sig[1] + 2 != len(sig)): + if sig[1] + 2 != len(sig): return False - if (len(sig) < 4): + if len(sig) < 4: return False - if (sig[0] != 0x30): + if sig[0] != 0x30: return False - if (sig[2] != 0x02): + if sig[2] != 0x02: return False rlen = sig[3] - if (len(sig) < 6 + rlen): + if len(sig) < 6 + rlen: return False if rlen < 1 or rlen > 33: return False if sig[4] >= 0x80: return False - if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)): + if rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80): return False - r = int.from_bytes(sig[4:4 + rlen], 'big') - if (sig[4 + rlen] != 0x02): + r = int.from_bytes(sig[4 : 4 + rlen], "big") + if sig[4 + rlen] != 0x02: return False slen = sig[5 + rlen] if slen < 1 or slen > 33: return False - if (len(sig) != 6 + rlen + slen): + if len(sig) != 6 + rlen + slen: return False if sig[6 + rlen] >= 0x80: return False - if (slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80)): + if slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80): return False - s = int.from_bytes(sig[6 
+ rlen:6 + rlen + slen], 'big') + s = int.from_bytes(sig[6 + rlen : 6 + rlen + slen], "big") if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER: return False if low_s and s >= SECP256K1_ORDER_HALF: return False - z = int.from_bytes(msg, 'big') + z = int.from_bytes(msg, "big") w = modinv(s, SECP256K1_ORDER) u1 = z * w % SECP256K1_ORDER u2 = r * w % SECP256K1_ORDER R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)])) if R is None or R[0] != r: return False return True def verify_schnorr(self, sig, msg32): assert self.is_valid assert len(sig) == 64 assert len(msg32) == 32 Rx = sig[:32] - s = int.from_bytes(sig[32:], 'big') + s = int.from_bytes(sig[32:], "big") e = int.from_bytes( - hashlib.sha256( - Rx + - self.get_bytes() + - msg32).digest(), - 'big') + hashlib.sha256(Rx + self.get_bytes() + msg32).digest(), "big" + ) nege = SECP256K1_ORDER - e R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, s), (self.p, nege)])) if R is None: return False if jacobi_symbol(R[1], SECP256K1.p) == -1: return False - return R[0] == int.from_bytes(Rx, 'big') + return R[0] == int.from_bytes(Rx, "big") class ECKey: """A secp256k1 private key""" def __init__(self): self.valid = False def set(self, secret, compressed): """Construct a private key object with given 32-byte secret and compressed flag.""" assert len(secret) == 32 - secret = int.from_bytes(secret, 'big') - self.valid = (secret > 0 and secret < SECP256K1_ORDER) + secret = int.from_bytes(secret, "big") + self.valid = secret > 0 and secret < SECP256K1_ORDER if self.valid: self.secret = secret self.compressed = compressed def generate(self, compressed=True): """Generate a random private key (compressed or uncompressed).""" - self.set( - random.randrange( - 1, - SECP256K1_ORDER).to_bytes( - 32, - 'big'), - compressed) + self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, "big"), compressed) def get_bytes(self): """Retrieve the 32-byte representation of this key.""" assert self.valid - return self.secret.to_bytes(32, 'big') + return self.secret.to_bytes(32, "big") @property def is_valid(self): return self.valid @property def is_compressed(self): return self.compressed def get_pubkey(self): """Compute an ECPubKey object for this secret key.""" assert self.valid ret = ECPubKey() p = SECP256K1.mul([(SECP256K1_G, self.secret)]) ret.p = p ret.valid = True ret.compressed = self.compressed return ret def sign_ecdsa(self, msg, low_s=True): """Construct a DER-encoded ECDSA signature with this key.""" assert self.valid - z = int.from_bytes(msg, 'big') + z = int.from_bytes(msg, "big") # Note: no RFC6979, but a simple random nonce (some tests rely on # distinct transactions for the same operation) k = random.randrange(1, SECP256K1_ORDER) R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)])) r = R[0] % SECP256K1_ORDER s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER if low_s and s > SECP256K1_ORDER_HALF: s = SECP256K1_ORDER - s - rb = r.to_bytes((r.bit_length() + 8) // 8, 'big') - sb = s.to_bytes((s.bit_length() + 8) // 8, 'big') - return b'\x30' + \ - bytes([4 + len(rb) + len(sb), 2, len(rb)]) + \ - rb + bytes([2, len(sb)]) + sb + rb = r.to_bytes((r.bit_length() + 8) // 8, "big") + sb = s.to_bytes((s.bit_length() + 8) // 8, "big") + return ( + b"\x30" + + bytes([4 + len(rb) + len(sb), 2, len(rb)]) + + rb + + bytes([2, len(sb)]) + + sb + ) def sign_schnorr(self, msg32): """Create Schnorr signature (BIP-Schnorr convention).""" assert self.valid assert len(msg32) == 32 pubkey = self.get_pubkey() assert 
pubkey.is_valid k = random.randrange(1, SECP256K1_ORDER) R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)])) if jacobi_symbol(R[1], SECP256K1.p) == -1: k = SECP256K1_ORDER - k - Rx = R[0].to_bytes(32, 'big') + Rx = R[0].to_bytes(32, "big") e = int.from_bytes( - hashlib.sha256( - Rx + - pubkey.get_bytes() + - msg32).digest(), - 'big') - s = (k + e * int.from_bytes(self.get_bytes(), 'big')) % SECP256K1_ORDER - sig = Rx + s.to_bytes(32, 'big') + hashlib.sha256(Rx + pubkey.get_bytes() + msg32).digest(), "big" + ) + s = (k + e * int.from_bytes(self.get_bytes(), "big")) % SECP256K1_ORDER + sig = Rx + s.to_bytes(32, "big") assert pubkey.verify_schnorr(sig, msg32) return sig diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 6dacefa93..455e73a1f 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -1,2307 +1,2369 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Bitcoin test framework primitive and message structures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in bitcoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization. Classes use __slots__ to ensure extraneous attributes aren't accidentally added by tests, compromising their intended effect. """ import copy import hashlib import random import socket import struct import time import unittest from base64 import b64decode, b64encode from enum import IntEnum from io import BytesIO from typing import List from test_framework.siphash import siphash256 from test_framework.util import assert_equal, uint256_hex MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 MAX_BLOOM_FILTER_SIZE = 36000 MAX_BLOOM_HASH_FUNCS = 50 # 1,000,000 XEC in satoshis (legacy BCHA) COIN = 100000000 # 1 XEC in satoshis XEC = 100 MAX_MONEY = 21000000 * COIN # Maximum length of incoming protocol messages MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024 MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message -NODE_NETWORK = (1 << 0) -NODE_GETUTXO = (1 << 1) -NODE_BLOOM = (1 << 2) +NODE_NETWORK = 1 << 0 +NODE_GETUTXO = 1 << 1 +NODE_BLOOM = 1 << 2 # NODE_WITNESS = (1 << 3) # NODE_XTHIN = (1 << 4) # removed in v0.22.12 -NODE_COMPACT_FILTERS = (1 << 6) -NODE_NETWORK_LIMITED = (1 << 10) -NODE_AVALANCHE = (1 << 24) +NODE_COMPACT_FILTERS = 1 << 6 +NODE_NETWORK_LIMITED = 1 << 10 +NODE_AVALANCHE = 1 << 24 MSG_TX = 1 MSG_BLOCK = 2 MSG_FILTERED_BLOCK = 3 MSG_CMPCT_BLOCK = 4 -MSG_AVA_PROOF = 0x1f000001 -MSG_TYPE_MASK = 0xffffffff >> 2 +MSG_AVA_PROOF = 0x1F000001 +MSG_TYPE_MASK = 0xFFFFFFFF >> 2 FILTER_TYPE_BASIC = 0 # Serialization/deserialization tools def sha256(s): - return hashlib.new('sha256', s).digest() + return hashlib.new("sha256", s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(size): r = b"" if size < 253: r = struct.pack("B", size) elif size < 0x10000: r = struct.pack("<BH", 253, size) elif size < 0x100000000: r = struct.pack("<BI", 254, size) else: r = struct.pack("<BQ", 255, size) return r def deser_compact_size(f): nit = struct.unpack("<B", f.read(1))[0] if nit == 253: nit = struct.unpack("<H", f.read(2))[0] elif nit == 254: nit = struct.unpack("<I", f.read(4))[0] elif nit == 255: nit = struct.unpack("<Q", f.read(8))[0] return nit def deser_string(f): nit = deser_compact_size(f) return f.read(nit) def ser_string(s): return ser_compact_size(len(s)) + s def deser_uint256(f): r = 0 for i in range(8): t = struct.unpack("<I", f.read(4))[0] r += t << (i * 32) return r def ser_uint256(u): rs = b"" for _ in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("<IIIIIIII", s[:32]) for i in range(8): r += t[i] << (i * 32) return r def uint256_from_compact(c): nbytes = (c >> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v
# deser_function_name: Allow for an alternate deserialization function on the # entries in the vector. def deser_vector(f, c, deser_function_name=None): nit = deser_compact_size(f) r = [] for _ in range(nit): t = c() if deser_function_name: getattr(t, deser_function_name)(f) else: t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. def ser_vector(v, ser_function_name=None): r = ser_compact_size(len(v)) for i in v: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for _ in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(v): r = ser_compact_size(len(v)) for i in v: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for _ in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(v): r = ser_compact_size(len(v)) for sv in v: r += ser_string(sv) return r def FromHex(obj, hex_string): """Deserialize from a hex string representation (eg from RPC)""" obj.deserialize(BytesIO(bytes.fromhex(hex_string))) return obj def ToHex(obj): """Convert a binary-serializable object to hex (eg for submission via RPC)""" return obj.serialize().hex() # Objects that map to bitcoind objects, which can be serialized/deserialized + class CAddress: __slots__ = ("net", "ip", "nServices", "port", "time") # see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki NET_IPV4 = 1 - ADDRV2_NET_NAME = { - NET_IPV4: "IPv4" - } + ADDRV2_NET_NAME = {NET_IPV4: "IPv4"} - ADDRV2_ADDRESS_LENGTH = { - NET_IPV4: 4 - } + ADDRV2_ADDRESS_LENGTH = {NET_IPV4: 4} def __init__(self): self.time = 0 self.nServices = 1 self.net = self.NET_IPV4 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, *, with_time=True): """Deserialize from addrv1 format (pre-BIP155)""" if with_time: # VERSION messages serialize CAddress objects without time self.time = struct.unpack("<I", f.read(4))[0] self.nServices = struct.unpack("<Q", f.read(8))[0] f.read(12) self.net = self.NET_IPV4 self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize(self, *, with_time=True): """Serialize in addrv1 format (pre-BIP155)""" assert self.net == self.NET_IPV4 r = b"" if with_time: # VERSION messages serialize CAddress objects without time r += struct.pack("<I", self.time) r += struct.pack("<Q", self.nServices) r += b"\x00" * 10 + b"\xff" * 2 r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def deserialize_v2(self, f): """Deserialize from addrv2 format (BIP155)""" self.time = struct.unpack("<I", f.read(4))[0] self.nServices = deser_compact_size(f) self.net = struct.unpack("B", f.read(1))[0] assert self.net == self.NET_IPV4 address_length = deser_compact_size(f) assert address_length == self.ADDRV2_ADDRESS_LENGTH[self.net] self.ip = socket.inet_ntoa(f.read(4)) self.port = struct.unpack(">H", f.read(2))[0] def serialize_v2(self): """Serialize in addrv2 format (BIP155)""" assert self.net == self.NET_IPV4 r = b"" r += struct.pack("<I", self.time) r += ser_compact_size(self.nServices) r += struct.pack("B", self.net) r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net]) r += socket.inet_aton(self.ip) r += struct.pack(">H", self.port) return r def __repr__(self): return ( f"CAddress(nServices={self.nServices} net={self.ADDRV2_NET_NAME[self.net]} " f"addr={self.ip} port={self.port})" ) class CInv: __slots__ = ("hash", "type") typemap = { 0: "Error", MSG_TX: "TX", MSG_BLOCK: "Block", MSG_FILTERED_BLOCK: "filtered Block", MSG_CMPCT_BLOCK: "CompactBlock", MSG_AVA_PROOF: "avalanche proof", } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" MAX_MONEY: return False return True def __repr__(self): return ( f"CTransaction(nVersion={self.nVersion} vin={self.vin!r} " f"vout={self.vout!r} nLockTime={self.nLockTime})" ) class CBlockHeader: - __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", - "nTime", "nVersion", "sha256") + __slots__ = ( + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "nBits", + "nNonce", + "nTime", + "nVersion", + "sha256", + ) def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion =
header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return ( f"CBlock(nVersion={self.nVersion} " f"hashPrevBlock={uint256_hex(self.hashPrevBlock)} " f"hashMerkleRoot={uint256_hex(self.hashMerkleRoot)} " f"nTime={self.nTime} nBits={self.nBits:08x} " f"nNonce={self.nNonce:08x} vtx={self.vtx!r})" ) class PrefilledTransaction: __slots__ = ("index", "tx") def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return f"PrefilledTransaction(index={self.index}, tx={self.tx!r})" # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs: - __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length", - "shortids", "shortids_length") + __slots__ = ( + "header", + "nonce", + "prefilled_txn", + "prefilled_txn_length", + "shortids", + "shortids_length", + ) def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("> 1 self.pubkey = deser_string(f) def serialize(self) -> bytes: r = self.utxo.serialize() height_ser = self.height << 1 | int(self.is_coinbase) - r += struct.pack(' bytes: return self.stake.serialize() + self.sig class AvalancheProof: __slots__ = ( "sequence", "expiration", "master", "stakes", "payout_script", "signature", "limited_proofid", - "proofid") - - def __init__(self, sequence=0, expiration=0, - master=b"", signed_stakes=None, payout_script=b"", signature=b""): + "proofid", + ) + + def __init__( + self, + sequence=0, + expiration=0, + master=b"", + signed_stakes=None, + payout_script=b"", + signature=b"", + ): self.sequence: int = sequence self.expiration: int = expiration self.master: bytes = master self.stakes: List[AvalancheSignedStake] = signed_stakes or [ - AvalancheSignedStake()] + AvalancheSignedStake() + ] self.payout_script = payout_script self.signature = signature self.limited_proofid: int = None self.proofid: int = None self.compute_proof_id() def compute_proof_id(self): """Compute Bitcoin's 256-bit hash (double SHA-256) of the serialized proof data. 
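The double SHA-256 used for the proof ID can be sanity-checked on its own; a minimal standalone sketch:

import hashlib

def hash256(s: bytes) -> bytes:
    # Bitcoin's double SHA-256, the same primitive compute_proof_id() feeds
    # its serialized proof data through.
    return hashlib.sha256(hashlib.sha256(s).digest()).digest()

# Well-known vector: double SHA-256 of the empty string.
assert hash256(b"").hex() == (
    "5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456"
)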
""" ss = struct.pack(" int: - return uint256_from_str(hash256( - ser_uint256(self.limited_proofid) + ser_string(self.proof_master))) + return uint256_from_str( + hash256(ser_uint256(self.limited_proofid) + ser_string(self.proof_master)) + ) def deserialize(self, f): self.limited_proofid = deser_uint256(f) self.proof_master = deser_string(f) self.levels = deser_vector(f, AvalancheDelegationLevel) self.proofid = self.compute_proofid() def serialize(self): r = b"" r += ser_uint256(self.limited_proofid) r += ser_string(self.proof_master) r += ser_vector(self.levels) return r def __repr__(self): - return f"AvalancheDelegation(" \ - f"limitedProofId={uint256_hex(self.limited_proofid)}, " \ - f"proofMaster={self.proof_master.hex()}, " \ - f"proofid={uint256_hex(self.proofid)}, " \ - f"levels={self.levels})" + return ( + "AvalancheDelegation(" + f"limitedProofId={uint256_hex(self.limited_proofid)}, " + f"proofMaster={self.proof_master.hex()}, " + f"proofid={uint256_hex(self.proofid)}, " + f"levels={self.levels})" + ) def getid(self): h = ser_uint256(self.proofid) for level in self.levels: h = hash256(h + ser_string(level.pubkey)) return h class AvalancheHello: __slots__ = ("delegation", "sig") def __init__(self, delegation=AvalancheDelegation(), sig=b"\0" * 64): self.delegation = delegation self.sig = sig def deserialize(self, f): self.delegation.deserialize(f) self.sig = f.read(64) def serialize(self): r = b"" r += self.delegation.serialize() r += self.sig return r def __repr__(self): return f"AvalancheHello(delegation={self.delegation!r}, sig={self.sig})" def get_sighash(self, node): b = self.delegation.getid() b += struct.pack(" class msg_headers: __slots__ = ("headers",) msgtype = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return f"msg_headers(headers={self.headers!r})" class msg_merkleblock: __slots__ = ("merkleblock",) msgtype = b"merkleblock" def __init__(self, merkleblock=None): if merkleblock is None: self.merkleblock = CMerkleBlock() else: self.merkleblock = merkleblock def deserialize(self, f): self.merkleblock.deserialize(f) def serialize(self): return self.merkleblock.serialize() def __repr__(self): return f"msg_merkleblock(merkleblock={self.merkleblock!r})" class msg_filterload: __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags") msgtype = b"filterload" - def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0): + def __init__(self, data=b"00", nHashFuncs=0, nTweak=0, nFlags=0): self.data = data self.nHashFuncs = nHashFuncs self.nTweak = nTweak self.nFlags = nFlags def deserialize(self, f): self.data = deser_string(f) self.nHashFuncs = struct.unpack("> (32 - bits)) + return ((v << bits) & 0xFFFFFFFF) | (v >> (32 - bits)) def chacha20_doubleround(s): """Apply a ChaCha20 double round to 16-element state array s. 
See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439 """ - QUARTER_ROUNDS = [(0, 4, 8, 12), - (1, 5, 9, 13), - (2, 6, 10, 14), - (3, 7, 11, 15), - (0, 5, 10, 15), - (1, 6, 11, 12), - (2, 7, 8, 13), - (3, 4, 9, 14)] + QUARTER_ROUNDS = [ + (0, 4, 8, 12), + (1, 5, 9, 13), + (2, 6, 10, 14), + (3, 7, 11, 15), + (0, 5, 10, 15), + (1, 6, 11, 12), + (2, 7, 8, 13), + (3, 4, 9, 14), + ] for a, b, c, d in QUARTER_ROUNDS: - s[a] = (s[a] + s[b]) & 0xffffffff + s[a] = (s[a] + s[b]) & 0xFFFFFFFF s[d] = rot32(s[d] ^ s[a], 16) - s[c] = (s[c] + s[d]) & 0xffffffff + s[c] = (s[c] + s[d]) & 0xFFFFFFFF s[b] = rot32(s[b] ^ s[c], 12) - s[a] = (s[a] + s[b]) & 0xffffffff + s[a] = (s[a] + s[b]) & 0xFFFFFFFF s[d] = rot32(s[d] ^ s[a], 8) - s[c] = (s[c] + s[d]) & 0xffffffff + s[c] = (s[c] + s[d]) & 0xFFFFFFFF s[b] = rot32(s[b] ^ s[c], 7) def chacha20_32_to_384(key32): """Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output.""" # See RFC 8439 section 2.3 for chacha20 parameters - CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574] + CONSTANTS = [0x61707865, 0x3320646E, 0x79622D32, 0x6B206574] key_bytes = [0] * 8 for i in range(8): - key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i + 1))], 'little') + key_bytes[i] = int.from_bytes(key32[(4 * i) : (4 * (i + 1))], "little") INITIALIZATION_VECTOR = [0] * 4 init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR out = bytearray() for counter in range(6): init[12] = counter s = init.copy() for _ in range(10): chacha20_doubleround(s) for i in range(16): - out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little')) + out.extend(((s[i] + init[i]) & 0xFFFFFFFF).to_bytes(4, "little")) return bytes(out) def data_to_num3072(data): """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations.""" bytes384 = chacha20_32_to_384(data) - return int.from_bytes(bytes384, 'little') + return int.from_bytes(bytes384, "little") class MuHash3072: """Class representing the MuHash3072 computation of a set. See https://cseweb.ucsd.edu/~mihir/papers/inchash.pdf and https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-May/014337.html """ MODULUS = 2**3072 - 1103717 def __init__(self): """Initialize for an empty set.""" self.numerator = 1 self.denominator = 1 def insert(self, data): """Insert a byte array data in the set.""" data_hash = hashlib.sha256(data).digest() - self.numerator = ( - self.numerator * data_to_num3072(data_hash)) % self.MODULUS + self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS def remove(self, data): """Remove a byte array from the set.""" data_hash = hashlib.sha256(data).digest() self.denominator = ( - self.denominator * data_to_num3072(data_hash)) % self.MODULUS + self.denominator * data_to_num3072(data_hash) + ) % self.MODULUS def digest(self): """Extract the final hash. 
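The numerator/denominator split in MuHash3072 is ordinary modular arithmetic with the division deferred to digest time; a toy sketch with a small prime modulus (illustrative only, not the real 3072-bit modulus):

M = 2**13 - 1  # 8191, prime; stand-in for the 3072-bit MODULUS above

num, den = 1, 1
for element_hash in (123, 456):  # "insert" two elements
    num = num * element_hash % M
den = den * 456 % M  # "remove" one of them again

# Division happens once, at digest time, via the modular inverse.
assert num * pow(den, -1, M) % M == 123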
Does not modify this object.""" - val = (self.numerator * - modinv(self.denominator, self.MODULUS)) % self.MODULUS - bytes384 = val.to_bytes(384, 'little') + val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS + bytes384 = val.to_bytes(384, "little") return hashlib.sha256(bytes384).digest() class TestFrameworkMuhash(unittest.TestCase): def test_muhash(self): muhash = MuHash3072() - muhash.insert(b'\x00' * 32) - muhash.insert((b'\x01' + b'\x00' * 31)) - muhash.remove((b'\x02' + b'\x00' * 31)) + muhash.insert(b"\x00" * 32) + muhash.insert((b"\x01" + b"\x00" * 31)) + muhash.remove((b"\x02" + b"\x00" * 31)) finalized = muhash.digest() # This mirrors the result in the C++ MuHash3072 unit test self.assertEqual( finalized[::-1].hex(), - "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863" + "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863", ) def test_chacha20(self): def chacha_check(key, result): self.assertEqual(chacha20_32_to_384(key)[:64].hex(), result) # Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7 # Since the nonce is hardcoded to 0 in our function we only use those # vectors. chacha_check( [0] * 32, - "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586") + "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586", + ) chacha_check( [0] * 31 + [1], - "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963") + "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963", + ) diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py index 643c2592e..0af9e8726 100644 --- a/test/functional/test_framework/netutil.py +++ b/test/functional/test_framework/netutil.py @@ -1,182 +1,190 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Linux network utilities. Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal """ import array import os import socket import struct import sys from errno import EINVAL, ENOENT # STATE_ESTABLISHED = '01' # STATE_SYN_SENT = '02' # STATE_SYN_RECV = '03' # STATE_FIN_WAIT1 = '04' # STATE_FIN_WAIT2 = '05' # STATE_TIME_WAIT = '06' # STATE_CLOSE = '07' # STATE_CLOSE_WAIT = '08' # STATE_LAST_ACK = '09' -STATE_LISTEN = '0A' +STATE_LISTEN = "0A" # STATE_CLOSING = '0B' def get_socket_inodes(pid): - ''' + """ Get list of socket inodes for process pid. 
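For reference, the mangled-per-four-bytes address format that netutil.py parses out of /proc/net/tcp can be decoded standalone (the entry value here is illustrative):

import struct

# /proc/net/tcp records addresses as kernel-order hex words and the port as
# plain hex, e.g. loopback 127.0.0.1 port 8332 appears as "0100007F:208C".
entry = "0100007F:208C"
host_hex, port_hex = entry.split(":")
(val,) = struct.unpack("=I", bytes.fromhex(host_hex))
assert f"{val:08x}" == "7f000001"  # 127.0.0.1, on a little-endian machine
assert int(port_hex, 16) == 8332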
- ''' - base = f'/proc/{pid}/fd' + """ + base = f"/proc/{pid}/fd" inodes = [] for item in os.listdir(base): try: target = os.readlink(os.path.join(base, item)) except OSError as err: if err.errno == ENOENT: # The file which is gone in the meantime continue elif err.errno == EINVAL: # Not a link continue else: raise else: - if target.startswith('socket:'): + if target.startswith("socket:"): inodes.append(int(target[8:-1])) return inodes def _remove_empty(array): - return [x for x in array if x != ''] + return [x for x in array if x != ""] def _convert_ip_port(array): - host, port = array.split(':') + host, port = array.split(":") # convert host from mangled-per-four-bytes form as used by kernel host = bytes.fromhex(host) - host_out = '' + host_out = "" for x in range(0, len(host) // 4): - (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4]) - host_out += f'{val:08x}' + (val,) = struct.unpack("=I", host[x * 4 : (x + 1) * 4]) + host_out += f"{val:08x}" return host_out, int(port, 16) -def netstat(typ='tcp'): - ''' +def netstat(typ="tcp"): + """ Function to return a list with status of tcp connections at linux systems To get pid of all network process running on system, you must run this script as superuser - ''' - with open(f"/proc/net/{typ}", 'r', encoding='utf8') as f: + """ + with open(f"/proc/net/{typ}", "r", encoding="utf8") as f: content = f.readlines() content.pop(0) result = [] for line in content: # Split lines and remove empty spaces. - line_array = _remove_empty(line.split(' ')) + line_array = _remove_empty(line.split(" ")) tcp_id = line_array[0] l_addr = _convert_ip_port(line_array[1]) r_addr = _convert_ip_port(line_array[2]) state = line_array[3] # Need the inode to match with process pid. inode = int(line_array[9]) nline = [tcp_id, l_addr, r_addr, state, inode] result.append(nline) return result def get_bind_addrs(pid): - ''' + """ Get bind addresses as (host,port) tuples for process pid. - ''' + """ inodes = get_socket_inodes(pid) bind_addrs = [] - for conn in netstat('tcp') + netstat('tcp6'): + for conn in netstat("tcp") + netstat("tcp6"): if conn[3] == STATE_LISTEN and conn[4] in inodes: bind_addrs.append(conn[1]) return bind_addrs + # from: http://code.activestate.com/recipes/439093/ def all_interfaces(): - ''' + """ Return all interfaces that are up - ''' + """ # Linux only, so only import when required import fcntl is_64bits = sys.maxsize > 2**32 struct_size = 40 if is_64bits else 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) max_possible = 8 # initial value while True: inbytes = max_possible * struct_size - names = array.array('B', b'\0' * inbytes) - outbytes = struct.unpack('iL', fcntl.ioctl( - s.fileno(), - 0x8912, # SIOCGIFCONF - struct.pack('iL', inbytes, names.buffer_info()[0]) - ))[0] + names = array.array("B", b"\0" * inbytes) + outbytes = struct.unpack( + "iL", + fcntl.ioctl( + s.fileno(), + 0x8912, # SIOCGIFCONF + struct.pack("iL", inbytes, names.buffer_info()[0]), + ), + )[0] if outbytes == inbytes: max_possible *= 2 else: break namestr = names.tobytes() - return [(namestr[i:i + 16].split(b'\0', 1)[0], - socket.inet_ntoa(namestr[i + 20:i + 24])) - for i in range(0, outbytes, struct_size)] + return [ + ( + namestr[i : i + 16].split(b"\0", 1)[0], + socket.inet_ntoa(namestr[i + 20 : i + 24]), + ) + for i in range(0, outbytes, struct_size) + ] def addr_to_hex(addr): - ''' + """ Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. - ''' - if '.' 
in addr: # IPv4 - addr = [int(x) for x in addr.split('.')] - elif ':' in addr: # IPv6 + """ + if "." in addr: # IPv4 + addr = [int(x) for x in addr.split(".")] + elif ":" in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 - addr = addr.split(':') + addr = addr.split(":") for i, comp in enumerate(addr): - if comp == '': + if comp == "": # skip empty component at beginning or end if i == 0 or i == (len(addr) - 1): continue x += 1 # :: skips to suffix assert x < 2 else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) - sub[x].append(val & 0xff) + sub[x].append(val & 0xFF) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0) addr = sub[0] + ([0] * nullbytes) + sub[1] else: - raise ValueError(f'Could not parse address {addr}') + raise ValueError(f"Could not parse address {addr}") return bytearray(addr).hex() def test_ipv6_local(): - ''' + """ Check for (local) IPv6 support. - ''' + """ import socket # By using SOCK_DGRAM this will not actually make a connection, but it will # fail if there is no route to IPv6 localhost. have_ipv6 = True try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) - s.connect(('::1', 0)) + s.connect(("::1", 0)) except socket.error: have_ipv6 = False return have_ipv6 diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 01b403f43..9c9ba8245 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -1,901 +1,974 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test objects for interacting with a bitcoind node over the p2p protocol. The P2PInterface objects interact with the bitcoind nodes under test using the node's p2p interface. They can be used to send messages to the node, and callbacks can be registered that execute when messages are received from the node. Messages are sent to/received from the node on an asyncio event loop. State held inside the objects must be guarded by the p2p_lock to avoid data races between the main testing thread and the event loop. 
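That locking rule is the usual share-state-only-under-a-lock pattern; a minimal standalone sketch (only the name p2p_lock comes from this module, the rest is illustrative):

import threading

p2p_lock = threading.Lock()
message_count = {}

def on_message(msgtype: str) -> None:
    # Runs on the event loop thread: mutate shared state only under the lock.
    with p2p_lock:
        message_count[msgtype] = message_count.get(msgtype, 0) + 1

on_message("ping")
with p2p_lock:  # the test thread takes the same lock before reading
    assert message_count["ping"] == 1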
P2PConnection: A low-level connection object to a node's P2P interface P2PInterface: A high-level interface object for communicating to a node over P2P P2PDataStore: A p2p interface class that keeps a store of transactions and blocks and can respond correctly to getdata and getheaders messages P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps a count of how many times each txid has been announced.""" import asyncio import logging import struct import sys import threading from collections import defaultdict from io import BytesIO from test_framework.messages import ( MAX_HEADERS_RESULTS, MSG_BLOCK, MSG_TX, MSG_TYPE_MASK, NODE_NETWORK, CBlockHeader, msg_addr, msg_addrv2, msg_avahello, msg_avapoll, msg_avaproof, msg_avaproofs, msg_avaproofsreq, msg_block, msg_blocktxn, msg_cfcheckpt, msg_cfheaders, msg_cfilter, msg_cmpctblock, msg_feefilter, msg_filteradd, msg_filterclear, msg_filterload, msg_getaddr, msg_getavaaddr, msg_getavaproofs, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_merkleblock, msg_notfound, msg_ping, msg_pong, msg_sendaddrv2, msg_sendcmpct, msg_sendheaders, msg_tcpavaresponse, msg_tx, msg_verack, msg_version, sha256, ) from test_framework.util import MAX_NODES, p2p_port, wait_until_helper logger = logging.getLogger("TestFramework.p2p") # The minimum P2P version that this test framework supports MIN_P2P_VERSION_SUPPORTED = 60001 # The P2P version that this test framework implements and sends in its `version` # message. Past bip-31 for ping/pong P2P_VERSION = 70014 # The services that this test framework offers in its `version` message P2P_SERVICES = NODE_NETWORK # The P2P user agent string that this test framework sends in its `version` # message P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" # Value for relay that this test framework sends in its `version` message P2P_VERSION_RELAY = 1 MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, b"avapoll": msg_avapoll, b"avaproof": msg_avaproof, b"avaproofs": msg_avaproofs, b"avaproofsreq": msg_avaproofsreq, b"avaresponse": msg_tcpavaresponse, b"avahello": msg_avahello, b"block": msg_block, b"blocktxn": msg_blocktxn, b"cfcheckpt": msg_cfcheckpt, b"cfheaders": msg_cfheaders, b"cfilter": msg_cfilter, b"cmpctblock": msg_cmpctblock, b"feefilter": msg_feefilter, b"filteradd": msg_filteradd, b"filterclear": msg_filterclear, b"filterload": msg_filterload, b"getaddr": msg_getaddr, b"getavaaddr": msg_getavaaddr, b"getavaproofs": msg_getavaproofs, b"getblocks": msg_getblocks, b"getblocktxn": msg_getblocktxn, b"getdata": msg_getdata, b"getheaders": msg_getheaders, b"headers": msg_headers, b"inv": msg_inv, b"mempool": msg_mempool, b"merkleblock": msg_merkleblock, b"notfound": msg_notfound, b"ping": msg_ping, b"pong": msg_pong, b"sendaddrv2": msg_sendaddrv2, b"sendcmpct": msg_sendcmpct, b"sendheaders": msg_sendheaders, b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, } MAGIC_BYTES = { "mainnet": b"\xe3\xe1\xf3\xe8", "testnet3": b"\xf4\xe5\xf3\xf4", "regtest": b"\xda\xb5\xbf\xfa", } class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. This class is responsible for: - opening and closing the TCP connection to the node - reading bytes from and writing bytes to the socket - deserializing and serializing the P2P message header - logging messages as they are sent and received This class contains no logic for handing the P2P message payloads. 
It must be sub-classed and the on_message() callback overridden.""" def __init__(self): # The underlying transport of the connection. # Should only call methods on this from the NetworkThread, c.f. # call_soon_threadsafe self._transport = None @property def is_connected(self): return self._transport is not None def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): assert not self.is_connected self.timeout_factor = timeout_factor self.dstaddr = dstaddr self.dstport = dstport # The initial message to send after the connection was made: self.on_connection_send_msg = None self.on_connection_send_msg_is_raw = False self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] def peer_connect(self, dstaddr, dstport, *, net, timeout_factor): self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) loop = NetworkThread.network_event_loop - logger.debug( - f'Connecting to Bitcoin ABC Node: {self.dstaddr}:{self.dstport}') + logger.debug(f"Connecting to Bitcoin ABC Node: {self.dstaddr}:{self.dstport}") coroutine = loop.create_connection( - lambda: self, host=self.dstaddr, port=self.dstport) + lambda: self, host=self.dstaddr, port=self.dstport + ) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) def peer_accept_connection( - self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): - self.peer_connect_helper('0', 0, net, timeout_factor) + self, connect_id, connect_cb=lambda: None, *, net, timeout_factor + ): + self.peer_connect_helper("0", 0, net, timeout_factor) - logger.debug( - f'Listening for Bitcoin ABC Node with id: {connect_id}') + logger.debug(f"Listening for Bitcoin ABC Node with id: {connect_id}") return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) def peer_disconnect(self): # Connection could have already been closed by other end. NetworkThread.network_event_loop.call_soon_threadsafe( - lambda: self._transport and self._transport.abort()) + lambda: self._transport and self._transport.abort() + ) # Connection and disconnection methods def connection_made(self, transport): """asyncio callback when a connection is opened.""" assert not self._transport logger.debug(f"Connected & Listening: {self.dstaddr}:{self.dstport}") self._transport = transport if self.on_connection_send_msg: if self.on_connection_send_msg_is_raw: self.send_raw_message(self.on_connection_send_msg) else: self.send_message(self.on_connection_send_msg) # Never used again self.on_connection_send_msg = None self.on_open() def connection_lost(self, exc): """asyncio callback when a connection is closed.""" if exc: logger.warning( - f"Connection lost to {self.dstaddr}:{self.dstport} due to {exc}") + f"Connection lost to {self.dstaddr}:{self.dstport} due to {exc}" + ) else: logger.debug(f"Closed connection to: {self.dstaddr}:{self.dstport}") self._transport = None self.recvbuf = b"" self.on_close() # Socket read methods def data_received(self, t): """asyncio callback when data is read from the socket.""" with p2p_lock: if len(t) > 0: self.recvbuf += t while True: msg = self._on_data() if msg is None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. 
It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with p2p_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != self.magic_bytes: raise ValueError( - f"magic bytes mismatch: " - f"{self.magic_bytes!r} != {self.recvbuf!r}") + "magic bytes mismatch: " + f"{self.magic_bytes!r} != {self.recvbuf!r}" + ) if len(self.recvbuf) < 4 + 12 + 4 + 4: return None - msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] - msglen = struct.unpack( - "<i", self.recvbuf[4 + 12:4 + 12 + 4])[0] [... the reformatted versions of these two lines, the rest of _on_data() (checksum verification and deserialization via MESSAGEMAP), and the socket write/message logging helpers were lost in extraction; only the tail of the logging helper survives ...] if len(log_message) > 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self, support_addrv2=False): super().__init__() # Track number of messages of each type received. # Should be read-only in a test. self.message_count = defaultdict(int) # Track the most recent message of each type. # To wait for a message to be received, pop that message from # this and use self.wait_until. self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 self.support_addrv2 = support_addrv2 def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() vt.nVersion = P2P_VERSION vt.strSubVer = P2P_SUBVERSION vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 # Will be sent in connection_made callback self.on_connection_send_msg = vt - def peer_connect(self, *args, services=P2P_SERVICES, - send_version=True, **kwargs): + def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: self.peer_connect_send_version(services) return create_conn def peer_accept_connection(self, *args, services=NODE_NETWORK, **kwargs): create_conn = super().peer_accept_connection(*args, **kwargs) self.peer_connect_send_version(services) return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with p2p_lock: try: - msgtype = message.msgtype.decode('ascii') + msgtype = message.msgtype.decode("ascii") self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, f"on_{msgtype}")(message) except Exception: print(f"ERROR delivering {repr(message)} ({sys.exc_info()[0]})") raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour.
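As the comment says, tests customize behaviour by overriding only the callbacks they care about; everything else keeps the default bookkeeping done in on_message() above. A hedged sketch (InvTracker and its counter are invented for this example):

```python
from test_framework.messages import MSG_BLOCK, MSG_TYPE_MASK
from test_framework.p2p import P2PInterface

class InvTracker(P2PInterface):
    """Hypothetical subclass that counts block announcements."""

    def __init__(self):
        super().__init__()
        self.block_invs = 0

    def on_inv(self, message):
        # Count block invs, masking off any flag bits as the framework does.
        self.block_invs += sum(
            1 for i in message.inv if (i.type & MSG_TYPE_MASK) == MSG_BLOCK
        )
        # Keep the default behaviour (request the announced objects).
        super().on_inv(message)
```

Because dispatch goes through getattr(self, f"on_{msgtype}"), the override is picked up automatically, and message_count/last_message are still updated first.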
def on_open(self): pass def on_close(self): pass - def on_addr(self, message): pass + def on_addr(self, message): + pass - def on_addrv2(self, message): pass + def on_addrv2(self, message): + pass - def on_avapoll(self, message): pass + def on_avapoll(self, message): + pass - def on_avaproof(self, message): pass + def on_avaproof(self, message): + pass - def on_avaproofs(self, message): pass + def on_avaproofs(self, message): + pass - def on_avaproofsreq(self, message): pass + def on_avaproofsreq(self, message): + pass - def on_avaresponse(self, message): pass + def on_avaresponse(self, message): + pass - def on_avahello(self, message): pass + def on_avahello(self, message): + pass - def on_block(self, message): pass + def on_block(self, message): + pass - def on_blocktxn(self, message): pass + def on_blocktxn(self, message): + pass - def on_cfcheckpt(self, message): pass + def on_cfcheckpt(self, message): + pass - def on_cfheaders(self, message): pass + def on_cfheaders(self, message): + pass - def on_cfilter(self, message): pass + def on_cfilter(self, message): + pass - def on_cmpctblock(self, message): pass + def on_cmpctblock(self, message): + pass - def on_feefilter(self, message): pass + def on_feefilter(self, message): + pass - def on_filteradd(self, message): pass + def on_filteradd(self, message): + pass - def on_filterclear(self, message): pass + def on_filterclear(self, message): + pass - def on_filterload(self, message): pass + def on_filterload(self, message): + pass - def on_getaddr(self, message): pass + def on_getaddr(self, message): + pass - def on_getavaaddr(self, message): pass + def on_getavaaddr(self, message): + pass - def on_getavaproofs(self, message): pass + def on_getavaproofs(self, message): + pass - def on_getblocks(self, message): pass + def on_getblocks(self, message): + pass - def on_getblocktxn(self, message): pass + def on_getblocktxn(self, message): + pass - def on_getdata(self, message): pass + def on_getdata(self, message): + pass - def on_getheaders(self, message): pass + def on_getheaders(self, message): + pass - def on_headers(self, message): pass + def on_headers(self, message): + pass - def on_mempool(self, message): pass + def on_mempool(self, message): + pass - def on_merkleblock(self, message): pass + def on_merkleblock(self, message): + pass - def on_notfound(self, message): pass + def on_notfound(self, message): + pass - def on_pong(self, message): pass + def on_pong(self, message): + pass - def on_sendaddrv2(self, message): pass + def on_sendaddrv2(self, message): + pass - def on_sendcmpct(self, message): pass + def on_sendcmpct(self, message): + pass - def on_sendheaders(self, message): pass + def on_sendheaders(self, message): + pass - def on_tx(self, message): pass + def on_tx(self, message): + pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): - assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, \ - f"Version {message.nVersion} received. Test framework only supports " \ + assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, ( + f"Version {message.nVersion} received. 
Test framework only supports " f"versions greater than or equal to {MIN_P2P_VERSION_SUPPORTED}" + ) self.send_message(msg_verack()) if self.support_addrv2: self.send_message(msg_sendaddrv2()) self.nServices = message.nServices self.send_message(msg_getaddr()) # Connection helper methods - def wait_until(self, test_function_in, *, timeout=60, - check_connected=True): + def wait_until(self, test_function_in, *, timeout=60, check_connected=True): def test_function(): if check_connected: assert self.is_connected return test_function_in() - wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, - timeout_factor=self.timeout_factor) + wait_until_helper( + test_function, + timeout=timeout, + lock=p2p_lock, + timeout_factor=self.timeout_factor, + ) def wait_for_connect(self, timeout=60): - def test_function(): return self.is_connected + def test_function(): + return self.is_connected + self.wait_until(test_function, timeout=timeout, check_connected=False) def wait_for_disconnect(self, timeout=60): - def test_function(): return not self.is_connected + def test_function(): + return not self.is_connected + self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): - if not self.last_message.get('tx'): + if not self.last_message.get("tx"): return False - return self.last_message['tx'].tx.rehash() == txid + return self.last_message["tx"].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): - return self.last_message.get( - "block") and self.last_message["block"].block.rehash() == blockhash + return ( + self.last_message.get("block") + and self.last_message["block"].block.rehash() == blockhash + ) self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): - last_headers = self.last_message.get('headers') + last_headers = self.last_message.get("headers") if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): - last_filtered_block = self.last_message.get('merkleblock') + last_filtered_block = self.last_message.get("merkleblock") if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_getdata(self, hash_list, timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" + def test_function(): last_data = self.last_message.get("getdata") if not last_data: return False return [x.hash for x in last_data.inv] == hash_list self.wait_until(test_function, timeout=timeout) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success.
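That warning is easy to trip over in practice. A hedged sketch of the usual clear-then-wait pattern (peer stands for any connected P2PInterface; the trigger step depends on the test):

```python
from test_framework.p2p import p2p_lock

with p2p_lock:
    peer.last_message.pop("getheaders", None)  # drop any stale getheaders first

# ... provoke the node into requesting headers here, e.g. by announcing
# a block it does not have yet ...

peer.wait_for_getheaders()  # now only a *new* getheaders satisfies the wait
```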
TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" + def test_function(): return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( - "wait_for_inv() will only verify the first inv object") + "wait_for_inv() will only verify the first inv object" + ) def test_function(): - return self.last_message.get("inv") and \ - self.last_message["inv"].inv[0].type == expected_inv[0].type and \ - self.last_message["inv"].inv[0].hash == expected_inv[0].hash + return ( + self.last_message.get("inv") + and self.last_message["inv"].inv[0].type == expected_inv[0].type + and self.last_message["inv"].inv[0].hash == expected_inv[0].hash + ) self.wait_until(test_function, timeout=timeout) def wait_for_verack(self, timeout=60): def test_function(): return "verack" in self.last_message self.wait_until(test_function, timeout=timeout) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) def sync_send_with_ping(self, timeout=60): """Ensure SendMessages is called on this connection""" # Calling sync_with_ping twice requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have # been called at least once self.sync_with_ping() self.sync_with_ping() def sync_with_ping(self, timeout=60): """Ensure ProcessMessages is called on this connection""" self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): - return self.last_message.get( - "pong") and self.last_message["pong"].nonce == self.ping_counter + return ( + self.last_message.get("pong") + and self.last_message["pong"].nonce == self.ping_counter + ) self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. p2p_lock = threading.Lock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be # created assert not self.network_event_loop NetworkThread.listeners = {} NetworkThread.protos = {} NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" - self.network_event_loop.call_soon_threadsafe( - self.network_event_loop.stop) - wait_until_helper(lambda: not self.network_event_loop.is_running(), - timeout=timeout) + self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) + wait_until_helper( + lambda: not self.network_event_loop.is_running(), timeout=timeout + ) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. 
NetworkThread.network_event_loop = None @classmethod def listen(cls, p2p, callback, port=None, addr=None, idx=1): - """ Ensure a listening server is running on the given port, and run the + """Ensure a listening server is running on the given port, and run the protocol specified by `p2p` on the next connection to it. Once ready for connections, call `callback`.""" if port is None: assert 0 < idx <= MAX_NODES port = p2p_port(MAX_NODES - idx) if addr is None: - addr = '127.0.0.1' + addr = "127.0.0.1" coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe( - cls.network_event_loop.create_task, coroutine) + cls.network_event_loop.create_task, coroutine + ) @classmethod async def create_listen_server(cls, addr, port, callback, proto): def peer_protocol(): """Returns a function that does the protocol handling for a new connection. To allow different connections to have different behaviors, the protocol function is first put in the cls.protos dict. When the connection is made, the function removes the protocol function from that dict, and returns it so the event loop can start executing it.""" response = cls.protos.get((addr, port)) cls.protos[(addr, port)] = None return response if (addr, port) not in cls.listeners: # When creating a listener on a given (addr, port) we only need to # do it once. If we want different behaviors for different # connections, we can accomplish this by providing different # `proto` functions - listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) - logger.debug( - f"Listening server on {addr}:{port} should be started") + listener = await cls.network_event_loop.create_server( + peer_protocol, addr, port + ) + logger.debug(f"Listening server on {addr}:{port} should be started") cls.listeners[(addr, port)] = listener cls.protos[(addr, port)] = proto callback(addr, port) class P2PDataStore(P2PInterface): """A P2P data store class. - Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" + Keeps a block and transaction store and responds correctly to getdata and getheaders requests. + """ def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} - self.last_block_hash = '' + self.last_block_hash = "" # store of txs. 
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with the corresponding tx or block message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) - if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): + if ( + inv.type & MSG_TYPE_MASK + ) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) - elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): + elif ( + inv.type & MSG_TYPE_MASK + ) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: - logger.debug( - f'getdata message type {hex(inv.type)} received.') + logger.debug(f"getdata message type {hex(inv.type)} received.") def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: - prev_block_header = CBlockHeader( - self.block_store[prev_block_hash]) + prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug( - f'block hash {hex(prev_block_hash)} not found in block store') + f"block hash {hex(prev_block_hash)} not found in block store" + ) break # Truncate the list if there are too many headers - headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] + headers_list = headers_list[: -MAX_HEADERS_RESULTS - 1 : -1] response = msg_headers(headers_list) if response is not None: self.send_message(response) - def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, - reject_reason=None, expect_disconnect=False, timeout=60): + def send_blocks_and_test( + self, + blocks, + node, + *, + success=True, + force_send=False, + reject_reason=None, + expect_disconnect=False, + timeout=60, + ): """Send blocks to test node and test whether the tip advances. - - add all blocks to our block_store - - send a headers message for the final block - - the on_getheaders handler will ensure that any getheaders are responded to - - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will - ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - - if success is True: assert that the node's tip advances to the most recent block - - if success is False: assert that the node's tip doesn't advance - - if reject_reason is set: assert that the correct reject message is logged""" + - add all blocks to our block_store + - send a headers message for the final block + - the on_getheaders handler will ensure that any getheaders are responded to + - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will + ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
+ - if success is True: assert that the node's tip advances to the most recent block + - if success is False: assert that the node's tip doesn't advance + - if reject_reason is set: assert that the correct reject message is logged""" with p2p_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 def test(): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message( - msg_headers([CBlockHeader(block) for block in blocks])) + msg_headers([CBlockHeader(block) for block in blocks]) + ) self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, check_connected=success, ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: - self.wait_until(lambda: node.getbestblockhash() == - blocks[-1].hash, timeout=timeout) + self.wait_until( + lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout + ) else: assert node.getbestblockhash() != blocks[-1].hash if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() - def send_txs_and_test(self, txs, node, *, success=True, - expect_disconnect=False, reject_reason=None): + def send_txs_and_test( + self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None + ): """Send txs to test node and test whether they're accepted to the mempool. - - add all txs to our tx_store - - send tx messages for all txs - - if success is True/False: assert that the txs are/are not accepted to the mempool - - if expect_disconnect is True: Skip the sync with ping - - if reject_reason is set: assert that the correct reject message is logged.""" + - add all txs to our tx_store + - send tx messages for all txs + - if success is True/False: assert that the txs are/are not accepted to the mempool + - if expect_disconnect is True: Skip the sync with ping + - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: for tx in txs: self.tx_store[tx.sha256] = tx def test(): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, f"{tx.hash} not found in mempool" else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, f"{tx.hash} tx found in mempool" if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() class P2PTxInvStore(P2PInterface): """A P2PInterface which stores a count of how many times each txid has been announced.""" def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): # Send getdata in response. super().on_inv(message) # Store how many times invs have been received for each tx. for i in message.inv: if i.type == MSG_TX: # save txid self.tx_invs_received[i.hash] += 1 def get_invs(self): with p2p_lock: return list(self.tx_invs_received.keys()) def wait_for_broadcast(self, txns, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. 
self.wait_until( lambda: set(self.tx_invs_received.keys()) == {int(tx, 16) for tx in txns}, - timeout=timeout) + timeout=timeout, + ) # Flush messages and wait for the getdatas to be processed self.sync_with_ping() diff --git a/test/functional/test_framework/ripemd160.py b/test/functional/test_framework/ripemd160.py index 3c30316de..a501f2277 100644 --- a/test/functional/test_framework/ripemd160.py +++ b/test/functional/test_framework/ripemd160.py @@ -1,131 +1,136 @@ #!/usr/bin/env python3 # Copyright (c) 2021 Pieter Wuille # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test-only pure Python RIPEMD160 implementation.""" import unittest +# fmt: off # Message schedule indexes for the left path. ML = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13 ] # Message schedule indexes for the right path. MR = [ 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11 ] # Rotation counts for the left path. RL = [ 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6 ] # Rotation counts for the right path. RR = [ 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 ] +# fmt: on # K constants for the left path. -KL = [0, 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xa953fd4e] +KL = [0, 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xA953FD4E] # K constants for the right path. -KR = [0x50a28be6, 0x5c4dd124, 0x6d703ef3, 0x7a6d76e9, 0] +KR = [0x50A28BE6, 0x5C4DD124, 0x6D703EF3, 0x7A6D76E9, 0] def fi(x, y, z, i): """The f1, f2, f3, f4, and f5 functions from the specification.""" if i == 0: return x ^ y ^ z elif i == 1: return (x & y) | (~x & z) elif i == 2: return (x | ~y) ^ z elif i == 3: return (x & z) | (y & ~z) elif i == 4: return x ^ (y | ~z) else: assert False def rol(x, i): """Rotate the bottom 32 bits of x left by i bits.""" - return ((x << i) | ((x & 0xffffffff) >> (32 - i))) & 0xffffffff + return ((x << i) | ((x & 0xFFFFFFFF) >> (32 - i))) & 0xFFFFFFFF def compress(h0, h1, h2, h3, h4, block): """Compress state (h0, h1, h2, h3, h4) with block.""" # Left path variables. al, bl, cl, dl, el = h0, h1, h2, h3, h4 # Right path variables. ar, br, cr, dr, er = h0, h1, h2, h3, h4 # Message variables. - x = [int.from_bytes(block[4 * i:4 * (i + 1)], 'little') for i in range(16)] + x = [int.from_bytes(block[4 * i : 4 * (i + 1)], "little") for i in range(16)] # Iterate over the 80 rounds of the compression. for j in range(80): rnd = j >> 4 # Perform left side of the transformation. al = rol(al + fi(bl, cl, dl, rnd) + x[ML[j]] + KL[rnd], RL[j]) + el al, bl, cl, dl, el = el, al, bl, rol(cl, 10), dl # Perform right side of the transformation. 
ar = rol(ar + fi(br, cr, dr, 4 - rnd) + x[MR[j]] + KR[rnd], RR[j]) + er ar, br, cr, dr, er = er, ar, br, rol(cr, 10), dr # Compose old state, left transform, and right transform into new state. return h1 + cl + dr, h2 + dl + er, h3 + el + ar, h4 + al + br, h0 + bl + cr def ripemd160(data: bytes) -> bytes: """Compute the RIPEMD-160 hash of data.""" # Initialize state. - state = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0) + state = (0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0) # Process full 64-byte blocks in the input. for b in range(len(data) >> 6): - state = compress(*state, data[64 * b:64 * (b + 1)]) + state = compress(*state, data[64 * b : 64 * (b + 1)]) # Construct final blocks (with padding and size). pad = b"\x80" + b"\x00" * ((119 - len(data)) & 63) - fin = data[len(data) & ~63:] + pad + (8 * len(data)).to_bytes(8, 'little') + fin = data[len(data) & ~63 :] + pad + (8 * len(data)).to_bytes(8, "little") # Process final blocks. for b in range(len(fin) >> 6): - state = compress(*state, fin[64 * b:64 * (b + 1)]) + state = compress(*state, fin[64 * b : 64 * (b + 1)]) # Produce output. - return b"".join((h & 0xffffffff).to_bytes(4, 'little') for h in state) + return b"".join((h & 0xFFFFFFFF).to_bytes(4, "little") for h in state) class TestFrameworkKey(unittest.TestCase): def test_ripemd160(self): """RIPEMD-160 test vectors.""" # See https://homes.esat.kuleuven.be/~bosselae/ripemd160.html for msg, hexout in [ (b"", "9c1185a5c5e9fc54612808977ee8f548b2258d31"), (b"a", "0bdc9d2d256b3ee9daae347be6f4dc835a467ffe"), (b"abc", "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc"), (b"message digest", "5d0689ef49d2fae572b881b123a85ffa21595f36"), - (b"abcdefghijklmnopqrstuvwxyz", - "f71c27109c692c1b56bbdceb5b9d2865b3708dbc"), - (b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", - "12a053384a9c0c88e405a06c27dcf49ada62eb2b"), - (b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "b0e20b6e3116640286ed3a87a5713079b21f5189"), + (b"abcdefghijklmnopqrstuvwxyz", "f71c27109c692c1b56bbdceb5b9d2865b3708dbc"), + ( + b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", + "12a053384a9c0c88e405a06c27dcf49ada62eb2b", + ), + ( + b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "b0e20b6e3116640286ed3a87a5713079b21f5189", + ), (b"1234567890" * 8, "9b752e45573d4b39f4dbd3323cab82bf63326bfb"), - (b"a" * 1000000, "52783243c1697bdbe16d37f97f68f08325dc1528") + (b"a" * 1000000, "52783243c1697bdbe16d37f97f68f08325dc1528"), ]: self.assertEqual(ripemd160(msg).hex(), hexout) diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 0b6e5e175..263776b88 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -1,769 +1,787 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Functionality to build scripts, as well as SignatureHash(). This file is modified from python-bitcoinlib. 
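The pure-Python implementation above is easy to sanity-check in isolation. The assertions below reuse vectors from TestFrameworkKey.test_ripemd160, and the final function mirrors how script.py (the next file in this diff) derives HASH160; this is a sketch, not the framework's own definition:

```python
import hashlib

from test_framework.ripemd160 import ripemd160

# Vectors taken from the unit test above:
assert ripemd160(b"abc").hex() == "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc"
assert ripemd160(b"message digest").hex() == "5d0689ef49d2fae572b881b123a85ffa21595f36"

def hash160(s: bytes) -> bytes:
    # HASH160 is RIPEMD-160 over SHA-256, as script.py defines just below.
    return ripemd160(hashlib.sha256(s).digest())
```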
""" import struct import unittest from typing import Dict, List from .messages import ( CTransaction, CTxOut, hash256, ser_string, ser_uint256, sha256, uint256_from_str, ) from .ripemd160 import ripemd160 MAX_SCRIPT_ELEMENT_SIZE = 520 OPCODE_NAMES: Dict["CScriptOp", str] = {} def hash160(s: bytes) -> bytes: return ripemd160(sha256(s)) def bn2vch(v): """Convert number to bitcoin-specific little endian format.""" # We need v.bit_length() bits, plus a sign bit for every nonzero number. n_bits = v.bit_length() + (v != 0) # The number of bytes for that is: n_bytes = (n_bits + 7) // 8 # Convert number to absolute value + sign in top bit. encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1)) # Serialize to bytes - return encoded_v.to_bytes(n_bytes, 'little') + return encoded_v.to_bytes(n_bytes, "little") _opcode_instances: List["CScriptOp"] = [] class CScriptOp(int): """A single script opcode""" + __slots__ = () @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" - if len(d) < 0x4c: + if len(d) < 0x4C: # OP_PUSHDATA - return b'' + bytes([len(d)]) + d - elif len(d) <= 0xff: + return b"" + bytes([len(d)]) + d + elif len(d) <= 0xFF: # OP_PUSHDATA1 - return b'\x4c' + bytes([len(d)]) + d - elif len(d) <= 0xffff: + return b"\x4c" + bytes([len(d)]) + d + elif len(d) <= 0xFFFF: # OP_PUSHDATA2 - return b'\x4d' + struct.pack(b'>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes([len(r)]) + r @staticmethod def decode(vch): result = 0 # We assume valid push_size and minimal encoding value = vch[1:] if len(value) == 0: return result for i, byte in enumerate(value): result |= int(byte) << 8 * i if value[-1] >= 0x80: # Mask for all but the highest result bit - num_mask = (2**(len(value) * 8) - 1) >> 1 + num_mask = (2 ** (len(value) * 8) - 1) >> 1 result &= num_mask result *= -1 return result class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. """ + __slots__ = () @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bytes([other]) elif isinstance(other, CScriptNum): - if (other.value == 0): + if other.value == 0: other = bytes([CScriptOp(OP_0)]) else: other = CScriptNum.encode(other) elif isinstance(other, int): if 0 <= other <= 16: other = bytes([CScriptOp.encode_op_n(other)]) elif other == -1: other = bytes([OP_1NEGATE]) else: other = CScriptOp.encode_op_pushdata(bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # add makes no sense for a CScript() raise NotImplementedError def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError - def __new__(cls, value=b''): + def __new__(cls, value=b""): if isinstance(value, bytes) or isinstance(value, bytearray): return super().__new__(cls, value) else: + def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) + # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. 
- return super().__new__( - cls, b''.join(coerce_iterable(value))) + return super().__new__(cls, b"".join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. (sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = self[i] i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: - pushdata_type = f'PUSHDATA({opcode})' + pushdata_type = f"PUSHDATA({opcode})" datasize = opcode elif opcode == OP_PUSHDATA1: - pushdata_type = 'PUSHDATA1' + pushdata_type = "PUSHDATA1" if i >= len(self): - raise CScriptInvalidError( - 'PUSHDATA1: missing data length') + raise CScriptInvalidError("PUSHDATA1: missing data length") datasize = self[i] i += 1 elif opcode == OP_PUSHDATA2: - pushdata_type = 'PUSHDATA2' + pushdata_type = "PUSHDATA2" if i + 1 >= len(self): - raise CScriptInvalidError( - 'PUSHDATA2: missing data length') + raise CScriptInvalidError("PUSHDATA2: missing data length") datasize = self[i] + (self[i + 1] << 8) i += 2 elif opcode == OP_PUSHDATA4: - pushdata_type = 'PUSHDATA4' + pushdata_type = "PUSHDATA4" if i + 3 >= len(self): - raise CScriptInvalidError( - 'PUSHDATA4: missing data length') - datasize = self[i] + (self[i + 1] << 8) + \ - (self[i + 2] << 16) + (self[i + 3] << 24) + raise CScriptInvalidError("PUSHDATA4: missing data length") + datasize = ( + self[i] + + (self[i + 1] << 8) + + (self[i + 2] << 16) + + (self[i + 3] << 24) + ) i += 4 else: assert False # shouldn't happen - data = bytes(self[i:i + datasize]) + data = bytes(self[i : i + datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError( - f'{pushdata_type}: truncated data', data) + f"{pushdata_type}: truncated data", data + ) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. """ - for (opcode, data, sop_idx) in self.raw_iter(): + for opcode, data, sop_idx in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): def _repr(o): if isinstance(o, bytes): return f"x('{o.hex()}')" else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: - op = f'{_repr(err.data)}...' + op = f"{_repr(err.data)}..." 
break except CScriptInvalidError as err: - op = f'<{err}>' + op = f"<{err}>" break except StopIteration: break finally: if op is not None: ops.append(op) return f"CScript([{', '.join(ops)}])" SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_FORKID = 0x40 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" - r = b'' + r = b"" last_sop_idx = sop_idx = 0 skip = True - for (opcode, data, sop_idx) in script.raw_iter(): + for opcode, data, sop_idx in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx - if script[sop_idx:sop_idx + len(sig)] == sig: + if script[sop_idx : sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (sighash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity) """ - HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + HASH_ONE = b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" if inIdx >= len(txTo.vin): return (HASH_ONE, f"inIdx {inIdx} out of range ({len(txTo.vin)})") txtmp = CTransaction(txTo) for txin in txtmp.vin: - txin.scriptSig = b'' - txtmp.vin[inIdx].scriptSig = FindAndDelete( - script, CScript([OP_CODESEPARATOR])) + txin.scriptSig = b"" + txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) - if (hashtype & 0x1f) == SIGHASH_NONE: + if (hashtype & 0x1F) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 - elif (hashtype & 0x1f) == SIGHASH_SINGLE: + elif (hashtype & 0x1F) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, f"outIdx {outIdx} out of range ({len(txtmp.vout)})") tmp = txtmp.vout[outIdx] txtmp.vout = [] for _ in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b"<I", hashtype) [... the remainder of SignatureHash(), the tail of script.py, and the header of test/functional/test_framework/siphash.py (diff header, shebang, copyright, module docstring) were lost in extraction; the text resumes inside siphash.py ...] def rotl64(n, b): """Rotate the bottom 64 bits of n left by b bits.""" return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b def siphash_round(v0, v1, v2, v3): v0 = (v0 + v1) & ((1 << 64) - 1) v1 = rotl64(v1, 13) v1 ^= v0 v0 = rotl64(v0, 32) v2 = (v2 + v3) & ((1 << 64) - 1) v3 = rotl64(v3, 16) v3 ^= v2 v0 = (v0 + v3) & ((1 << 64) - 1) v3 = rotl64(v3, 21) v3 ^= v0 v2 = (v2 + v1) & ((1 << 64) - 1) v1 = rotl64(v1, 17) v1 ^= v2 v2 = rotl64(v2, 32) return (v0, v1, v2, v3) def siphash256(k0, k1, h): n0 = h & ((1 << 64) - 1) n1 = (h >> 64) & ((1 << 64) - 1) n2 = (h >> 128) & ((1 << 64) - 1) n3 = (h >> 192) & ((1 << 64) - 1) - v0 = 0x736f6d6570736575 ^ k0 - v1 = 0x646f72616e646f6d ^ k1 - v2 = 0x6c7967656e657261 ^ k0 + v0 = 0x736F6D6570736575 ^ k0 + v1 = 0x646F72616E646F6D ^ k1 + v2 = 0x6C7967656E657261 ^ k0 v3 = 0x7465646279746573 ^ k1 ^ n0 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n0 v3 ^= n1 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n1 v3 ^= n2 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= n2 v3 ^= n3 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 =
siphash_round(v0, v1, v2, v3) v0 ^= n3 v3 ^= 0x2000000000000000 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0 ^= 0x2000000000000000 v2 ^= 0xFF v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) return v0 ^ v1 ^ v2 ^ v3 diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py index be103c04e..9a653c20e 100644 --- a/test/functional/test_framework/socks5.py +++ b/test/functional/test_framework/socks5.py @@ -1,175 +1,176 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Dummy Socks5 server for testing.""" import logging import queue import socket import threading logger = logging.getLogger("TestFramework.socks5") # Protocol constants class Command: CONNECT = 0x01 class AddressType: IPV4 = 0x01 DOMAINNAME = 0x03 IPV6 = 0x04 + # Utility functions def recvall(s, n): """Receive n bytes from a socket, or fail.""" rv = bytearray() while n > 0: d = s.recv(n) if not d: - raise IOError('Unexpected end of stream') + raise IOError("Unexpected end of stream") rv.extend(d) n -= len(d) return rv + # Implementation classes class Socks5Configuration: """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication class Socks5Command: """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): - return f'Socks5Command({self.cmd},{self.atyp},{self.addr},{self.port},{self.username},{self.password})' + return f"Socks5Command({self.cmd},{self.atyp},{self.addr},{self.port},{self.username},{self.password})" class Socks5Connection: def __init__(self, serv, conn): self.serv = serv self.conn = conn def handle(self): """Handle socks5 request according to RFC 1928.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: - raise IOError(f'Invalid socks version {ver}') + raise IOError(f"Invalid socks version {ver}") # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: - raise IOError('No supported authentication method was offered') + raise IOError("No supported authentication method was offered") # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: - raise IOError(f'Invalid auth packet version {ver}') + raise IOError(f"Invalid auth packet version {ver}") ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read
connect request ver, cmd, _, atyp = recvall(self.conn, 4) if ver != 0x05: - raise IOError( - f'Invalid socks version {ver} in connect request') + raise IOError(f"Invalid socks version {ver} in connect request") if cmd != Command.CONNECT: - raise IOError( - f'Unhandled command {cmd} in connect request') + raise IOError(f"Unhandled command {cmd} in connect request") if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = recvall(self.conn, n) elif atyp == AddressType.IPV6: addr = recvall(self.conn, 16) else: - raise IOError(f'Unknown address type {atyp}') + raise IOError(f"Unknown address type {atyp}") port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( - bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) + bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]) + ) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) - logger.info(f'Proxy: {cmdin}') + logger.info(f"Proxy: {cmdin}") # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") self.serv.queue.put(e) finally: self.conn.close() class Socks5Server: def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, _) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert not self.running self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index d852e188a..22072117e 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1,944 +1,1059 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" import argparse import configparser import logging import os import pdb import platform import random import shutil import subprocess import sys import tempfile import time from enum import Enum from typing import List from . import coverage from .address import ADDRESS_ECREG_P2SH_OP_TRUE from .authproxy import JSONRPCException from .avatools import get_proof_ids from .p2p import NetworkThread from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, check_json_precision, chronik_port, get_datadir_path, initialize_datadir, p2p_port, rpc_port, uint256_hex, wait_until_helper, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 # Timestamp is Sep. 
20th, 2022 at 12:00:00 TIMESTAMP_IN_THE_PAST = 1663675200 TMPDIR_PREFIX = "bitcoin_func_test_" class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message class BitcoinTestMetaClass(type): """Metaclass for BitcoinTestFramework. Ensures that any attempt to register a subclass of `BitcoinTestFramework` adheres to a standard whereby the subclass overrides `set_test_params` and `run_test` but DOES NOT override either `__init__` or `main`. If any of those standards are violated, a ``TypeError`` is raised.""" def __new__(cls, clsname, bases, dct): - if not clsname == 'BitcoinTestFramework': - if not ('run_test' in dct and 'set_test_params' in dct): - raise TypeError("BitcoinTestFramework subclasses must override " - "'run_test' and 'set_test_params'") - if '__init__' in dct or 'main' in dct: - raise TypeError("BitcoinTestFramework subclasses may not override " - "'__init__' or 'main'") + if not clsname == "BitcoinTestFramework": + if not ("run_test" in dct and "set_test_params" in dct): + raise TypeError( + "BitcoinTestFramework subclasses must override " + "'run_test' and 'set_test_params'" + ) + if "__init__" in dct or "main" in dct: + raise TypeError( + "BitcoinTestFramework subclasses may not override " + "'__init__' or 'main'" + ) return super().__new__(cls, clsname, bases, dct) class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" - self.chain: str = 'regtest' + self.chain: str = "regtest" self.setup_clean_chain: bool = False self.nodes: List[TestNode] = [] self.network_thread = None # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 self.supports_cli = True self.bind_to_localhost_only = True self.parse_args() self.default_wallet_name = "" self.wallet_data_filename = "wallet.dat" # Optional list of wallet names that can be set in set_test_params to # create and import keys to. If unset, default is len(nodes) * # [default_wallet_name]. If wallet names are None, wallet creation is # skipped. If list is truncated, wallet creation is skipped and keys # are not imported. self.wallet_names = None # Disable ThreadOpenConnections by default, so that adding entries to # addrman will not result in automatic connections to them. self.disable_autoconnect = True self.set_test_params() if self.options.timeout_factor == 0: self.options.timeout_factor = 99999 # optionally, increase timeout by a factor self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) def main(self): """Main function. 
This should not be overridden by the subclass test scripts.""" assert hasattr( - self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" + self, "num_nodes" + ), "Test must set self.num_nodes in set_test_params()" try: self.setup() self.run_test() except JSONRPCException: self.log.exception("JSONRPC error") self.success = TestStatus.FAILED except SkipTest as e: self.log.warning(f"Test Skipped: {e.message}") self.success = TestStatus.SKIPPED except AssertionError: self.log.exception("Assertion failed") self.success = TestStatus.FAILED except KeyError: self.log.exception("Key error") self.success = TestStatus.FAILED except subprocess.CalledProcessError as e: self.log.exception(f"Called Process failed with '{e.output}'") self.success = TestStatus.FAILED except Exception: self.log.exception("Unexpected exception caught during testing") self.success = TestStatus.FAILED except KeyboardInterrupt: self.log.warning("Exiting after keyboard interrupt") self.success = TestStatus.FAILED finally: exit_code = self.shutdown() sys.exit(exit_code) def parse_args(self): parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(f"{os.path.dirname(os.path.realpath(__file__))}/../../cache"), - help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", - help="Root directory for datadirs") - parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") - parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", - help="Print out all RPC calls as they are made") - parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, - help="The seed to use for assigning port numbers (default: current process id)") - parser.add_argument("--coveragedir", dest="coveragedir", - help="Write tested RPC commands into this directory") - parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath( - __file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)") - parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", - help="Attach a python debugger if test fails") - parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", - help="use bitcoin-cli instead of RPC for all commands") - parser.add_argument("--perf", dest="perf", default=False, action="store_true", - help="profile running nodes with perf for the duration of the test") - parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required") - parser.add_argument("--randomseed", type=int, - help="set a random seed for deterministically reproducing a previous test run") - parser.add_argument("--descriptors", default=False, action="store_true", - help="Run test using a descriptor wallet") - parser.add_argument("--with-wellingtonactivation", dest="wellingtonactivation", default=False, action="store_true", - help=f"Activate wellington update on timestamp {TIMESTAMP_IN_THE_PAST}") parser.add_argument( - '--timeout-factor', + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=os.path.abspath( + f"{os.path.dirname(os.path.realpath(__file__))}/../../cache" + ), + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="INFO", + help=( + "log events at this level and higher to the console. Can be set to" + " DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG" + " will output all logs to console. Note that logs at all levels are" + " always written to the test_framework.log file in the temporary test" + " directory." 
+ ), + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=os.getpid(), + type=int, + help=( + "The seed to use for assigning port numbers (default: current" + " process id)" + ), + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=os.path.abspath( + os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini" + ), + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help=( + "run nodes under the valgrind memory error detector: expect at least a" + " ~10x slowdown, valgrind 3.14 or later required" + ), + ) + parser.add_argument( + "--randomseed", + type=int, + help=( + "set a random seed for deterministically reproducing a previous" + " test run" + ), + ) + parser.add_argument( + "--descriptors", + default=False, + action="store_true", + help="Run test using a descriptor wallet", + ) + parser.add_argument( + "--with-wellingtonactivation", + dest="wellingtonactivation", + default=False, + action="store_true", + help=f"Activate wellington update on timestamp {TIMESTAMP_IN_THE_PAST}", + ) + parser.add_argument( + "--timeout-factor", dest="timeout_factor", type=float, default=1.0, - help='adjust test timeouts by a factor. ' - 'Setting it to 0 disables all timeouts') + help=( + "adjust test timeouts by a factor. 
" + "Setting it to 0 disables all timeouts" + ), + ) self.add_options(parser) self.options = parser.parse_args() PortSeed.n = self.options.port_seed def setup(self): """Call this method to start up the test framework object with options set.""" check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) config = configparser.ConfigParser() - config.read_file(open(self.options.configfile, encoding='utf-8')) + config.read_file(open(self.options.configfile, encoding="utf-8")) self.config = config fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", - f"bitcoind{config['environment']['EXEEXT']}" + f"bitcoind{config['environment']['EXEEXT']}", ) fname_bitcoincli = os.path.join( config["environment"]["BUILDDIR"], "src", - f"bitcoin-cli{config['environment']['EXEEXT']}" + f"bitcoin-cli{config['environment']['EXEEXT']}", ) self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind) - self.options.bitcoincli = os.getenv( - "BITCOINCLI", default=fname_bitcoincli) + self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli) self.options.emulator = config["environment"]["EMULATOR"] or None - os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \ - config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \ - os.environ['PATH'] + os.environ["PATH"] = ( + config["environment"]["BUILDDIR"] + + os.pathsep + + config["environment"]["BUILDDIR"] + + os.path.sep + + "qt" + + os.pathsep + + os.environ["PATH"] + ) # Add test dir to sys.path (to access generated modules) - sys.path.append(os.path.join(config['environment']['BUILDDIR'], "test")) + sys.path.append(os.path.join(config["environment"]["BUILDDIR"], "test")) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() # Seed the PRNG. Note that test runs are reproducible if and only if # a single thread accesses the PRNG. For more information, see # https://docs.python.org/3/library/random.html#notes-on-reproducibility. # The network thread shouldn't access random. If we need to change the # network thread to access randomness, it should instantiate its own # random.Random object. seed = self.options.randomseed if seed is None: seed = random.randrange(sys.maxsize) else: self.log.debug(f"User supplied random seed {seed}") random.seed(seed) self.log.debug(f"PRNG seed is: {seed}") - self.log.debug('Setting up network thread') + self.log.debug("Setting up network thread") self.network_thread = NetworkThread() self.network_thread.start() if self.options.usecli: if not self.supports_cli: - raise SkipTest( - "--usecli specified but test does not support using CLI") + raise SkipTest("--usecli specified but test does not support using CLI") self.skip_if_no_cli() self.skip_test_if_missing_module() self.setup_chain() self.setup_network() self.success = TestStatus.PASSED def shutdown(self): """Call this method to shut down the test framework object.""" if self.success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? 
for help") pdb.set_trace() - self.log.debug('Closing down network thread') + self.log.debug("Closing down network thread") self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False - self.log.info( - "Note: bitcoinds were not stopped and may still be running") + self.log.info("Note: bitcoinds were not stopped and may still be running") should_clean_up = ( - not self.options.nocleanup and - not self.options.noshutdown and - self.success != TestStatus.FAILED and - not self.options.perf + not self.options.nocleanup + and not self.options.noshutdown + and self.success != TestStatus.FAILED + and not self.options.perf ) if should_clean_up: self.log.info(f"Cleaning up {self.options.tmpdir} on exit") cleanup_tree_on_exit = True elif self.options.perf: self.log.warning( - f"Not cleaning up dir {self.options.tmpdir} due to perf data") + f"Not cleaning up dir {self.options.tmpdir} due to perf data" + ) cleanup_tree_on_exit = False else: - self.log.warning( - f"Not cleaning up dir {self.options.tmpdir}") + self.log.warning(f"Not cleaning up dir {self.options.tmpdir}") cleanup_tree_on_exit = False if self.success == TestStatus.PASSED: self.log.info("Tests successful") exit_code = TEST_EXIT_PASSED elif self.success == TestStatus.SKIPPED: self.log.info("Test skipped") exit_code = TEST_EXIT_SKIPPED else: self.log.error( f"Test failed. Test logging available at {self.options.tmpdir}" - f"/test_framework.log") + "/test_framework.log" + ) self.log.error("") combine_logs_path = os.path.normpath( - f'{os.path.dirname(os.path.realpath(__file__))}/../combine_logs.py') + f"{os.path.dirname(os.path.realpath(__file__))}/../combine_logs.py" + ) self.log.error( f"Hint: Call {combine_logs_path} '{self.options.tmpdir}' to " - f"consolidate all logs") + "consolidate all logs" + ) self.log.error("") self.log.error( "If this failure happened unexpectedly or intermittently, please" - " file a bug and provide a link or upload of the combined log.") - self.log.error(self.config['environment']['PACKAGE_BUGREPORT']) + " file a bug and provide a link or upload of the combined log." + ) + self.log.error(self.config["environment"]["PACKAGE_BUGREPORT"]) self.log.error("") exit_code = TEST_EXIT_FAILED # Logging.shutdown will not remove stream- and filehandlers, so we must # do it explicitly. Handlers are removed so the next test run can apply # different log handler settings. # See: https://docs.python.org/3/library/logging.html#logging.shutdown for h in list(self.log.handlers): h.flush() h.close() self.log.removeHandler(h) rpc_logger = logging.getLogger("BitcoinRPC") for h in list(rpc_logger.handlers): h.flush() rpc_logger.removeHandler(h) if cleanup_tree_on_exit: shutil.rmtree(self.options.tmpdir) self.nodes.clear() return exit_code # Methods to override in subclass test scripts. 
def set_test_params(self): """Tests must override this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def skip_test_if_missing_module(self): """Override this method to skip a test if a module is not compiled""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info(f"Initializing test directory {self.options.tmpdir}") if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. # # Topology looks like this: # node0 <-- node1 <-- node2 <-- node3 # # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To # ensure block propagation, all nodes will establish outgoing connections toward node0. # See fPreferredDownload in net_processing. # # If further outbound connections are needed, they can be added at the beginning of the test with e.g. # self.connect_nodes(1, 2) for i in range(self.num_nodes - 1): self.connect_nodes(i + 1, i) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = [[]] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() if self.is_wallet_compiled(): self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: assert_equal(n.getblockchaininfo()["blocks"], 199) # To ensure that all nodes are out of IBD, the most recent block # must have a timestamp not too old (see IsInitialBlockDownload()). - self.log.debug('Generate a block with current time') - block_hash = self.generate( - self.nodes[0], 1, sync_fun=self.no_op)[0] + self.log.debug("Generate a block with current time") + block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0] block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0) for n in self.nodes: n.submitblock(block) chain_info = n.getblockchaininfo() assert_equal(chain_info["blocks"], 200) assert_equal(chain_info["initialblockdownload"], False) def import_deterministic_coinbase_privkeys(self): wallet_names = ( [self.default_wallet_name] * len(self.nodes) - if self.wallet_names is None else self.wallet_names + if self.wallet_names is None + else self.wallet_names ) assert len(wallet_names) <= len(self.nodes) for wallet_name, n in zip(wallet_names, self.nodes): if wallet_name is not None: n.createwallet( wallet_name=wallet_name, descriptors=self.options.descriptors, - load_on_startup=True) + load_on_startup=True, + ) n.importprivkey( - privkey=n.get_deterministic_priv_key().key, - label='coinbase') + privkey=n.get_deterministic_priv_key().key, label="coinbase" + ) def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test # scripts. - def add_nodes(self, num_nodes: int, extra_args=None, - *, host=None, binary=None): + def add_nodes(self, num_nodes: int, extra_args=None, *, host=None, binary=None): """Instantiate TestNode objects. 
Should only be called once after the nodes have been specified in set_test_params().""" if self.bind_to_localhost_only: extra_confs = [["bind=127.0.0.1"]] * num_nodes else: extra_confs = [[]] * num_nodes if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [self.options.bitcoind] * num_nodes assert_equal(len(extra_confs), num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): - self.nodes.append(TestNode( - i, - get_datadir_path(self.options.tmpdir, i), - chain=self.chain, - host=host, - rpc_port=rpc_port(i), - p2p_port=p2p_port(i), - chronik_port=chronik_port(i), - timewait=self.rpc_timeout, - timeout_factor=self.options.timeout_factor, - bitcoind=binary[i], - bitcoin_cli=self.options.bitcoincli, - coverage_dir=self.options.coveragedir, - cwd=self.options.tmpdir, - extra_conf=extra_confs[i], - extra_args=extra_args[i], - use_cli=self.options.usecli, - emulator=self.options.emulator, - start_perf=self.options.perf, - use_valgrind=self.options.valgrind, - descriptors=self.options.descriptors, - )) + self.nodes.append( + TestNode( + i, + get_datadir_path(self.options.tmpdir, i), + chain=self.chain, + host=host, + rpc_port=rpc_port(i), + p2p_port=p2p_port(i), + chronik_port=chronik_port(i), + timewait=self.rpc_timeout, + timeout_factor=self.options.timeout_factor, + bitcoind=binary[i], + bitcoin_cli=self.options.bitcoincli, + coverage_dir=self.options.coveragedir, + cwd=self.options.tmpdir, + extra_conf=extra_confs[i], + extra_args=extra_args[i], + use_cli=self.options.usecli, + emulator=self.options.emulator, + start_perf=self.options.perf, + use_valgrind=self.options.valgrind, + descriptors=self.options.descriptors, + ) + ) if self.options.wellingtonactivation: self.nodes[i].extend_default_args( - [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) + [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"] + ) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except BaseException: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: - coverage.write_all_rpc_commands( - self.options.coveragedir, node.rpc) + coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) - def stop_node(self, i, expected_stderr='', wait=0): + def stop_node(self, i, expected_stderr="", wait=0): """Stop a bitcoind test node""" self.nodes[i].stop_node(expected_stderr, wait=wait) def stop_nodes(self, wait=0): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node(wait=wait, wait_until_stopped=False) for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def connect_nodes(self, a, b): from_node = self.nodes[a] to_node = self.nodes[b] host = 
to_node.host if host is None: - host = '127.0.0.1' + host = "127.0.0.1" ip_port = f"{host}:{str(to_node.p2p_port)}" from_node.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: # * Must have a version message before anything else # * Must have a verack message before anything else wait_until_helper( - lambda: all(peer['version'] != 0 - for peer in from_node.getpeerinfo())) + lambda: all(peer["version"] != 0 for peer in from_node.getpeerinfo()) + ) wait_until_helper( - lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 - for peer in from_node.getpeerinfo())) + lambda: all( + peer["bytesrecv_per_msg"].pop("verack", 0) == 24 + for peer in from_node.getpeerinfo() + ) + ) def disconnect_nodes(self, a, b): from_node = self.nodes[a] to_node = self.nodes[b] def get_peer_ids(): result = [] for peer in from_node.getpeerinfo(): - if to_node.name in peer['subver']: - result.append(peer['id']) + if to_node.name in peer["subver"]: + result.append(peer["id"]) return result peer_ids = get_peer_ids() if not peer_ids: self.log.warning( f"disconnect_nodes: {from_node.index} and {to_node.index} were not " - "connected") + "connected" + ) return for peer_id in peer_ids: try: from_node.disconnectnode(nodeid=peer_id) except JSONRPCException as e: # If this node is disconnected between calculating the peer id # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting # peers. - if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED + if e.error["code"] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect wait_until_helper(lambda: not get_peer_ids(), timeout=5) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ self.disconnect_nodes(1, 2) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) def join_network(self): """ Join the (previously split) network halves together. """ self.connect_nodes(1, 2) self.sync_all() def no_op(self): pass def generate(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generate(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generateblock(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generateblock(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetoaddress( - *args, invalid_call=False, **kwargs) + blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetodescriptor( - *args, invalid_call=False, **kwargs) + blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def sync_blocks(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has at least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. 
""" rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash.count(best_hash[0]) == len(rpc_connections): return # Check that each peer has at least one connection - assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) best_hashes = "".join(f"\n {b!r}" for b in best_hash) raise AssertionError(f"Block sync timed out after {timeout}s:{best_hashes}") - def sync_mempools(self, nodes=None, wait=1, timeout=60, - flush_scheduler=True): + def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools """ rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: pool = [set(r.getrawmempool()) for r in rpc_connections] if pool.count(pool[0]) == len(rpc_connections): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return # Check that each peer has at least one connection - assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) pool_str = "".join(f"\n {m!r}" for m in pool) raise AssertionError(f"Mempool sync timed out after {timeout}s:{pool_str}") def sync_proofs(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same proofs in their proof pools """ rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout def format_ids(id_list): """Convert ProodIDs to hex strings for easier debugging""" return [uint256_hex(i) for i in id_list] while time.time() <= stop_time: - nodes_proofs = [ - set(format_ids(get_proof_ids(r))) for r in rpc_connections] + nodes_proofs = [set(format_ids(get_proof_ids(r))) for r in rpc_connections] if nodes_proofs.count(nodes_proofs[0]) == len(rpc_connections): return # Check that each peer has at least one connection - assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) nodes_proofs_str = "".join(f"\n {m!r}" for m in nodes_proofs) raise AssertionError( - f"Proofs sync timed out after {timeout}s:{nodes_proofs_str}") + f"Proofs sync timed out after {timeout}s:{nodes_proofs_str}" + ) def sync_all(self, nodes=None): self.sync_blocks(nodes) self.sync_mempools(nodes) self.sync_proofs(nodes) def wait_until(self, test_function, timeout=60): - return wait_until_helper(test_function, timeout=timeout, - timeout_factor=self.options.timeout_factor) + return wait_until_helper( + test_function, timeout=timeout, timeout_factor=self.options.timeout_factor + ) # Private helper methods. These should not be accessed by the subclass # test scripts. def _start_logging(self): # Add logger and logging handlers - self.log = logging.getLogger('TestFramework') + self.log = logging.getLogger("TestFramework") self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler( - f"{self.options.tmpdir}/test_framework.log", encoding='utf-8') + f"{self.options.tmpdir}/test_framework.log", encoding="utf-8" + ) fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. 
By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel # was caught as a string, so try to convert it to an int - ll = int(self.options.loglevel) if self.options.loglevel.isdigit( - ) else self.options.loglevel.upper() + ll = ( + int(self.options.loglevel) + if self.options.loglevel.isdigit() + else self.options.loglevel.upper() + ) ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so # log files can be concatenated and sorted) formatter = logging.Formatter( - fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') + fmt="%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 199-block-long chain. Afterward, create num_nodes copies from the cache.""" # Use node 0 to create the cache for all other nodes CACHE_NODE_ID = 0 cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID) assert self.num_nodes <= MAX_NODES if not os.path.isdir(cache_node_dir): - self.log.debug( - f"Creating cache directory {cache_node_dir}") + self.log.debug(f"Creating cache directory {cache_node_dir}") initialize_datadir( self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect, ) self.nodes.append( TestNode( CACHE_NODE_ID, cache_node_dir, chain=self.chain, extra_conf=["bind=127.0.0.1"], - extra_args=['-disablewallet'], + extra_args=["-disablewallet"], host=None, rpc_port=rpc_port(CACHE_NODE_ID), p2p_port=p2p_port(CACHE_NODE_ID), chronik_port=chronik_port(CACHE_NODE_ID), timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, coverage_dir=None, cwd=self.options.tmpdir, descriptors=self.options.descriptors, emulator=self.options.emulator, - )) + ) + ) if self.options.wellingtonactivation: self.nodes[CACHE_NODE_ID].extend_default_args( - [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) + [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"] + ) self.start_node(CACHE_NODE_ID) cache_node = self.nodes[CACHE_NODE_ID] # Wait for RPC connections to be ready cache_node.wait_for_rpc_connection() # Set a time in the past, so that blocks don't end up in the future cache_node.setmocktime( - cache_node.getblockheader( - cache_node.getbestblockhash())['time']) + cache_node.getblockheader(cache_node.getbestblockhash())["time"] + ) # Create a 199-block-long chain; each of the first 3 addresses gets 25 # mature blocks and 25 immature. # The 4th address gets 25 mature and only 24 immature blocks so that # the very last block in the cache does not age too much (have an # old tip age). # This is needed so that we are out of IBD when the test starts, # see the tip age check in IsInitialBlockDownload(). 
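# Worked arithmetic for the cache height (illustrative): the loop below
# mines 8 rounds over the 4 addresses, 25 blocks per round except the
# last round, which mines 24, so the cached chain ends at height
# 7 * 25 + 24 = 199. Each of the first 3 addresses receives blocks in
# rounds i and i + 4 (50 blocks each); the 4th receives 25 + 24 = 49.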
- gen_addresses = [ - k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_ECREG_P2SH_OP_TRUE] + gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ + ADDRESS_ECREG_P2SH_OP_TRUE + ] assert_equal(len(gen_addresses), 4) for i in range(8): self.generatetoaddress( cache_node, nblocks=25 if i != 7 else 24, address=gen_addresses[i % len(gen_addresses)], ) assert_equal(cache_node.getblockchaininfo()["blocks"], 199) # Shut it down, and clean up cache directories: self.stop_nodes() self.nodes = [] def cache_path(*paths): return os.path.join(cache_node_dir, self.chain, *paths) # Remove empty wallets dir - os.rmdir(cache_path('wallets')) + os.rmdir(cache_path("wallets")) for entry in os.listdir(cache_path()): # Only keep indexes, chainstate and blocks folders - if entry not in ['chainstate', 'blocks', 'indexes']: + if entry not in ["chainstate", "blocks", "indexes"]: os.remove(cache_path(entry)) for i in range(self.num_nodes): - self.log.debug( - f"Copy cache directory {cache_node_dir} to node {i}") + self.log.debug(f"Copy cache directory {cache_node_dir} to node {i}") to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(cache_node_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf initialize_datadir( self.options.tmpdir, i, self.chain, self.disable_autoconnect, ) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir( self.options.tmpdir, i, self.chain, self.disable_autoconnect, ) def skip_if_no_py3_zmq(self): """Attempt to import the zmq package and skip the test if the import fails.""" try: import zmq # noqa except ImportError: raise SkipTest("python3-zmq module not available.") def skip_if_no_python_bcc(self): """Attempt to import the bcc package and skip the tests if the import fails.""" try: import bcc # type: ignore[import] # noqa: F401 except ImportError: raise SkipTest("bcc python module not available") def skip_if_no_bitcoind_tracepoints(self): """Skip the running test if bitcoind has not been compiled with USDT tracepoint support.""" if not self.is_usdt_compiled(): - raise SkipTest( - "bitcoind has not been built with USDT tracepoints enabled.") + raise SkipTest("bitcoind has not been built with USDT tracepoints enabled.") def skip_if_no_bpf_permissions(self): """Skip the running test if we don't have permissions to do BPF syscalls and load BPF maps.""" # check for 'root' permissions if os.geteuid() != 0: raise SkipTest( - "no permissions to use BPF (please review the tests carefully before running them with higher privileges)") + "no permissions to use BPF (please review the tests carefully before" + " running them with higher privileges)" + ) def skip_if_platform_not_linux(self): """Skip the running test if we are not on a Linux platform""" if platform.system() != "Linux": raise SkipTest("not on a Linux system") def skip_if_no_bitcoind_zmq(self): """Skip the running test if bitcoind has not been compiled with zmq support.""" if not self.is_zmq_compiled(): raise SkipTest("bitcoind has not been built with zmq enabled.") def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") def skip_if_no_wallet_tool(self): """Skip the running test if bitcoin-wallet has not been compiled.""" if not self.is_wallet_tool_compiled(): raise SkipTest("bitcoin-wallet has 
not been compiled") def skip_if_no_cli(self): """Skip the running test if bitcoin-cli has not been compiled.""" if not self.is_cli_compiled(): raise SkipTest("bitcoin-cli has not been compiled.") def skip_if_no_chronik(self): """Skip the running test if Chronik indexer has not been compiled.""" if not self.is_chronik_compiled(): raise SkipTest("Chronik indexer has not been compiled.") def is_cli_compiled(self): """Checks whether bitcoin-cli was compiled.""" return self.config["components"].getboolean("ENABLE_CLI") def is_wallet_compiled(self): """Checks whether the wallet module was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET") def is_wallet_tool_compiled(self): """Checks whether bitcoin-wallet was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET_TOOL") def is_chronik_compiled(self): """Checks whether Chronik indexer was compiled.""" return self.config["components"].getboolean("ENABLE_CHRONIK") def is_zmq_compiled(self): """Checks whether the zmq module was compiled.""" return self.config["components"].getboolean("ENABLE_ZMQ") def is_usdt_compiled(self): """Checks whether the USDT tracepoints were compiled.""" return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS") diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index eb2a9884c..9d3c9b4c8 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,1040 +1,1151 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import collections import contextlib import decimal import errno import http.client import json import logging import os import re import shlex import subprocess import sys import tempfile import time import urllib.parse from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional from .address import ADDRESS_ECREG_UNSPENDABLE from .authproxy import JSONRPCException from .descriptors import descsum_create from .messages import XEC, CTransaction, FromHex from .p2p import P2P_SUBVERSION from .util import ( EncodeDecimal, append_config, assert_equal, delete_cookie_file, get_auth_cookie, get_rpc_proxy, p2p_port, rpc_url, wait_until_helper, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode: """A class for representing a bitcoind node under test. 
This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" - def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, chronik_port, timewait, timeout_factor, bitcoind, bitcoin_cli, - coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, - use_valgrind=False, descriptors=False): + def __init__( + self, + i, + datadir, + *, + chain, + host, + rpc_port, + p2p_port, + chronik_port, + timewait, + timeout_factor, + bitcoind, + bitcoin_cli, + coverage_dir, + cwd, + extra_conf=None, + extra_args=None, + use_cli=False, + emulator=None, + start_perf=False, + use_valgrind=False, + descriptors=False, + ): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as the node starts. """ self.index = i self.p2p_conn_index = 1 self.datadir = datadir self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf") self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.chain = chain self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.chronik_port = chronik_port self.name = f"testnode-{i}" self.rpc_timeout = timewait self.binary = bitcoind if not os.path.isfile(self.binary): raise FileNotFoundError( - f"Binary '{self.binary}' could not be found.\nTry setting it manually:\n" - f"\tBITCOIND= {sys.argv[0]}") + f"Binary '{self.binary}' could not be found.\nTry setting it" + f" manually:\n\tBITCOIND= {sys.argv[0]}" + ) self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities. # Note that common args are set in the config file (see # initialize_datadir) self.extra_args = extra_args # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. # This means that starting a bitcoind using the temp dir to debug a failed test won't # spam debug.log. 
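# For example (hypothetical tmpdir path), a quiet manual restart against
# a failed test's datadir is simply:
#
#     bitcoind -datadir=/tmp/<test tmpdir>/node0
#
# since chain selection and ports persist in bitcoin.conf, while the
# noisy -debug options below are only ever passed on the command line.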
self.default_args = [ "-datadir=" + self.datadir, "-logtimemicros", "-logthreadnames", "-logsourcelocations", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", - "-uacomment=" + self.name + "-uacomment=" + self.name, ] if use_valgrind: default_suppressions_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), - "..", "..", "..", "contrib", "valgrind.supp") - suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", - default_suppressions_file) + "..", + "..", + "..", + "contrib", + "valgrind.supp", + ) + suppressions_file = os.getenv( + "VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file + ) self.binary = "valgrind" self.bitcoind_args = [bitcoind] + self.default_args - self.default_args = [f"--suppressions={suppressions_file}", - "--gen-suppressions=all", "--exit-on-first-error=yes", - "--error-exitcode=1", "--quiet"] + self.bitcoind_args + self.default_args = [ + f"--suppressions={suppressions_file}", + "--gen-suppressions=all", + "--exit-on-first-error=yes", + "--error-exitcode=1", + "--quiet", + ] + self.bitcoind_args if emulator is not None: if not os.path.isfile(emulator): - raise FileNotFoundError( - f"Emulator '{emulator}' could not be found.") + raise FileNotFoundError(f"Emulator '{emulator}' could not be found.") self.emulator = emulator if use_cli and not os.path.isfile(bitcoin_cli): raise FileNotFoundError( - f"Binary '{bitcoin_cli}' could not be found.\nTry setting it manually:\n" - f"\tBITCOINCLI= {sys.argv[0]}") + f"Binary '{bitcoin_cli}' could not be found.\nTry setting it" + f" manually:\n\tBITCOINCLI= {sys.argv[0]}" + ) self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) self.use_cli = use_cli self.start_perf = start_perf self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None - self.log = logging.getLogger(f'TestFramework.node{i}') + self.log = logging.getLogger(f"TestFramework.node{i}") # Whether to kill the node when this object goes away self.cleanup_on_exit = True # Cache perf subprocesses here by their data output filename. 
self.perf_subprocesses = {} self.p2ps = [] self.timeout_factor = timeout_factor - AddressKeyPair = collections.namedtuple( - 'AddressKeyPair', ['address', 'key']) + AddressKeyPair = collections.namedtuple("AddressKeyPair", ["address", "key"]) PRIV_KEYS = [ # address , privkey AddressKeyPair( - 'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', - 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), + "mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z", + "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW", + ), AddressKeyPair( - 'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', - 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), + "msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg", + "cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE", + ), AddressKeyPair( - 'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', - 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), + "mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP", + "cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK", + ), AddressKeyPair( - 'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', - 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), + "mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR", + "cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim", + ), AddressKeyPair( - 'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', - 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), + "msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws", + "cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh", + ), AddressKeyPair( - 'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', - 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), + "n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi", + "cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq", + ), AddressKeyPair( - 'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', - 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), + "myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6", + "cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK", + ), AddressKeyPair( - 'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', - 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), + "mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8", + "cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy", + ), AddressKeyPair( - 'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', - 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), + "mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg", + "cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k", + ), AddressKeyPair( - 'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', - 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), + "mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf", + "cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik", + ), AddressKeyPair( - 'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', - 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), + "mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6", + "cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3", + ), AddressKeyPair( - 'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', - 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), + "mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7", + "cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ", + ), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" num_keys = len(self.PRIV_KEYS) - assert self.index < num_keys, \ - f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " \ - f"more are needed." + assert self.index < num_keys, ( + f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " + "more are needed." 
+ ) return self.PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return f"[node {self.index}] {msg}" def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shut down when # this destructor is called. print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: - return getattr( - RPCOverloadWrapper(self.cli, True, self.descriptors), name) + return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name) else: - assert self.rpc is not None, self._node_msg( - "Error: RPC not initialized") - assert self.rpc_connected, self._node_msg( - "Error: No RPC connection") + assert self.rpc is not None, self._node_msg("Error: RPC not initialized") + assert self.rpc_connected, self._node_msg("Error: No RPC connection") return getattr( - RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), - name) + RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name + ) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must start # with "-key=" (the '"' char is to avoid removing "-key_suffix" # arg if "-key" is the argument to remove). 
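# Illustrative example of the matching rule above (hypothetical args):
#
#     node.remove_default_args(["-debug"])
#     # before: ["-debug", "-debug=net", "-debugexclude=libevent"]
#     # after:  ["-debugexclude=libevent"]
#
# "-debug" is removed by exact match, "-debug=net" by the "-debug="
# prefix, while "-debugexclude=libevent" survives because neither
# rule matches it.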
- self.default_args = [def_arg for def_arg in self.default_args - if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] + self.default_args = [ + def_arg + for def_arg in self.default_args + if rm_arg != def_arg and not def_arg.startswith(rm_arg + "=") + ] - def start(self, extra_args=None, *, cwd=None, stdout=None, - stderr=None, **kwargs): + def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: - stderr = tempfile.NamedTemporaryFile( - dir=self.stderr_dir, delete=False) + stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: - stdout = tempfile.NamedTemporaryFile( - dir=self.stdout_dir, delete=False) + stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout if cwd is None: cwd = self.cwd # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir, self.chain) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are # written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") p_args = [self.binary] + self.default_args + extra_args if self.emulator is not None: p_args = [self.emulator] + p_args self.process = subprocess.Popen( - p_args, - env=subp_env, - stdout=stdout, - stderr=stderr, - cwd=cwd, - **kwargs) + p_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs + ) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") if self.start_perf: self._start_perf() def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: - raise FailedToStartError(self._node_msg( - f'bitcoind exited with status {self.process.returncode} during ' - f'initialization')) + raise FailedToStartError( + self._node_msg( + f"bitcoind exited with status {self.process.returncode} during " + "initialization" + ) + ) try: rpc = get_rpc_proxy( - rpc_url( - self.datadir, - self.chain, - self.host, - self.rpc_port), + rpc_url(self.datadir, self.chain, self.host, self.rpc_port), self.index, # Shorter timeout to allow for one retry in case of # ETIMEDOUT timeout=self.rpc_timeout // 2, - coveragedir=self.coverage_dir + coveragedir=self.coverage_dir, ) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC # connection is up - wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], - timeout_factor=self.timeout_factor) + wait_until_helper( + lambda: rpc.getmempoolinfo()["loaded"], + timeout_factor=self.timeout_factor, + ) # Wait for the node to finish reindex, block import, and # loading the mempool. Usually importing happens fast or # even "immediate" when the node is started. However, there # is no guarantee and sometimes ThreadImport might finish # later. This is going to cause intermittent test failures, # because generally the tests assume the node is fully # ready after being started. 
# # For example, the node will reject block messages from p2p # when it is still importing with the error "Unexpected # block message received" # # The wait is done here to make tests as robust as possible # and prevent racy tests and intermittent failures as much # as possible. Some tests might not need this, but the # overhead is trivial, and the added guarantees are worth # the minimal performance cost. self.log.debug("RPC successfully started") if self.use_cli: return self.rpc = rpc self.rpc_connected = True self.url = self.rpc.url return except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error - if e.error['code'] != -28 and e.error['code'] != -342: + if e.error["code"] != -28 and e.error["code"] != -342: raise # unknown JSON RPC exception except ConnectionResetError: # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount # succeeds. Try again to properly raise the FailedToStartError pass except OSError as e: if e.errno == errno.ETIMEDOUT: # Treat identical to ConnectionResetError pass elif e.errno == errno.ECONNREFUSED: # Port not yet open? pass else: # unknown OS error raise except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error( - f"Unable to connect to bitcoind after {self.rpc_timeout}s") + f"Unable to connect to bitcoind after {self.rpc_timeout}s" + ) def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" self.log.debug("Waiting for cookie credentials") # Poll at a rate of four times per second. 
poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): try: get_auth_cookie(self.datadir, self.chain) self.log.debug("Cookie credentials successfully retrieved") return except ValueError: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting so we continue polling until # RPC credentials are retrieved pass time.sleep(1.0 / poll_per_s) self._raise_assertion_error( - f"Unable to retrieve cookie credentials after {self.rpc_timeout}s") + f"Unable to retrieve cookie credentials after {self.rpc_timeout}s" + ) def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug( - "TestNode.generate() dispatches `generate` call to `generatetoaddress`") + "TestNode.generate() dispatches `generate` call to `generatetoaddress`" + ) return self.generatetoaddress( - nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) + nblocks=nblocks, + address=self.get_deterministic_priv_key().address, + maxtries=maxtries, + **kwargs, + ) def generateblock(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generateblock')(*args, **kwargs) + return self.__getattr__("generateblock")(*args, **kwargs) def generatetoaddress(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generatetoaddress')(*args, **kwargs) + return self.__getattr__("generatetoaddress")(*args, **kwargs) def generatetodescriptor(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generatetodescriptor')(*args, **kwargs) - - def buildavalancheproof(self, sequence: int, expiration: int, master: str, - stakes: List[Dict[str, Any]], payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE) -> str: - return self.__getattr__('buildavalancheproof')( + return self.__getattr__("generatetodescriptor")(*args, **kwargs) + + def buildavalancheproof( + self, + sequence: int, + expiration: int, + master: str, + stakes: List[Dict[str, Any]], + payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE, + ) -> str: + return self.__getattr__("buildavalancheproof")( sequence=sequence, expiration=expiration, master=master, stakes=stakes, payoutAddress=payoutAddress, ) def get_wallet_rpc(self, wallet_name): if self.use_cli: return RPCOverloadWrapper( - self.cli(f"-rpcwallet={wallet_name}"), True, - self.descriptors) + self.cli(f"-rpcwallet={wallet_name}"), True, self.descriptors + ) else: - assert self.rpc is not None, self._node_msg( - "Error: RPC not initialized") - assert self.rpc_connected, self._node_msg( - "Error: RPC not connected") + assert self.rpc is not None, self._node_msg("Error: RPC not initialized") + assert self.rpc_connected, self._node_msg("Error: RPC not connected") wallet_path = f"wallet/{urllib.parse.quote(wallet_name)}" - return RPCOverloadWrapper(self.rpc / wallet_path, - descriptors=self.descriptors) + return RPCOverloadWrapper( + self.rpc / wallet_path, descriptors=self.descriptors + ) - def stop_node(self, expected_stderr='', *, wait=0, - wait_until_stopped=True): + def stop_node(self, expected_stderr="", *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop(wait=wait) except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # If there are any running perf processes, stop them. 
for profile_name in tuple(self.perf_subprocesses.keys()): self._stop_perf(profile_name) # Check that stderr is as expected self.stderr.seek(0) - stderr = self.stderr.read().decode('utf-8').strip() + stderr = self.stderr.read().decode("utf-8").strip() if stderr != expected_stderr: - raise AssertionError( - f"Unexpected stderr {stderr} != {expected_stderr}") + raise AssertionError(f"Unexpected stderr {stderr} != {expected_stderr}") self.stdout.close() self.stderr.close() del self.p2ps[:] if wait_until_stopped: self.wait_until_stopped() def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( - f"Node returned non-zero exit code ({return_code}) when stopping") + f"Node returned non-zero exit code ({return_code}) when stopping" + ) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until_helper( - self.is_node_stopped, - timeout=timeout, - timeout_factor=self.timeout_factor) + self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor + ) @property def chain_path(self) -> Path: return Path(self.datadir) / self.chain @property def debug_log_path(self) -> Path: - return self.chain_path / 'debug.log' + return self.chain_path / "debug.log" def debug_log_bytes(self) -> int: - with open(self.debug_log_path, encoding='utf-8') as dl: + with open(self.debug_log_path, encoding="utf-8") as dl: dl.seek(0, 2) return dl.tell() @contextlib.contextmanager def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): """Assert that some debug messages are present within some timeout. Unexpected debug messages may be optionally provided to fail a test if they appear before expected messages. Note: expected_msgs must always be non-empty even if the goal is to check for unexpected_msgs. This provides a bounded scenario such that "we expect to reach some target resulting in expected_msgs without seeing unexpected_msgs". Otherwise, we are testing that something never happens, which is fundamentally not robust test logic. 
""" if not expected_msgs: raise AssertionError("Expected debug messages is empty") if unexpected_msgs is None: unexpected_msgs = [] time_end = time.time() + timeout * self.timeout_factor prev_size = self.debug_log_bytes() yield while True: found = True - with open(self.debug_log_path, encoding='utf-8') as dl: + with open(self.debug_log_path, encoding="utf-8") as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for unexpected_msg in unexpected_msgs: - if re.search(re.escape(unexpected_msg), - log, flags=re.MULTILINE): + if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): self._raise_assertion_error( f'Unexpected message "{unexpected_msg}" partially matches ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) for expected_msg in expected_msgs: - if re.search(re.escape(expected_msg), log, - flags=re.MULTILINE) is None: + if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return if time.time() >= time_end: break time.sleep(0.05) self._raise_assertion_error( f'Expected messages "{expected_msgs}" does not partially match ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) @contextlib.contextmanager def wait_for_debug_log( - self, expected_msgs: List[bytes], timeout=60, interval=0.05, chatty_callable=None): + self, + expected_msgs: List[bytes], + timeout=60, + interval=0.05, + chatty_callable=None, + ): """ Block until we see all the debug log messages or until we exceed the timeout. If a chatty_callable is provided, it is repeated at every iteration. """ time_end = time.time() + timeout * self.timeout_factor prev_size = self.debug_log_bytes() yield while True: found = True if chatty_callable is not None: # Ignore the chatty_callable returned value, as we are only # interested in the debug log content here. chatty_callable() with open(self.debug_log_path, "rb") as dl: dl.seek(prev_size) log = dl.read() for expected_msg in expected_msgs: if expected_msg not in log: found = False if found: return if time.time() >= time_end: - print_log = " - " + \ - "\n - ".join([f"\n - {line.decode()}" for line in log.splitlines()]) + print_log = " - " + "\n - ".join( + [f"\n - {line.decode()}" for line in log.splitlines()] + ) break time.sleep(interval) self._raise_assertion_error( f'Expected messages "{str(expected_msgs)}" does not partially match ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) @contextlib.contextmanager def profile_with_perf(self, profile_name: str): """ Context manager that allows easy profiling of node activity using `perf`. See `test/functional/README.md` for details on perf usage. Args: profile_name: This string will be appended to the profile data filename generated by perf. """ subp = self._start_perf(profile_name) yield if subp: self._stop_perf(profile_name) def _start_perf(self, profile_name=None): """Start a perf process to profile this node. 
Returns the subprocess running perf.""" subp = None def test_success(cmd): - return subprocess.call( - # shell=True required for pipe use below - cmd, shell=True, - stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 + return ( + subprocess.call( + # shell=True required for pipe use below + cmd, + shell=True, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + == 0 + ) - if not sys.platform.startswith('linux'): + if not sys.platform.startswith("linux"): self.log.warning( - "Can't profile with perf; only available on Linux platforms") + "Can't profile with perf; only available on Linux platforms" + ) return None - if not test_success('which perf'): - self.log.warning( - "Can't profile with perf; must install perf-tools") + if not test_success("which perf"): + self.log.warning("Can't profile with perf; must install perf-tools") return None - if not test_success( - f'readelf -S {shlex.quote(self.binary)} | grep .debug_str'): + if not test_success(f"readelf -S {shlex.quote(self.binary)} | grep .debug_str"): self.log.warning( - "perf output won't be very useful without debug symbols compiled into bitcoind") + "perf output won't be very useful without debug symbols compiled into" + " bitcoind" + ) output_path = tempfile.NamedTemporaryFile( dir=self.datadir, prefix=f"{profile_name or 'test'}.perf.data.", delete=False, ).name cmd = [ - 'perf', 'record', - '-g', # Record the callgraph. + "perf", + "record", + "-g", # Record the callgraph. # Compatibility for gcc's --fomit-frame-pointer. - '--call-graph', 'dwarf', - '-F', '101', # Sampling frequency in Hz. - '-p', str(self.process.pid), - '-o', output_path, + "--call-graph", + "dwarf", + "-F", + "101", # Sampling frequency in Hz. + "-p", + str(self.process.pid), + "-o", + output_path, ] - subp = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) - output_path = subp.args[subp.args.index('-o') + 1] + output_path = subp.args[subp.args.index("-o") + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() - if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: + if "Consider tweaking /proc/sys/kernel/perf_event_paranoid" in stderr: self.log.warning( "perf couldn't collect data! Try " - "'sudo sysctl -w kernel.perf_event_paranoid=-1'") + "'sudo sysctl -w kernel.perf_event_paranoid=-1'" + ) else: report_cmd = f"perf report -i {output_path}" self.log.info(f"See perf output by running '{report_cmd}'") def assert_start_raises_init_error( - self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): + self, + extra_args=None, + expected_msg=None, + match=ErrorMatch.FULL_TEXT, + *args, + **kwargs, + ): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. - Will throw if an expected_msg is provided and it does not match bitcoind's stdout. 
+ """ + with tempfile.NamedTemporaryFile( + dir=self.stderr_dir, delete=False + ) as log_stderr, tempfile.NamedTemporaryFile( + dir=self.stdout_dir, delete=False + ) as log_stdout: try: - self.start(extra_args, stdout=log_stdout, - stderr=log_stderr, *args, **kwargs) + self.start( + extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs + ) ret = self.process.wait(timeout=self.rpc_timeout) - self.log.debug(self._node_msg( - f'bitcoind exited with status {ret} during initialization')) + self.log.debug( + self._node_msg( + f"bitcoind exited with status {ret} during initialization" + ) + ) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) - stderr = log_stderr.read().decode('utf-8').strip() + stderr = log_stderr.read().decode("utf-8").strip() if match == ErrorMatch.PARTIAL_REGEX: - if re.search(expected_msg, stderr, - flags=re.MULTILINE) is None: + if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not partially ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) except subprocess.TimeoutExpired: self.process.kill() self.running = False self.process = None - assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s ' + assert_msg = f"bitcoind should have exited within {self.rpc_timeout}s " if expected_msg is None: assert_msg += "with an error" else: assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): - """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: + """Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: - the current relayfee on node - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr - all inputs currently unsigned (empty scriptSig) """ billable_size_estimate = tx.billable_size() # Add some padding for signatures / public keys # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes) billable_size_estimate += len(tx.vin) * 107 # relay_fee gives a value in XEC per kB. return int(self.relay_fee() / 1000 * billable_size_estimate * XEC) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add an inbound p2p connection to the node. 
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" - if 'dstport' not in kwargs: - kwargs['dstport'] = p2p_port(self.index) - if 'dstaddr' not in kwargs: - kwargs['dstaddr'] = '127.0.0.1' + if "dstport" not in kwargs: + kwargs["dstport"] = p2p_port(self.index) + if "dstaddr" not in kwargs: + kwargs["dstaddr"] = "127.0.0.1" p2p_conn.peer_connect( - **kwargs, - net=self.chain, - timeout_factor=self.timeout_factor)() + **kwargs, net=self.chain, timeout_factor=self.timeout_factor + )() self.p2ps.append(p2p_conn) - p2p_conn.wait_until( - lambda: p2p_conn.is_connected, - check_connected=False) + p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() # At this point we have sent our version message and received the version and verack, however the full node # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully # established (fSuccessfullyConnected). # # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a # transaction that will be added to the mempool as soon as we return here. # # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) # in comparison to the upside of making tests less fragile and # unexpected intermittent errors less likely. p2p_conn.sync_with_ping() # Consistency check that the Bitcoin ABC node has received our user agent # string. This checks the node's newest peer. It could be racy if # another Bitcoin ABC node has connected since we opened our # connection, but we don't expect that to happen. - assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) + assert_equal(self.getpeerinfo()[-1]["subver"], P2P_SUBVERSION) return p2p_conn def add_outbound_p2p_connection( - self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): + self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs + ): """Add an outbound p2p connection from node. Must be an "outbound-full-relay", "block-relay-only", "addr-fetch", "feeler" or "avalanche" connection. This method adds the p2p connection to the self.p2ps list and returns the connection to the caller. 
""" def addconnection_callback(address, port): - self.log.debug( - f"Connecting to {address}:{port} {connection_type}") - self.addconnection(f'{address}:{port}', connection_type) + self.log.debug(f"Connecting to {address}:{port} {connection_type}") + self.addconnection(f"{address}:{port}", connection_type) p2p_conn.peer_accept_connection( connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, - **kwargs)() + **kwargs, + )() if connection_type == "feeler": # feeler connections are closed as soon as the node receives a # `version` message p2p_conn.wait_until( - lambda: p2p_conn.message_count["version"] == 1, - check_connected=False) + lambda: p2p_conn.message_count["version"] == 1, check_connected=False + ) p2p_conn.wait_until( - lambda: not p2p_conn.is_connected, - check_connected=False) + lambda: not p2p_conn.is_connected, check_connected=False + ) else: p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) p2p_conn.wait_for_verack() p2p_conn.sync_with_ping() return p2p_conn def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() - if peer['subver'] == P2P_SUBVERSION]) + return len( + [peer for peer in self.getpeerinfo() if peer["subver"] == P2P_SUBVERSION] + ) def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] - wait_until_helper(lambda: self.num_test_p2p_connections() == 0, - timeout_factor=self.timeout_factor) + wait_until_helper( + lambda: self.num_test_p2p_connections() == 0, + timeout_factor=self.timeout_factor, + ) class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) def arg_to_cli(arg): if isinstance(arg, bool): return str(arg).lower() elif arg is None: - return 'null' + return "null" elif isinstance(arg, dict) or isinstance(arg, list): return json.dumps(arg, default=EncodeDecimal) else: return str(arg) class TestNodeCLI: """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir, emulator=None): self.options = [] self.binary = binary self.datadir = datadir self.input = None - self.log = logging.getLogger('TestFramework.bitcoincli') + self.log = logging.getLogger("TestFramework.bitcoincli") self.emulator = emulator def __call__(self, *options, cli_input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir, self.emulator) cli.options = [str(o) for o in options] cli.input = cli_input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append({"result": request()}) except JSONRPCException as e: results.append({"error": e}) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. 
Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] - named_args = [str(key) + "=" + arg_to_cli(value) - for (key, value) in kwargs.items()] - assert not (pos_args and named_args), \ - "Cannot use positional arguments and named arguments in the same " \ + named_args = [ + str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items() + ] + assert not (pos_args and named_args), ( + "Cannot use positional arguments and named arguments in the same " "bitcoin-cli call" + ) p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug(f"Running bitcoin-cli {p_args[2:]}") if self.emulator is not None: p_args = [self.emulator] + p_args - process = subprocess.Popen(p_args, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + process = subprocess.Popen( + p_args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: - match = re.match( - r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) + match = re.match(r"error code: ([-0-9]+)\nerror message:\n(.*)", cli_stderr) if match: code, message = match.groups() raise JSONRPCException({"code": int(code), "message": message}) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( - returncode, self.binary, output=cli_stderr) + returncode, self.binary, output=cli_stderr + ) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") class RPCOverloadWrapper: def __init__(self, rpc, cli=False, descriptors=False): self.rpc = rpc self.is_cli = cli self.descriptors = descriptors def __getattr__(self, name): return getattr(self.rpc, name) - def createwallet(self, wallet_name, disable_private_keys=None, blank=None, - passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None): + def createwallet( + self, + wallet_name, + disable_private_keys=None, + blank=None, + passphrase="", + avoid_reuse=None, + descriptors=None, + load_on_startup=None, + ): if descriptors is None: descriptors = self.descriptors - return self.__getattr__('createwallet')( - wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup) + return self.__getattr__("createwallet")( + wallet_name, + disable_private_keys, + blank, + passphrase, + avoid_reuse, + descriptors, + load_on_startup, + ) def importprivkey(self, privkey, label=None, rescan=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importprivkey')(privkey, label, rescan) - desc = descsum_create('combo(' + privkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importprivkey")(privkey, label, rescan) + desc = descsum_create("combo(" + privkey + ")") + req = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise 
JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) - def addmultisigaddress(self, nrequired, keys, - label=None): + def addmultisigaddress(self, nrequired, keys, label=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('addmultisigaddress')( - nrequired, keys, label) + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("addmultisigaddress")(nrequired, keys, label) cms = self.createmultisig(nrequired, keys) - req = [{ - 'desc': cms['descriptor'], - 'timestamp': 0, - 'label': label if label else '' - }] + req = [ + {"desc": cms["descriptor"], "timestamp": 0, "label": label if label else ""} + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) return cms def importpubkey(self, pubkey, label=None, rescan=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importpubkey')(pubkey, label, rescan) - desc = descsum_create('combo(' + pubkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importpubkey")(pubkey, label, rescan) + desc = descsum_create("combo(" + pubkey + ")") + req = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) def importaddress(self, address, label=None, rescan=None, p2sh=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importaddress')( - address, label, rescan, p2sh) + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importaddress")(address, label, rescan, p2sh) is_hex = False try: int(address, 16) is_hex = True - desc = descsum_create('raw(' + address + ')') + desc = descsum_create("raw(" + address + ")") except BaseException: - desc = descsum_create('addr(' + address + ')') - reqs = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + desc = descsum_create("addr(" + address + ")") + reqs = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] if is_hex and p2sh: - reqs.append({ - 'desc': descsum_create('p2sh(raw(' + address + '))'), - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }) + reqs.append( + { + "desc": descsum_create("p2sh(raw(" + address + "))"), + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ) import_res = self.importdescriptors(reqs) for res in import_res: - if not res['success']: - raise JSONRPCException(res['error']) + if not res["success"]: + raise 
JSONRPCException(res["error"]) diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py index bbe5e8c66..e7e5e6372 100644 --- a/test/functional/test_framework/txtools.py +++ b/test/functional/test_framework/txtools.py @@ -1,126 +1,122 @@ #!/usr/bin/env python3 import random import sys import unittest from .cdefs import MIN_TX_SIZE from .messages import CTransaction, CTxOut, FromHex, ToHex from .script import OP_RETURN, CScript MAX_OP_RETURN_PAYLOAD = 220 VOUT_VALUE_SIZE = 8 def get_random_bytes(size: int) -> bytes: if sys.version_info >= (3, 9, 0): return random.randbytes(size) # type: ignore[attr-defined] # slower workaround if not size: - return b'' + return b"" return bytes.fromhex(f"{random.randrange(2**(8*size)):0{2*size}x}") def pad_tx(tx: CTransaction, pad_to_size: int = MIN_TX_SIZE): """ Pad a transaction with op_return junk data until it is at least pad_to_size, or leave it alone if it's already bigger than that. This function attempts to make the tx to be exactly of size pad_to_size. There is one case in which this is not possible: when the requested size is less than the current size plus the minimum vout overhead of 10 bytes. To get an exact size, make you sure you pad to a size of at least 10 more bytes than the input transaction. """ curr_size = len(tx.serialize()) required_padding = pad_to_size - curr_size while required_padding > 0: if required_padding <= 10: # Smallest possible padding with an empty OP_RETURN vout: # vout.value (8 bytes) + script length (1) + OP_RETURN (1) tx.vout.append(CTxOut(0, CScript([OP_RETURN]))) break # The total padding size, for a payload < 0x4c, is: # vout.value (8 bytes) + script_length (1) + OP_RETURN (1) + # + data length (1) + data data_size = required_padding - VOUT_VALUE_SIZE - 3 was_op_pushdata1_used = True - if data_size <= 0x4c: + if data_size <= 0x4C: was_op_pushdata1_used = False - if data_size == 0x4c: + if data_size == 0x4C: # Adding one more byte to the data causes two more bytes to be # added to the tx size, because of the need for OP_PUSHDATA1. # So remove 10 bytes to add an empty OP_RETURN vout instead in # the next iteration. data_size -= 10 elif MAX_OP_RETURN_PAYLOAD < data_size <= MAX_OP_RETURN_PAYLOAD + 10: # We require more than one VOUT, but the extra space needed is # less than the VOUT footprint. Remove 10 bytes from the current # data to avoid overpadding in next iteration. data_size -= 10 elif data_size > MAX_OP_RETURN_PAYLOAD + 10: # Use a full OP_RETURN. data_size = MAX_OP_RETURN_PAYLOAD + 1 if was_op_pushdata1_used: # OP_PUSHDATA1 adds 1 extra byte to the transaction size. 
data_size -= 1 required_padding -= 1 required_padding -= data_size + VOUT_VALUE_SIZE + 3 - tx.vout.append( - CTxOut(0, CScript([OP_RETURN, get_random_bytes(data_size)])) - ) + tx.vout.append(CTxOut(0, CScript([OP_RETURN, get_random_bytes(data_size)]))) tx.rehash() def pad_raw_tx(rawtx_hex, min_size=MIN_TX_SIZE): """ Pad a raw transaction with OP_RETURN data until it reaches at least min_size """ tx = CTransaction() FromHex(tx, rawtx_hex) pad_tx(tx, min_size) return ToHex(tx) class TestFrameworkScript(unittest.TestCase): def test_pad_raw_tx(self): raw_tx = ( "0100000001dd22777f85ab958c065cabced6115c4a2604abb9a2273f0eedce14a" "55c7b1201000000000201510000000001ebf802950000000017a914da1745e9b5" "49bd0bfa1a569971c77eba30cd5a4b8700000000" ) # Helper functions def rawtx_length(rawtx): return len(bytes.fromhex(rawtx)) def test_size(requested_size, expected_size): self.assertEqual( - rawtx_length(pad_raw_tx(raw_tx, requested_size)), - expected_size) + rawtx_length(pad_raw_tx(raw_tx, requested_size)), expected_size + ) self.assertEqual(rawtx_length(raw_tx), 85) # The tx size is never reduced. for size in [-1, 0, 1, 83, 84, 85]: test_size(size, expected_size=85) # The first new VOUT is added as soon as the requested size is more # than the initial size. The next 9 sizes are overpadded to 95 bytes, # because a VOUT with an empty OP_RETURN is the minimum data we can # add. for size in [86, 87, 88, 89, 90, 91, 92, 93, 94]: - test_size(requested_size=size, - expected_size=95) + test_size(requested_size=size, expected_size=95) # After that, the size is exactly as expected. for size in range(95, 1000): - test_size(requested_size=size, - expected_size=size) + test_size(requested_size=size, expected_size=size) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 2ce125155..eca3aeb22 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -1,623 +1,642 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Helpful routines for regression testing.""" import enum import inspect import json import logging import os import re import socket import time import unittest from base64 import b64encode from decimal import ROUND_DOWN, Decimal from functools import lru_cache from io import BytesIO from subprocess import CalledProcessError from typing import Callable, Dict, Optional from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_approx(v, vexp, vspan=10): """Assert that `v` is within `vspan` of `vexp`""" if v < vexp - vspan: raise AssertionError(f"{str(v)} < [{str(vexp - vspan)}..{str(vexp + vspan)}]") if v > vexp + vspan: raise AssertionError(f"{str(v)} > [{str(vexp - vspan)}..{str(vexp + vspan)}]") def assert_fee_amount(fee, tx_size, fee_per_kB, wiggleroom=2): """ Assert the fee was in range wiggleroom defines an amount that the test expects the wallet to be off by when estimating fees. This can be due to the dummy signature that is added during fee calculation, or due to the wallet funding transactions using the ceiling of the calculated fee. """ target_fee = satoshi_round(tx_size * fee_per_kB / 1000) if fee < (tx_size - wiggleroom) * fee_per_kB / 1000: raise AssertionError( - f"Fee of {str(fee)} XEC too low! 
(Should be {str(target_fee)} XEC)") + f"Fee of {str(fee)} XEC too low! (Should be {str(target_fee)} XEC)" + ) if fee > (tx_size + wiggleroom) * fee_per_kB / 1000: raise AssertionError( - f"Fee of {str(fee)} XEC too high! (Should be {str(target_fee)} XEC)") + f"Fee of {str(fee)} XEC too high! (Should be {str(target_fee)} XEC)" + ) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError( - f"not({' == '.join(str(arg) for arg in (thing1, thing2) + args)})") + f"not({' == '.join(str(arg) for arg in (thing1, thing2) + args)})" + ) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError(f"{str(thing1)} <= {str(thing2)}") def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError(f"{str(thing1)} < {str(thing2)}") def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except JSONRPCException: - raise AssertionError( - "Use assert_raises_rpc_error() to test RPC failures") + raise AssertionError("Use assert_raises_rpc_error() to test RPC failures") except exc as e: - if message is not None and message not in e.error['message']: + if message is not None and message not in e.error["message"]: raise AssertionError( - f"Expected substring not found in error message:\nsubstring: '{message}'\nerror message: '{e.error['message']}'.") + "Expected substring not found in error message:\nsubstring:" + f" '{message}'\nerror message: '{e.error['message']}'." + ) except Exception as e: - raise AssertionError( - f"Unexpected exception raised: {type(e).__name__}") + raise AssertionError(f"Unexpected exception raised: {type(e).__name__}") else: raise AssertionError("No exception raised") def assert_raises_process_error( - returncode: int, output: str, fun: Callable, *args, **kwds): + returncode: int, output: str, fun: Callable, *args, **kwds +): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode: the process return code. output: [a substring of] the process output. fun: the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. """ try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: - raise AssertionError( - f"Unexpected returncode {e.returncode}") + raise AssertionError(f"Unexpected returncode {e.returncode}") if output not in e.output: raise AssertionError(f"Expected substring not found:{e.output}") else: raise AssertionError("No exception raised") def assert_raises_rpc_error( - code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds): + code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds +): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code: the error code returned by the RPC call (defined in src/rpc/protocol.h). 
Set to None if checking the error code is not required. message: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun: the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised" def try_rpc(code, message, fun, *args, **kwds): """Tries to run an rpc command. Test against error code and message if the rpc fails. Returns whether a JSONRPCException was raised.""" try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message # values are correct. if (code is not None) and (code != e.error["code"]): + raise AssertionError(f"Unexpected JSONRPC error code {e.error['code']}") + if (message is not None) and (message not in e.error["message"]): raise AssertionError( - f"Unexpected JSONRPC error code {e.error['code']}") - if (message is not None) and (message not in e.error['message']): - raise AssertionError( - f"Expected substring not found in error message:\nsubstring: '{message}'\nerror message: '{e.error['message']}'.") + "Expected substring not found in error message:\nsubstring:" + f" '{message}'\nerror message: '{e.error['message']}'." + ) return True except Exception as e: - raise AssertionError( - f"Unexpected exception raised: {type(e).__name__}") + raise AssertionError(f"Unexpected exception raised: {type(e).__name__}") else: return False def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( - f"Couldn't interpret {string!r} as hexadecimal; raised: {e}") + f"Couldn't interpret {string!r} as hexadecimal; raised: {e}" + ) def assert_is_hash_string(string, length=64): if not isinstance(string, str): - raise AssertionError( - f"Expected a string, got type {type(string)!r}") + raise AssertionError(f"Expected a string, got type {type(string)!r}") elif length and len(string) != length: + raise AssertionError(f"String of length {length} expected; got {len(string)}") + elif not re.match("[abcdef0-9]+$", string): raise AssertionError( - f"String of length {length} expected; got {len(string)}") - elif not re.match('[abcdef0-9]+$', string): - raise AssertionError( - f"String {string!r} contains invalid characters for a hash.") + f"String {string!r} contains invalid characters for a hash." + ) -def assert_array_result(object_array, to_match, expected, - should_not_find=False): +def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. 
If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError(f"{str(item)} : expected {str(key)}={str(value)}") num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: raise AssertionError(f"No objects matched {str(to_match)}") if num_matched > 0 and should_not_find: raise AssertionError(f"Objects were found {str(to_match)}") + # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting XEC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def EncodeDecimal(o): if isinstance(o, Decimal): return str(o) raise TypeError(f"{repr(o)} is not JSON serializable") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def str_to_b64str(string): - return b64encode(string.encode('utf-8')).decode('ascii') + return b64encode(string.encode("utf-8")).decode("ascii") def satoshi_round(amount): - return Decimal(amount).quantize(Decimal('0.01'), rounding=ROUND_DOWN) + return Decimal(amount).quantize(Decimal("0.01"), rounding=ROUND_DOWN) def iter_chunks(lst: list, n: int): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): - yield lst[i:i + n] + yield lst[i : i + n] -def wait_until_helper(predicate, *, attempts=float('inf'), - timeout=float('inf'), lock=None, timeout_factor=1.0): +def wait_until_helper( + predicate, + *, + attempts=float("inf"), + timeout=float("inf"), + lock=None, + timeout_factor=1.0, +): """Sleep until the predicate resolves to be True. Warning: Note that this method is not recommended to be used in tests as it is not aware of the context of the test framework. Using the `wait_until()` members from `BitcoinTestFramework` or `P2PInterface` class ensures the timeout is properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in `p2p.py` has a preset lock. """ - if attempts == float('inf') and timeout == float('inf'): + if attempts == float("inf") and timeout == float("inf"): timeout = 60 timeout = timeout * timeout_factor attempt = 0 time_end = time.time() + timeout while attempt < attempts and time.time() < time_end: if lock: with lock: if predicate(): return else: if predicate(): return attempt += 1 time.sleep(0.05) # Print the cause of the timeout predicate_source = f"''''\n{inspect.getsource(predicate)}'''" logger.error(f"wait_until() failed. 
Predicate: {predicate_source}") if attempt >= attempts: raise AssertionError( - f"Predicate {predicate_source} not true after {attempts} attempts") + f"Predicate {predicate_source} not true after {attempts} attempts" + ) elif time.time() >= time_end: raise AssertionError( - f"Predicate {predicate_source} not true after {timeout} seconds") - raise RuntimeError('Unreachable') + f"Predicate {predicate_source} not true after {timeout} seconds" + ) + raise RuntimeError("Unreachable") + # RPC/P2P connection constants and functions ############################################ class PortName(enum.Enum): P2P = 0 RPC = 1 CHRONIK = 2 # The maximum number of nodes a single test can spawn MAX_NODES = 64 # Don't assign rpc or p2p ports lower than this (for example: 18333 is the # default testnet port) -PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=20000)) +PORT_MIN = int(os.getenv("TEST_RUNNER_PORT_MIN", default=20000)) # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 # The number of times we increment the port counters and test it again before # giving up. MAX_PORT_RETRY = 5 PORT_START_MAP: Dict[PortName, int] = { PortName.P2P: 0, PortName.RPC: PORT_RANGE, PortName.CHRONIK: PORT_RANGE * 2, } # Globals used for incrementing ports. Initially uninitialized because they # depend on PortSeed.n. LAST_USED_PORT_MAP: Dict[PortName, int] = {} class PortSeed: # Must be initialized with a unique integer for each process n = None def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds coveragedir (str): Directory Returns: AuthServiceProxy. convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: - proxy_kwargs['timeout'] = int(timeout) + proxy_kwargs["timeout"] = int(timeout) proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info - coverage_logfile = coverage.get_filename( - coveragedir, node_number) if coveragedir else None + coverage_logfile = ( + coverage.get_filename(coveragedir, node_number) if coveragedir else None + ) return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) # We initialize the port counters at runtime, because at import time PortSeed.n # will not yet be defined. It is defined based on a command line option # in the BitcoinTestFramework class __init__ def initialize_port(port_name: PortName): global LAST_USED_PORT_MAP assert PortSeed.n is not None LAST_USED_PORT_MAP[port_name] = ( - PORT_MIN + - PORT_START_MAP[port_name] + - (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + PORT_MIN + + PORT_START_MAP[port_name] + + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) ) def is_port_available(port: int) -> bool: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: - sock.bind(('127.0.0.1', port)) + sock.bind(("127.0.0.1", port)) return True except OSError: return False # The LRU cache ensures that for a given type and peer / node index, the # functions always return the same port, and that it is tested only the # first time. The parameter `n` is not unused, it is the key in the cache # dictionary. 
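To make the port arithmetic above concrete, a small worked example under the assumption PortSeed.n == 1 (the constants are the ones defined above):

    # Assumed PortSeed.n == 1; constants as defined above.
    PORT_MIN, PORT_RANGE, MAX_NODES = 20000, 5000, 64
    offset = (MAX_NODES * 1) % (PORT_RANGE - 1 - MAX_NODES)   # 64
    p2p_base = PORT_MIN + 0 + offset                          # 20064
    rpc_base = PORT_MIN + PORT_RANGE + offset                 # 25064
    chronik_base = PORT_MIN + 2 * PORT_RANGE + offset         # 30064
    assert (p2p_base, rpc_base, chronik_base) == (20064, 25064, 30064)

unique_port(), defined just below, then increments from the base for each node index and probes each candidate with is_port_available().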
@lru_cache(maxsize=None) def unique_port(port_name: PortName, n: int) -> int: global LAST_USED_PORT_MAP if port_name not in LAST_USED_PORT_MAP: initialize_port(port_name) for _ in range(MAX_PORT_RETRY): LAST_USED_PORT_MAP[port_name] += 1 if is_port_available(LAST_USED_PORT_MAP[port_name]): return LAST_USED_PORT_MAP[port_name] raise RuntimeError( - f"Could not find available {port_name} port after {MAX_PORT_RETRY} attempts.") + f"Could not find available {port_name} port after {MAX_PORT_RETRY} attempts." + ) def p2p_port(n: int) -> int: return unique_port(PortName.P2P, n) def rpc_port(n: int) -> int: return unique_port(PortName.RPC, n) def chronik_port(n: int) -> int: return unique_port(PortName.CHRONIK, n) def rpc_url(datadir, chain, host, port): rpc_u, rpc_p = get_auth_cookie(datadir, chain) if host is None: - host = '127.0.0.1' + host = "127.0.0.1" return f"http://{rpc_u}:{rpc_p}@{host}:{int(port)}" + # Node functions ################ def initialize_datadir(dirname, n, chain, disable_autoconnect=True): datadir = get_datadir_path(dirname, n) if not os.path.isdir(datadir): os.makedirs(datadir) # Translate chain name to config name - if chain == 'testnet3': - chain_name_conf_arg = 'testnet' - chain_name_conf_section = 'test' + if chain == "testnet3": + chain_name_conf_arg = "testnet" + chain_name_conf_section = "test" else: chain_name_conf_arg = chain chain_name_conf_section = chain - with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "w", encoding="utf8") as f: f.write(f"{chain_name_conf_arg}=1\n") f.write(f"[{chain_name_conf_section}]\n") f.write(f"port={str(p2p_port(n))}\n") f.write(f"rpcport={str(rpc_port(n))}\n") f.write(f"chronikbind=127.0.0.1:{str(chronik_port(n))}\n") f.write("fallbackfee=200\n") f.write("server=1\n") f.write("keypool=1\n") f.write("discover=0\n") f.write("dnsseed=0\n") f.write("fixedseeds=0\n") f.write("listenonion=0\n") f.write("printtoconsole=0\n") f.write("upnp=0\n") f.write("natpmp=0\n") f.write("usecashaddr=1\n") # Increase peertimeout to avoid disconnects while using mocktime. # peertimeout is measured in mock time, so setting it large enough to # cover any duration in mock time is sufficient. It can be overridden # in tests. 
f.write("peertimeout=999999999\n") f.write("shrinkdebugfile=0\n") if disable_autoconnect: f.write("connect=0\n") - os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) - os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) + os.makedirs(os.path.join(datadir, "stderr"), exist_ok=True) + os.makedirs(os.path.join(datadir, "stdout"), exist_ok=True) return datadir def get_datadir_path(dirname, n): return os.path.join(dirname, f"node{str(n)}") def append_config(datadir, options): - with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "a", encoding="utf8") as f: for option in options: f.write(f"{option}\n") def get_auth_cookie(datadir, chain): user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): - with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "r", encoding="utf8") as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): - assert password is None # Ensure that there is only one rpcpassword line + assert ( + password is None + ) # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") try: - with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f: + with open(os.path.join(datadir, chain, ".cookie"), "r", encoding="ascii") as f: userpass = f.read() - split_userpass = userpass.split(':') + split_userpass = userpass.split(":") user = split_userpass[0] password = split_userpass[1] except OSError: pass if user is None or password is None: raise ValueError("No RPC credentials") return user, password # If a cookie file exists in the given datadir, delete it. def delete_cookie_file(datadir, chain): if os.path.isfile(os.path.join(datadir, chain, ".cookie")): logger.debug("Deleting leftover cookie file") os.remove(os.path.join(datadir, chain, ".cookie")) def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def check_node_connections(*, node, num_in, num_out): info = node.getnetworkinfo() assert_equal(info["connections_in"], num_in) assert_equal(info["connections_out"], num_out) # Transaction/Block functions ############################# def find_output(node, txid, amount, *, blockhash=None): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1, blockhash) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError(f"find_output txid {txid} : {str(amount)} not found") # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). 
def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes for _ in range(512): script_pubkey = f"{script_pubkey}01" # concatenate 128 txouts of above script_pubkey which we'll insert before # the txout for change txouts = [] from .messages import CTxOut + txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = bytes.fromhex(script_pubkey) for _ in range(128): txouts.append(txout) return txouts + # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] from .messages import CTransaction + for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} - change = t['amount'] - fee + change = t["amount"] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) tx = CTransaction() tx.deserialize(BytesIO(bytes.fromhex(rawtx))) for txout in txouts: tx.vout.append(txout) newtx = tx.serialize().hex() - signresult = node.signrawtransactionwithwallet( - newtx, None, "NONE|FORKID") + signresult = node.signrawtransactionwithwallet(newtx, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], 0) txids.append(txid) return txids def find_vout_for_address(node, txid, addr): """ Locate the vout index of the given transaction sending to the given address. Raises runtime error exception if not found. """ tx = node.getrawtransaction(txid, True) for i in range(len(tx["vout"])): if any(addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]): return i - raise RuntimeError( - f"Vout not found for address: txid={txid}, addr={addr}") + raise RuntimeError(f"Vout not found for address: txid={txid}, addr={addr}") def modinv(a, n): """Compute the modular inverse of a modulo n using the extended Euclidean Algorithm. See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Modular_integers. """ # TODO: Change to pow(a, -1, n) available in Python 3.8 t1, t2 = 0, 1 r1, r2 = n, a while r2 != 0: q = r1 // r2 t1, t2 = t2, t1 - q * t2 r1, r2 = r2, r1 - q * r2 if r1 > 1: return None if t1 < 0: t1 += n return t1 def uint256_hex(hash_int: int) -> str: return f"{hash_int:0{64}x}" class TestFrameworkUtil(unittest.TestCase): def test_modinv(self): test_vectors = [ [7, 11], [11, 29], [90, 13], [1891, 3797], [6003722857, 77695236973], ] for a, n in test_vectors: self.assertEqual(modinv(a, n), pow(a, n - 2, n)) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index adde9776f..2049312cb 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -1,302 +1,322 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
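Looking back at util.py for a moment, a quick check of the modinv() helper there, with inputs taken from its own test vectors:

    assert modinv(7, 11) == 8                      # 7 * 8 = 56 = 5 * 11 + 1
    assert modinv(90, 13) == pow(90, 13 - 2, 13)   # Fermat inverse, prime modulus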
"""A limited-functionality wallet, which may replace a real wallet in tests""" from copy import deepcopy from decimal import Decimal from typing import Any, Optional from test_framework.address import ( ADDRESS_ECREG_P2SH_OP_TRUE, SCRIPTSIG_OP_TRUE, base58_to_byte, key_to_p2pkh, ) from test_framework.key import ECKey from test_framework.messages import ( XEC, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.script import ( OP_CHECKSIG, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, CScript, hash160, ) from test_framework.txtools import pad_tx from test_framework.util import ( assert_equal, assert_greater_than_or_equal, satoshi_round, ) DEFAULT_FEE = Decimal("100.00") class MiniWallet: def __init__(self, test_node): self._test_node = test_node self._utxos = [] self._address = ADDRESS_ECREG_P2SH_OP_TRUE self._scriptPubKey = bytes.fromhex( - self._test_node.validateaddress( - self._address)['scriptPubKey']) + self._test_node.validateaddress(self._address)["scriptPubKey"] + ) def _create_utxo(self, *, txid, vout, value, height): return {"txid": txid, "vout": vout, "value": value, "height": height} def rescan_utxos(self): """Drop all utxos and rescan the utxo set""" self._utxos = [] res = self._test_node.scantxoutset( - action="start", scanobjects=[f'raw({self._scriptPubKey.hex()})']) - assert_equal(True, res['success']) - for utxo in res['unspents']: + action="start", scanobjects=[f"raw({self._scriptPubKey.hex()})"] + ) + assert_equal(True, res["success"]) + for utxo in res["unspents"]: self._utxos.append( self._create_utxo( txid=utxo["txid"], vout=utxo["vout"], value=utxo["amount"], - height=utxo["height"])) + height=utxo["height"], + ) + ) def scan_tx(self, tx): """Scan the tx and adjust the internal list of owned utxos""" for spent in tx["vin"]: # Mark spent. This may happen when the caller has ownership of a # utxo that remained in this wallet. For example, by passing # mark_as_spent=False to get_utxo or by using an utxo returned by a # create_self_transfer* call. try: self.get_utxo(txid=spent["txid"], vout=spent["vout"]) except StopIteration: pass - for out in tx['vout']: - if out['scriptPubKey']['hex'] == self._scriptPubKey.hex(): + for out in tx["vout"]: + if out["scriptPubKey"]["hex"] == self._scriptPubKey.hex(): self._utxos.append( self._create_utxo( - txid=tx["txid"], - vout=out["n"], - value=out["value"], - height=0)) + txid=tx["txid"], vout=out["n"], value=out["value"], height=0 + ) + ) def generate(self, num_blocks, **kwargs): """Generate blocks with coinbase outputs to the internal address, and call rescan_utxos""" blocks = self._test_node.generatetodescriptor( - num_blocks, f'raw({self._scriptPubKey.hex()})', **kwargs) + num_blocks, f"raw({self._scriptPubKey.hex()})", **kwargs + ) # Calling rescan_utxos here makes sure that after a generate the utxo # set is in a clean state. 
For example, the wallet will update # - if the caller consumed utxos, but never used them # - if the caller sent a transaction that is not mined # - after block re-orgs # - the utxo height for mined mempool txs # - However, the wallet will not consider remaining mempool txs self.rescan_utxos() return blocks def get_scriptPubKey(self): return self._scriptPubKey - def get_utxo(self, *, txid: str = '', vout: Optional[int] = None): + def get_utxo(self, *, txid: str = "", vout: Optional[int] = None): """ Returns a utxo and marks it as spent (pops it from the internal list) Args: txid: get the first utxo we find from a specific transaction """ # Put the largest utxo last - self._utxos = sorted( - self._utxos, key=lambda k: ( - k['value'], -k['height'])) + self._utxos = sorted(self._utxos, key=lambda k: (k["value"], -k["height"])) if txid: - utxo_filter: Any = filter( - lambda utxo: txid == utxo['txid'], self._utxos) + utxo_filter: Any = filter(lambda utxo: txid == utxo["txid"], self._utxos) else: # By default the largest utxo utxo_filter = reversed(self._utxos) if vout is not None: - utxo_filter = filter( - lambda utxo: vout == utxo['vout'], utxo_filter) + utxo_filter = filter(lambda utxo: vout == utxo["vout"], utxo_filter) index = self._utxos.index(next(utxo_filter)) return self._utxos.pop(index) def send_self_transfer(self, **kwargs): """Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed.""" tx = self.create_self_transfer(**kwargs) - self.sendrawtransaction( - from_node=kwargs['from_node'], - tx_hex=tx['hex']) + self.sendrawtransaction(from_node=kwargs["from_node"], tx_hex=tx["hex"]) return tx def send_to(self, *, from_node, scriptPubKey, amount, fee=1000): """ Create and send a tx with an output to a given scriptPubKey/amount, plus a change output to our internal address. To keep things simple, a fixed fee given in Satoshi is used. Note that this method fails if there is no single internal utxo available that can cover the cost for the amount and the fixed fee (the utxo with the largest value is taken). Returns a tuple (txid, n) referring to the created external utxo outpoint. """ tx = self.create_self_transfer(from_node=from_node, fee_rate=0)["tx"] assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee) # change output -> MiniWallet - tx.vout[0].nValue -= (amount + fee) + tx.vout[0].nValue -= amount + fee # arbitrary output -> to be returned tx.vout.append(CTxOut(amount, scriptPubKey)) - txid = self.sendrawtransaction( - from_node=from_node, - tx_hex=tx.serialize().hex()) + txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex()) return txid, len(tx.vout) - 1 - def create_self_transfer(self, *, fee_rate=Decimal("3000.00"), - from_node, utxo_to_spend=None, locktime=0): + def create_self_transfer( + self, *, fee_rate=Decimal("3000.00"), from_node, utxo_to_spend=None, locktime=0 + ): """Create and return a tx with the specified fee_rate. 
Fee may be exact or at most one satoshi higher than needed.""" utxo_to_spend = utxo_to_spend or self.get_utxo() # The size will be enforced by pad_tx() size = 100 send_value = satoshi_round( - utxo_to_spend['value'] - fee_rate * (Decimal(size) / 1000)) + utxo_to_spend["value"] - fee_rate * (Decimal(size) / 1000) + ) assert send_value > 0 tx = CTransaction() - tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), - utxo_to_spend['vout']))] + tx.vin = [ + CTxIn(COutPoint(int(utxo_to_spend["txid"], 16), utxo_to_spend["vout"])) + ] tx.vout = [CTxOut(int(send_value * XEC), self._scriptPubKey)] tx.nLockTime = locktime tx.vin[0].scriptSig = SCRIPTSIG_OP_TRUE pad_tx(tx, size) tx_hex = tx.serialize().hex() assert_equal(len(tx.serialize()), size) new_utxo = self._create_utxo( - txid=tx.rehash(), vout=0, value=send_value, height=0) + txid=tx.rehash(), vout=0, value=send_value, height=0 + ) - return {"txid": new_utxo["txid"], - "hex": tx_hex, "tx": tx, "new_utxo": new_utxo} + return {"txid": new_utxo["txid"], "hex": tx_hex, "tx": tx, "new_utxo": new_utxo} def sendrawtransaction(self, *, from_node, tx_hex): txid = from_node.sendrawtransaction(tx_hex) self.scan_tx(from_node.decoderawtransaction(tx_hex)) return txid def getnewdestination(): """Generate a random destination and return the corresponding public key, - scriptPubKey and address. Can be used when a random destination is - needed, but no compiled wallet is available (e.g. as replacement to the - getnewaddress/getaddressinfo RPCs).""" + scriptPubKey and address. Can be used when a random destination is + needed, but no compiled wallet is available (e.g. as replacement to the + getnewaddress/getaddressinfo RPCs).""" key = ECKey() key.generate() pubkey = key.get_pubkey().get_bytes() scriptpubkey = CScript( - [OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) + [OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG] + ) return pubkey, scriptpubkey, key_to_p2pkh(pubkey) def address_to_scriptpubkey(address): """Converts a given address to the corresponding output script (scriptPubKey).""" payload, version = base58_to_byte(address) if version == 111: # testnet pubkey hash - return CScript([OP_DUP, OP_HASH160, payload, - OP_EQUALVERIFY, OP_CHECKSIG]) + return CScript([OP_DUP, OP_HASH160, payload, OP_EQUALVERIFY, OP_CHECKSIG]) elif version == 196: # testnet script hash return CScript([OP_HASH160, payload, OP_EQUAL]) # TODO: also support other address formats else: assert False -def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, - parent_locking_script=None, fee=DEFAULT_FEE): +def make_chain( + node, + address, + privkeys, + parent_txid, + parent_value, + n=0, + parent_locking_script=None, + fee=DEFAULT_FEE, +): """Build a transaction that spends parent_txid.vout[n] and produces one output with amount = parent_value with a fee deducted. Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created). 
""" inputs = [{"txid": parent_txid, "vout": n}] my_value = parent_value - fee outputs = {address: my_value} rawtx = node.createrawtransaction(inputs, outputs) - prevtxs = [{ - "txid": parent_txid, - "vout": n, - "scriptPubKey": parent_locking_script, - "amount": parent_value, - }] if parent_locking_script else None + prevtxs = ( + [ + { + "txid": parent_txid, + "vout": n, + "scriptPubKey": parent_locking_script, + "amount": parent_value, + } + ] + if parent_locking_script + else None + ) signedtx = node.signrawtransactionwithkey( - hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs) + hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs + ) assert signedtx["complete"] tx = FromHex(CTransaction(), signedtx["hex"]) return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex()) -def create_child_with_parents(node, address, privkeys, parents_tx, values, - locking_scripts, fee=DEFAULT_FEE): +def create_child_with_parents( + node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE +): """Creates a transaction that spends the first output of each parent in parents_tx.""" num_parents = len(parents_tx) total_value = sum(values) inputs = [{"txid": tx.get_id(), "vout": 0} for tx in parents_tx] outputs = {address: total_value - fee} rawtx_child = node.createrawtransaction(inputs, outputs) prevtxs = [] for i in range(num_parents): prevtxs.append( - {"txid": parents_tx[i].get_id(), "vout": 0, - "scriptPubKey": locking_scripts[i], "amount": values[i]}) + { + "txid": parents_tx[i].get_id(), + "vout": 0, + "scriptPubKey": locking_scripts[i], + "amount": values[i], + } + ) signedtx_child = node.signrawtransactionwithkey( - hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs) + hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs + ) assert signedtx_child["complete"] return signedtx_child["hex"] def create_raw_chain(node, first_coin, address, privkeys, chain_length=50): """Helper function: create a "chain" of chain_length transactions. The nth transaction in the chain is a child of the n-1th transaction and parent of the n+1th transaction. """ parent_locking_script = None txid = first_coin["txid"] chain_hex = [] chain_txns = [] value = first_coin["amount"] for _ in range(chain_length): (tx, txhex, value, parent_locking_script) = make_chain( - node, address, privkeys, txid, value, 0, parent_locking_script) + node, address, privkeys, txid, value, 0, parent_locking_script + ) txid = tx.get_id() chain_hex.append(txhex) chain_txns.append(tx) return (chain_hex, chain_txns) def bulk_transaction( - tx: CTransaction, node, target_size: int, privkeys=None, prevtxs=None + tx: CTransaction, node, target_size: int, privkeys=None, prevtxs=None ) -> CTransaction: """Return a padded and signed transaction. The original transaction is left unaltered. If privkeys is not specified, it is assumed that the transaction has an anyone-can-spend output as unique output. 
""" tx_heavy = deepcopy(tx) pad_tx(tx_heavy, target_size) assert_greater_than_or_equal(tx_heavy.billable_size(), target_size) if privkeys is not None: - signed_tx = node.signrawtransactionwithkey( - ToHex(tx_heavy), privkeys, prevtxs) + signed_tx = node.signrawtransactionwithkey(ToHex(tx_heavy), privkeys, prevtxs) return FromHex(CTransaction(), signed_tx["hex"]) # OP_TRUE tx_heavy.vin[0].scriptSig = SCRIPTSIG_OP_TRUE return tx_heavy diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py index 4c49a67f0..d22c16fa2 100755 --- a/test/functional/test_framework/wallet_util.py +++ b/test/functional/test_framework/wallet_util.py @@ -1,108 +1,115 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Useful util functions for testing the wallet""" from collections import namedtuple from test_framework.address import byte_to_base58, key_to_p2pkh, script_to_p2sh from test_framework.key import ECKey from test_framework.script import ( OP_2, OP_3, OP_CHECKMULTISIG, OP_CHECKSIG, OP_DUP, OP_EQUAL, OP_EQUALVERIFY, OP_HASH160, CScript, hash160, ) -Key = namedtuple('Key', ['privkey', - 'pubkey', - 'p2pkh_script', - 'p2pkh_addr']) +Key = namedtuple("Key", ["privkey", "pubkey", "p2pkh_script", "p2pkh_addr"]) -Multisig = namedtuple('Multisig', ['privkeys', - 'pubkeys', - 'p2sh_script', - 'p2sh_addr', - 'redeem_script']) +Multisig = namedtuple( + "Multisig", ["privkeys", "pubkeys", "p2sh_script", "p2sh_addr", "redeem_script"] +) def get_key(node): """Generate a fresh key on node Returns a named tuple of privkey, pubkey and all address and scripts.""" addr = node.getnewaddress() - pubkey = node.getaddressinfo(addr)['pubkey'] + pubkey = node.getaddressinfo(addr)["pubkey"] pkh = hash160(bytes.fromhex(pubkey)) - return Key(privkey=node.dumpprivkey(addr), - pubkey=pubkey, - p2pkh_script=CScript( - [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(), - p2pkh_addr=key_to_p2pkh(pubkey)) + return Key( + privkey=node.dumpprivkey(addr), + pubkey=pubkey, + p2pkh_script=CScript( + [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG] + ).hex(), + p2pkh_addr=key_to_p2pkh(pubkey), + ) def get_generate_key(): """Generate a fresh key Returns a named tuple of privkey, pubkey and all address and scripts.""" eckey = ECKey() eckey.generate() privkey = bytes_to_wif(eckey.get_bytes()) pubkey = eckey.get_pubkey().get_bytes().hex() pkh = hash160(bytes.fromhex(pubkey)) - return Key(privkey=privkey, - pubkey=pubkey, - p2pkh_script=CScript( - [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(), - p2pkh_addr=key_to_p2pkh(pubkey)) + return Key( + privkey=privkey, + pubkey=pubkey, + p2pkh_script=CScript( + [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG] + ).hex(), + p2pkh_addr=key_to_p2pkh(pubkey), + ) def get_multisig(node): """Generate a fresh 2-of-3 multisig on node Returns a named tuple of privkeys, pubkeys and all address and scripts.""" addrs = [] pubkeys = [] for _ in range(3): addr = node.getaddressinfo(node.getnewaddress()) - addrs.append(addr['address']) - pubkeys.append(addr['pubkey']) - script_code = CScript([OP_2] + [bytes.fromhex(pubkey) - for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG]) - return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs], - pubkeys=pubkeys, - p2sh_script=CScript( - [OP_HASH160, hash160(script_code), OP_EQUAL]).hex(), - 
p2sh_addr=script_to_p2sh(script_code), - redeem_script=script_code.hex()) + addrs.append(addr["address"]) + pubkeys.append(addr["pubkey"]) + script_code = CScript( + [OP_2] + + [bytes.fromhex(pubkey) for pubkey in pubkeys] + + [OP_3, OP_CHECKMULTISIG] + ) + return Multisig( + privkeys=[node.dumpprivkey(addr) for addr in addrs], + pubkeys=pubkeys, + p2sh_script=CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(), + p2sh_addr=script_to_p2sh(script_code), + redeem_script=script_code.hex(), + ) def test_address(node, address, **kwargs): """Get address info for `address` and test whether the returned values are as expected.""" addr_info = node.getaddressinfo(address) for key, value in kwargs.items(): if value is None: if key in addr_info.keys(): raise AssertionError( - f"key {key} unexpectedly returned in getaddressinfo.") + f"key {key} unexpectedly returned in getaddressinfo." + ) elif addr_info[key] != value: raise AssertionError( - f"key {key} value {addr_info[key]} did not match expected value {value}") + f"key {key} value {addr_info[key]} did not match expected value {value}" + ) def bytes_to_wif(b, compressed=True): if compressed: - b += b'\x01' + b += b"\x01" return byte_to_base58(b, 239) def generate_wif_key(): # Makes a WIF privkey for imports k = ECKey() k.generate() return bytes_to_wif(k.get_bytes(), k.is_compressed)
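A brief usage sketch of the key helpers above (regtest/testnet context assumed; the exact strings depend on the randomly generated key, so only structural properties are checked):

    key = get_generate_key()      # offline key; no running node required
    assert key.p2pkh_addr == key_to_p2pkh(key.pubkey)
    wif = generate_wif_key()      # WIF string using testnet version byte 239
    assert isinstance(wif, str)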