diff --git a/.arclint b/.arclint
--- a/.arclint
+++ b/.arclint
@@ -23,7 +23,7 @@
             "exclude": [
                 "(^contrib/gitian-builder/)",
                 "(^contrib/apple-sdk-tools/)",
-                "(^test/functional/.*\\.py$)"
+                "(^test/functional/)"
             ],
             "flags": [
                 "--aggressive",
@@ -35,7 +35,7 @@
             "type": "black",
             "version": ">=23.0.0",
             "include": [
-                "(^test/functional/.*\\.py$)"
+                "(^test/functional/)"
             ],
             "flags": [
                 "--preview"
diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py
--- a/test/functional/data/invalid_txs.py
+++ b/test/functional/data/invalid_txs.py
@@ -36,12 +36,12 @@
 )
 from test_framework.txtools import pad_tx

-basic_p2sh = sc.CScript(
-    [sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])
+basic_p2sh = sc.CScript([sc.OP_HASH160, sc.hash160(sc.CScript([sc.OP_0])), sc.OP_EQUAL])


 class BadTxTemplate:
     """Allows simple construction of a certain kind of invalid tx. Base class to be subclassed."""
+
     __metaclass__ = abc.ABCMeta

     # The expected error code given by bitcoind upon submission of the tx.
@@ -60,12 +60,7 @@
     def __init__(self, *, spend_tx=None, spend_block=None):
         self.spend_tx = spend_block.vtx[0] if spend_block else spend_tx
         self.spend_avail = sum(o.nValue for o in self.spend_tx.vout)
-        self.valid_txin = CTxIn(
-            COutPoint(
-                self.spend_tx.sha256,
-                0),
-            b"",
-            0xffffffff)
+        self.valid_txin = CTxIn(COutPoint(self.spend_tx.sha256, 0), b"", 0xFFFFFFFF)

     @abc.abstractmethod
     def get_tx(self, *args, **kwargs):
@@ -120,20 +115,14 @@
         bad_idx = num_indices + 100

         tx = CTransaction()
-        tx.vin.append(
-            CTxIn(
-                COutPoint(
-                    self.spend_tx.sha256,
-                    bad_idx),
-                b"",
-                0xffffffff))
+        tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256, bad_idx), b"", 0xFFFFFFFF))
         tx.vout.append(CTxOut(0, basic_p2sh))
         tx.calc_sha256()
         return tx


 class DuplicateInput(BadTxTemplate):
-    reject_reason = 'bad-txns-inputs-duplicate'
+    reject_reason = "bad-txns-inputs-duplicate"
     expect_disconnect = True

     def get_tx(self):
@@ -146,13 +135,13 @@


 class PrevoutNullInput(BadTxTemplate):
-    reject_reason = 'bad-txns-prevout-null'
+    reject_reason = "bad-txns-prevout-null"
     expect_disconnect = True

     def get_tx(self):
         tx = CTransaction()
         tx.vin.append(self.valid_txin)
-        tx.vin.append(CTxIn(COutPoint(txid=0, n=0xffffffff)))
+        tx.vin.append(CTxIn(COutPoint(txid=0, n=0xFFFFFFFF)))
         tx.vout.append(CTxOut(1, basic_p2sh))
         tx.calc_sha256()
         return tx
@@ -165,14 +154,7 @@

     def get_tx(self):
         tx = CTransaction()
-        tx.vin.append(
-            CTxIn(
-                COutPoint(
-                    self.spend_tx.sha256 +
-                    1,
-                    0),
-                b"",
-                0xffffffff))
+        tx.vin.append(CTxIn(COutPoint(self.spend_tx.sha256 + 1, 0), b"", 0xFFFFFFFF))
         tx.vin.append(self.valid_txin)
         tx.vout.append(CTxOut(1, basic_p2sh))
         tx.calc_sha256()
@@ -180,16 +162,17 @@


 class SpendTooMuch(BadTxTemplate):
-    reject_reason = 'bad-txns-in-belowout'
+    reject_reason = "bad-txns-in-belowout"
     expect_disconnect = True

     def get_tx(self):
         return create_tx_with_script(
-            self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1))
+            self.spend_tx, 0, script_pub_key=basic_p2sh, amount=(self.spend_avail + 1)
+        )


 class CreateNegative(BadTxTemplate):
-    reject_reason = 'bad-txns-vout-negative'
+    reject_reason = "bad-txns-vout-negative"
     expect_disconnect = True

     def get_tx(self):
@@ -197,7 +180,7 @@


 class CreateTooLarge(BadTxTemplate):
-    reject_reason = 'bad-txns-vout-toolarge'
+    reject_reason = "bad-txns-vout-toolarge"
     expect_disconnect = True

     def get_tx(self):
@@ -205,7 +188,7 @@


 class CreateSumTooLarge(BadTxTemplate):
-    reject_reason = 'bad-txns-txouttotal-toolarge'
+    reject_reason = "bad-txns-txouttotal-toolarge"
     expect_disconnect = True

     def get_tx(self):
@@ -222,12 +205,12 @@

     def get_tx(self):
         return create_tx_with_script(
-            self.spend_tx, 0, script_sig=b'\x64' * 35,
-            amount=(self.spend_avail // 2))
+            self.spend_tx, 0, script_sig=b"\x64" * 35, amount=(self.spend_avail // 2)
+        )


 def getDisabledOpcodeTemplate(opcode):
-    """ Creates disabled opcode tx template class"""
+    """Creates disabled opcode tx template class"""

     def get_tx(self):
         tx = CTransaction()
@@ -239,22 +222,23 @@
         tx.calc_sha256()
         return tx

-    return type(f"DisabledOpcode_{str(opcode)}", (BadTxTemplate,), {
-        'reject_reason': "disabled opcode",
-        'expect_disconnect': True,
-        'get_tx': get_tx,
-        'valid_in_block': True
-    })
+    return type(
+        f"DisabledOpcode_{str(opcode)}",
+        (BadTxTemplate,),
+        {
+            "reject_reason": "disabled opcode",
+            "expect_disconnect": True,
+            "get_tx": get_tx,
+            "valid_in_block": True,
+        },
+    )


 # Disabled opcode tx templates (CVE-2010-5137)
-DisabledOpcodeTemplates = [getDisabledOpcodeTemplate(opcode) for opcode in [
-    OP_INVERT,
-    OP_2MUL,
-    OP_2DIV,
-    OP_MUL,
-    OP_LSHIFT,
-    OP_RSHIFT]]
+DisabledOpcodeTemplates = [
+    getDisabledOpcodeTemplate(opcode)
+    for opcode in [OP_INVERT, OP_2MUL, OP_2DIV, OP_MUL, OP_LSHIFT, OP_RSHIFT]
+]


 def iter_all_templates():
diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py
--- a/test/functional/test_framework/address.py
+++ b/test/functional/test_framework/address.py
@@ -9,24 +9,25 @@
 from .script import OP_TRUE, CScript, CScriptOp, hash160, hash256
 from .util import assert_equal

-ADDRESS_ECREG_UNSPENDABLE = 'ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt'
-ADDRESS_ECREG_UNSPENDABLE_DESCRIPTOR = 'addr(ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt)#u6xx93xc'
+ADDRESS_ECREG_UNSPENDABLE = "ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt"
+ADDRESS_ECREG_UNSPENDABLE_DESCRIPTOR = (
+    "addr(ecregtest:qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqcrl5mqkt)#u6xx93xc"
+)
 # Coins sent to this address can be spent with a scriptSig of just OP_TRUE
-ADDRESS_ECREG_P2SH_OP_TRUE = 'ecregtest:prdpw30fk4ym6zl6rftfjuw806arpn26fvkgfu97xt'
-P2SH_OP_TRUE = CScript.fromhex(
-    'a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87')
+ADDRESS_ECREG_P2SH_OP_TRUE = "ecregtest:prdpw30fk4ym6zl6rftfjuw806arpn26fvkgfu97xt"
+P2SH_OP_TRUE = CScript.fromhex("a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87")
 SCRIPTSIG_OP_TRUE = CScriptOp.encode_op_pushdata(CScript([OP_TRUE]))

-chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"


 def byte_to_base58(b, version):
-    result = ''
+    result = ""
     # prepend version
     b = bytes([version]) + b
     # append checksum
     b += hash256(b)[:4]
-    value = int.from_bytes(b, 'big')
+    value = int.from_bytes(b, "big")
     while value > 0:
         result = chars[value % 58] + result
         value //= 58
@@ -41,24 +42,24 @@

     Throws if the base58 checksum is invalid."""
     if not s:
-        return b''
+        return b""
     n = 0
     for c in s:
         n *= 58
         assert c in chars
         digit = chars.index(c)
         n += digit
-    h = f'{n:x}'
+    h = f"{n:x}"
     if len(h) % 2:
         h = f"0{h}"
-    res = n.to_bytes((n.bit_length() + 7) // 8, 'big')
+    res = n.to_bytes((n.bit_length() + 7) // 8, "big")
     pad = 0
     for c in s:
         if c == chars[0]:
             pad += 1
         else:
             break
-    res = b'\x00' * pad + res
+    res = b"\x00" * pad + res

     # Assert if the checksum is invalid
     assert_equal(hash256(res[:-4])[:4], res[-4:])
@@ -67,13 +68,13 @@


 def keyhash_to_p2pkh(keyhash, main=False):
-    assert (len(keyhash) == 20)
+    assert len(keyhash) == 20
     version = 0 if main else 111
     return byte_to_base58(keyhash, version)


 def scripthash_to_p2sh(scripthash, main=False):
-    assert (len(scripthash) == 20)
+    assert len(scripthash) == 20
     version = 5 if main else 196
     return byte_to_base58(scripthash, version)
@@ -89,17 +90,17 @@


 def check_key(key):
-    if (isinstance(key, str)):
+    if isinstance(key, str):
         key = bytes.fromhex(key)  # Assuming this is hex string
-    if (isinstance(key, bytes) and (len(key) == 33 or len(key) == 65)):
+    if isinstance(key, bytes) and (len(key) == 33 or len(key) == 65):
         return key
     assert False


 def check_script(script):
-    if (isinstance(script, str)):
+    if isinstance(script, str):
         script = bytes.fromhex(script)  # Assuming this is hex string
-    if (isinstance(script, bytes) or isinstance(script, CScript)):
+    if isinstance(script, bytes) or isinstance(script, CScript):
         return script
     assert False
@@ -108,30 +109,20 @@
     def test_base58encodedecode(self):
         def check_base58(data, version):
             self.assertEqual(
-                base58_to_byte(byte_to_base58(data, version)),
-                (data, version))
-
-        check_base58(
-            bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 111)
-        check_base58(
-            bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 111)
-        check_base58(
-            bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 111)
-        check_base58(
-            bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
-        check_base58(
-            bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
-        check_base58(
-            bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 111)
-        check_base58(
-            bytes.fromhex('1f8ea1702a7bd4941bca0941b852c4bbfedb2e05'), 0)
-        check_base58(
-            bytes.fromhex('3a0b05f4d7f66c3ba7009f453530296c845cc9cf'), 0)
-        check_base58(
-            bytes.fromhex('41c1eaf111802559bad61b60d62b1f897c63928a'), 0)
-        check_base58(
-            bytes.fromhex('0041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
-        check_base58(
-            bytes.fromhex('000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
+                base58_to_byte(byte_to_base58(data, version)), (data, version)
+            )
+
+        check_base58(bytes.fromhex("1f8ea1702a7bd4941bca0941b852c4bbfedb2e05"), 111)
+        check_base58(bytes.fromhex("3a0b05f4d7f66c3ba7009f453530296c845cc9cf"), 111)
+        check_base58(bytes.fromhex("41c1eaf111802559bad61b60d62b1f897c63928a"), 111)
+        check_base58(bytes.fromhex("0041c1eaf111802559bad61b60d62b1f897c63928a"), 111)
+        check_base58(bytes.fromhex("000041c1eaf111802559bad61b60d62b1f897c63928a"), 111)
         check_base58(
-            bytes.fromhex('00000041c1eaf111802559bad61b60d62b1f897c63928a'), 0)
+            bytes.fromhex("00000041c1eaf111802559bad61b60d62b1f897c63928a"), 111
+        )
+        check_base58(bytes.fromhex("1f8ea1702a7bd4941bca0941b852c4bbfedb2e05"), 0)
+        check_base58(bytes.fromhex("3a0b05f4d7f66c3ba7009f453530296c845cc9cf"), 0)
+        check_base58(bytes.fromhex("41c1eaf111802559bad61b60d62b1f897c63928a"), 0)
+        check_base58(bytes.fromhex("0041c1eaf111802559bad61b60d62b1f897c63928a"), 0)
+        check_base58(bytes.fromhex("000041c1eaf111802559bad61b60d62b1f897c63928a"), 0)
+        check_base58(bytes.fromhex("00000041c1eaf111802559bad61b60d62b1f897c63928a"), 0)
diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py
--- a/test/functional/test_framework/authproxy.py
+++ b/test/functional/test_framework/authproxy.py
@@ -56,7 +56,7 @@
         try:
             errmsg = f"{rpc_error['message']} ({rpc_error['code']})"
         except (KeyError, TypeError):
-            errmsg = ''
+            errmsg = ""
         super().__init__(errmsg)
         self.error = rpc_error
         self.http_status = http_status
@@ -72,40 +72,49 @@

    __id_count = 0

     # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
-    def __init__(self, service_url, service_name=None,
-                 timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
+    def __init__(
+        self,
+        service_url,
+        service_name=None,
+        timeout=HTTP_TIMEOUT,
+        connection=None,
+        ensure_ascii=True,
+    ):
         self.__service_url = service_url
         self._service_name = service_name
         self.ensure_ascii = ensure_ascii  # can be toggled on the fly by tests
         self.__url = urllib.parse.urlparse(service_url)
-        user = None if self.__url.username is None else self.__url.username.encode(
-            'utf8')
-        passwd = None if self.__url.password is None else self.__url.password.encode(
-            'utf8')
-        authpair = user + b':' + passwd
-        self.__auth_header = b'Basic ' + base64.b64encode(authpair)
+        user = (
+            None if self.__url.username is None else self.__url.username.encode("utf8")
+        )
+        passwd = (
+            None if self.__url.password is None else self.__url.password.encode("utf8")
+        )
+        authpair = user + b":" + passwd
+        self.__auth_header = b"Basic " + base64.b64encode(authpair)
         self.timeout = timeout
         self._set_conn(connection)

     def __getattr__(self, name):
-        if name.startswith('__') and name.endswith('__'):
+        if name.startswith("__") and name.endswith("__"):
             # Python internal stuff
             raise AttributeError
         if self._service_name is not None:
             name = f"{self._service_name}.{name}"
-        return AuthServiceProxy(
-            self.__service_url, name, connection=self.__conn)
+        return AuthServiceProxy(self.__service_url, name, connection=self.__conn)

     def _request(self, method, path, postdata):
-        '''
+        """
         Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout).
         This is a workaround for https://bugs.python.org/issue3566 which is fixed in
         Python 3.5.
-        '''
-        headers = {'Host': self.__url.hostname,
-                   'User-Agent': USER_AGENT,
-                   'Authorization': self.__auth_header,
-                   'Content-type': 'application/json'}
-        if os.name == 'nt':
+        """
+        headers = {
+            "Host": self.__url.hostname,
+            "User-Agent": USER_AGENT,
+            "Authorization": self.__auth_header,
+            "Content-type": "application/json",
+        }
+        if os.name == "nt":
             # Windows somehow does not like to re-use connections
             # TODO: Find out why the connection would disconnect occasionally
             # and make it reusable on Windows
@@ -123,11 +132,13 @@
                 return self._get_response()
             except OSError as e:
                 retry = (
-                    '[WinError 10053] An established connection was aborted by the software in your host machine' in str(e))
+                    "[WinError 10053] An established connection was aborted by the software"
+                    " in your host machine"
+                    in str(e)
+                )
                 # Workaround for a bug on macOS. See
                 # https://bugs.python.org/issue33450
-                retry = retry or (
-                    '[Errno 41] Protocol wrong type for socket' in str(e))
+                retry = retry or ("[Errno 41] Protocol wrong type for socket" in str(e))
                 if retry:
                     self.__conn.close()
                     self.__conn.request(method, path, postdata, headers)
@@ -138,47 +149,66 @@

     def get_request(self, *args, **argsn):
         AuthServiceProxy.__id_count += 1

-        log.debug("-{}-> {} {}".format(
-            AuthServiceProxy.__id_count,
-            self._service_name,
-            json.dumps(
-                args or argsn,
-                default=EncodeDecimal,
-                ensure_ascii=self.ensure_ascii),
-        ))
+        log.debug(
+            "-{}-> {} {}".format(
+                AuthServiceProxy.__id_count,
+                self._service_name,
+                json.dumps(
+                    args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii
+                ),
+            )
+        )
         if args and argsn:
-            raise ValueError(
-                'Cannot handle both named and positional arguments')
-        return {'version': '1.1',
-                'method': self._service_name,
-                'params': args or argsn,
-                'id': AuthServiceProxy.__id_count}
+            raise ValueError("Cannot handle both named and positional arguments")
+        return {
+            "version": "1.1",
+            "method": self._service_name,
+            "params": args or argsn,
+            "id": AuthServiceProxy.__id_count,
+        }

     def __call__(self, *args, **argsn):
-        postdata = json.dumps(self.get_request(
-            *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
+        postdata = json.dumps(
+            self.get_request(*args, **argsn),
+            default=EncodeDecimal,
+            ensure_ascii=self.ensure_ascii,
+        )
         response, status = self._request(
-            'POST', self.__url.path, postdata.encode('utf-8'))
-        if response['error'] is not None:
-            raise JSONRPCException(response['error'], status)
-        elif 'result' not in response:
-            raise JSONRPCException({
-                'code': -343, 'message': 'missing JSON-RPC result'}, status)
+            "POST", self.__url.path, postdata.encode("utf-8")
+        )
+        if response["error"] is not None:
+            raise JSONRPCException(response["error"], status)
+        elif "result" not in response:
+            raise JSONRPCException(
+                {"code": -343, "message": "missing JSON-RPC result"}, status
+            )
         elif status != HTTPStatus.OK:
-            raise JSONRPCException({
-                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
+            raise JSONRPCException(
+                {
+                    "code": -342,
+                    "message": "non-200 HTTP status code but no JSON-RPC error",
+                },
+                status,
+            )
         else:
-            return response['result']
+            return response["result"]

     def batch(self, rpc_call_list):
         postdata = json.dumps(
-            list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
+            list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii
+        )
         log.debug(f"--> {postdata}")
         response, status = self._request(
-            'POST', self.__url.path, postdata.encode('utf-8'))
+            "POST", self.__url.path, postdata.encode("utf-8")
+        )
         if status != HTTPStatus.OK:
-            raise JSONRPCException({
-                'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status)
+            raise JSONRPCException(
+                {
+                    "code": -342,
+                    "message": "non-200 HTTP status code but no JSON-RPC error",
+                },
+                status,
+            )
         return response
@@ -186,46 +216,70 @@
         try:
             http_response = self.__conn.getresponse()
         except socket.timeout:
-            raise JSONRPCException({
-                'code': -344,
-                'message':
-                f'{self._service_name!r} RPC took longer than '
-                f'{self.__conn.timeout} seconds. Consider using larger '
-                'timeout for calls that take longer to return.'})
+            raise JSONRPCException(
+                {
+                    "code": -344,
+                    "message": (
+                        f"{self._service_name!r} RPC took longer than "
+                        f"{self.__conn.timeout} seconds. Consider using larger "
+                        "timeout for calls that take longer to return."
+                    ),
+                }
+            )
         if http_response is None:
-            raise JSONRPCException({
-                'code': -342, 'message': 'missing HTTP response from server'})
+            raise JSONRPCException(
+                {"code": -342, "message": "missing HTTP response from server"}
+            )

-        content_type = http_response.getheader('Content-Type')
-        if content_type != 'application/json':
+        content_type = http_response.getheader("Content-Type")
+        if content_type != "application/json":
             raise JSONRPCException(
-                {'code': -342,
-                 'message': f'non-JSON HTTP response with \'{http_response.status} '
-                 f'{http_response.reason}\' from server'},
-                http_response.status)
+                {
+                    "code": -342,
+                    "message": (
+                        f"non-JSON HTTP response with '{http_response.status} "
+                        f"{http_response.reason}' from server"
+                    ),
+                },
+                http_response.status,
+            )

-        responsedata = http_response.read().decode('utf8')
+        responsedata = http_response.read().decode("utf8")
         response = json.loads(responsedata, parse_float=decimal.Decimal)
         elapsed = time.time() - req_start_time
         if "error" in response and response["error"] is None:
-            log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps(
-                response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
+            log.debug(
+                "<-{}- [{:.6f}] {}".format(
+                    response["id"],
+                    elapsed,
+                    json.dumps(
+                        response["result"],
+                        default=EncodeDecimal,
+                        ensure_ascii=self.ensure_ascii,
+                    ),
+                )
+            )
         else:
             log.debug(f"<-- [{elapsed:.6f}] {responsedata}")
         return response, http_response.status

     def __truediv__(self, relative_uri):
-        return AuthServiceProxy(f"{self.__service_url}/{relative_uri}",
-                                self._service_name, connection=self.__conn)
+        return AuthServiceProxy(
+            f"{self.__service_url}/{relative_uri}",
+            self._service_name,
+            connection=self.__conn,
+        )

     def _set_conn(self, connection=None):
         port = 80 if self.__url.port is None else self.__url.port
         if connection:
             self.__conn = connection
             self.timeout = connection.timeout
-        elif self.__url.scheme == 'https':
+        elif self.__url.scheme == "https":
             self.__conn = http.client.HTTPSConnection(
-                self.__url.hostname, port, timeout=self.timeout)
+                self.__url.hostname, port, timeout=self.timeout
+            )
         else:
             self.__conn = http.client.HTTPConnection(
-                self.__url.hostname, port, timeout=self.timeout)
+                self.__url.hostname, port, timeout=self.timeout
+            )
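For orientation, AuthServiceProxy resolves RPC names through the __getattr__ hook reformatted above and ships them as JSON-RPC 1.1 POSTs with Basic auth built from the URL credentials. A hedged usage sketch (the URL, port, and RPC names here are illustrative only):

from test_framework.authproxy import AuthServiceProxy

proxy = AuthServiceProxy("http://user:pass@127.0.0.1:18443")
info = proxy.getblockchaininfo()  # attribute access builds {"method": "getblockchaininfo", ...}
# batch() takes pre-built request dicts; get_request() is the helper shown above
replies = proxy.batch([proxy.getbestblockhash.get_request() for _ in range(2)])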
diff --git a/test/functional/test_framework/avatools.py b/test/functional/test_framework/avatools.py
--- a/test/functional/test_framework/avatools.py
+++ b/test/functional/test_framework/avatools.py
@@ -49,10 +49,8 @@
 def create_coinbase_stakes(
-        node: TestNode,
-        blockhashes: List[str],
-        priv_key: str,
-        amount: Optional[str] = None) -> List[Dict[str, Any]]:
+    node: TestNode, blockhashes: List[str], priv_key: str, amount: Optional[str] = None
+) -> List[Dict[str, Any]]:
     """Returns a list of dictionaries representing stakes, in a format
     compatible with the buildavalancheproof RPC, using only coinbase
     transactions.
@@ -67,38 +65,43 @@
     blocks = [node.getblock(h, 2) for h in blockhashes]
     coinbases = [
         {
-            'height': b['height'],
-            'txid': b['tx'][0]['txid'],
-            'n': 0,
-            'value': b['tx'][0]['vout'][0]['value'],
-        } for b in blocks
+            "height": b["height"],
+            "txid": b["tx"][0]["txid"],
+            "n": 0,
+            "value": b["tx"][0]["vout"][0]["value"],
+        }
+        for b in blocks
     ]

-    return [{
-        'txid': coinbase['txid'],
-        'vout': coinbase['n'],
-        'amount': amount or coinbase['value'],
-        'height': coinbase['height'],
-        'iscoinbase': True,
-        'privatekey': priv_key,
-    } for coinbase in coinbases]
+    return [
+        {
+            "txid": coinbase["txid"],
+            "vout": coinbase["n"],
+            "amount": amount or coinbase["value"],
+            "height": coinbase["height"],
+            "iscoinbase": True,
+            "privatekey": priv_key,
+        }
+        for coinbase in coinbases
+    ]


 def get_utxos_in_blocks(node: TestNode, blockhashes: List[str]) -> List[Dict]:
-    """Return all UTXOs in the specified list of blocks.
-    """
+    """Return all UTXOs in the specified list of blocks."""
     utxos = filter(
         lambda u: node.gettransaction(u["txid"])["blockhash"] in blockhashes,
-        node.listunspent())
+        node.listunspent(),
+    )
     return list(utxos)


 def create_stakes(
-        test_framework: 'BitcoinTestFramework',
-        node: TestNode,
-        blockhashes: List[str],
-        count: int,
-        sync_fun=None,) -> List[Dict[str, Any]]:
+    test_framework: "BitcoinTestFramework",
+    node: TestNode,
+    blockhashes: List[str],
+    count: int,
+    sync_fun=None,
+) -> List[Dict[str, Any]]:
     """
     Create a list of stakes by splitting existing UTXOs from a specified list
     of blocks into 10 new coins.
@@ -119,8 +122,7 @@

     for u in utxos:
         inputs = [{"txid": u["txid"], "vout": u["vout"]}]
-        outputs = {
-            addr: satoshi_round(u['amount'] / 10) for addr in addresses}
+        outputs = {addr: satoshi_round(u["amount"] / 10) for addr in addresses}
         raw_tx = node.createrawtransaction(inputs, outputs)
         ctx = FromHex(CTransaction(), raw_tx)
         ctx.vout[0].nValue -= node.calculate_fee(ctx)
@@ -129,9 +131,10 @@

     # confirm the transactions
     new_blocks = []
-    while node.getmempoolinfo()['size'] > 0:
+    while node.getmempoolinfo()["size"] > 0:
         new_blocks += test_framework.generate(
-            node, 1, sync_fun=test_framework.no_op if sync_fun is None else sync_fun)
+            node, 1, sync_fun=test_framework.no_op if sync_fun is None else sync_fun
+        )

     utxos = get_utxos_in_blocks(node, new_blocks)
     stakes = []
@@ -141,20 +144,22 @@
         blockhash = node.gettransaction(utxo["txid"])["blockhash"]
         if blockhash not in heights:
             heights[blockhash] = node.getblock(blockhash, 1)["height"]
-        stakes.append({
-            'txid': utxo['txid'],
-            'vout': utxo['vout'],
-            'amount': utxo['amount'],
-            'iscoinbase': utxo['label'] == "coinbase",
-            'height': heights[blockhash],
-            'privatekey': private_keys[utxo["address"]],
-        })
+        stakes.append(
+            {
+                "txid": utxo["txid"],
+                "vout": utxo["vout"],
+                "amount": utxo["amount"],
+                "iscoinbase": utxo["label"] == "coinbase",
+                "height": heights[blockhash],
+                "privatekey": private_keys[utxo["address"]],
+            }
+        )

     return stakes


 def get_proof_ids(node):
-    return [int(peer['proofid'], 16) for peer in node.getavalanchepeerinfo()]
+    return [int(peer["proofid"], 16) for peer in node.getavalanchepeerinfo()]
@@ -172,6 +177,7 @@
             return True
         except JSONRPCException:
             return False
+
     wait_until_helper(proof_found, timeout=timeout)
     assert ret.get(expect_status, False) is True
@@ -230,9 +236,7 @@
         self.send_message(msg)

     def wait_for_avaresponse(self, timeout=5):
-        self.wait_until(
-            lambda: len(self.avaresponses) > 0,
-            timeout=timeout)
+        self.wait_until(lambda: len(self.avaresponses) > 0, timeout=timeout)

         with p2p_lock:
             return self.avaresponses.pop(0)
@@ -255,19 +259,24 @@
         return self.avapolls.pop(0) if len(self.avapolls) > 0 else None

     def wait_for_avahello(self, timeout=5):
-        self.wait_until(
-            lambda: self.avahello is not None,
-            timeout=timeout)
+        self.wait_until(lambda: self.avahello is not None, timeout=timeout)

         with p2p_lock:
             return self.avahello

-    def build_avahello(self, delegation: AvalancheDelegation,
-                       delegated_privkey: ECKey) -> msg_avahello:
+    def build_avahello(
+        self, delegation: AvalancheDelegation, delegated_privkey: ECKey
+    ) -> msg_avahello:
         local_sighash = hash256(
-            delegation.getid() +
-            struct.pack("
 NoHandshakeAvaP2PInterface:
+    node: TestNode, services=NODE_NETWORK | NODE_AVALANCHE
+) -> NoHandshakeAvaP2PInterface:
     """Build and return a NoHandshakeAvaP2PInterface connected to the specified
     TestNode.
     """
     n = NoHandshakeAvaP2PInterface()
-    node.add_p2p_connection(
-        n, services=services)
+    node.add_p2p_connection(n, services=services)
     n.wait_for_verack()
-    n.nodeid = node.getpeerinfo()[-1]['id']
+    n.nodeid = node.getpeerinfo()[-1]["id"]

     return n


 def get_ava_p2p_interface(
-        test_framework: 'BitcoinTestFramework',
-        node: TestNode,
-        services=NODE_NETWORK | NODE_AVALANCHE,
-        stake_utxo_confirmations=1,
-        sync_fun=None,) -> AvaP2PInterface:
-    """Build and return an AvaP2PInterface connected to the specified TestNode.
-    """
+    test_framework: "BitcoinTestFramework",
+    node: TestNode,
+    services=NODE_NETWORK | NODE_AVALANCHE,
+    stake_utxo_confirmations=1,
+    sync_fun=None,
+) -> AvaP2PInterface:
+    """Build and return an AvaP2PInterface connected to the specified TestNode."""
     n = AvaP2PInterface(test_framework, node)

     # Make sure the proof utxos are mature
@@ -375,18 +390,19 @@
         test_framework.generate(
             node,
             stake_utxo_confirmations - 1,
-            sync_fun=test_framework.no_op if sync_fun is None else sync_fun)
+            sync_fun=test_framework.no_op if sync_fun is None else sync_fun,
+        )

     assert node.verifyavalancheproof(n.proof.serialize().hex())

     proofid_hex = uint256_hex(n.proof.proofid)

     node.add_p2p_connection(n, services=services)
-    n.nodeid = node.getpeerinfo()[-1]['id']
+    n.nodeid = node.getpeerinfo()[-1]["id"]

     def avapeer_connected():
         node_list = []
         try:
-            node_list = node.getavalanchepeerinfo(proofid_hex)[0]['node_list']
+            node_list = node.getavalanchepeerinfo(proofid_hex)[0]["node_list"]
         except BaseException:
             pass
@@ -401,21 +417,27 @@
     blockhashes = test_framework.generate(
         node,
         coinbase_utxos,
-        sync_fun=test_framework.no_op if sync_fun is None else sync_fun)
+        sync_fun=test_framework.no_op if sync_fun is None else sync_fun,
+    )

     privkey = ECKey()
     privkey.generate()

     stakes = create_coinbase_stakes(
-        node, blockhashes, node.get_deterministic_priv_key().key)
+        node, blockhashes, node.get_deterministic_priv_key().key
+    )
     proof_hex = node.buildavalancheproof(
-        42, expiry, bytes_to_wif(privkey.get_bytes()), stakes)
+        42, expiry, bytes_to_wif(privkey.get_bytes()), stakes
+    )

     return privkey, avalanche_proof_from_hex(proof_hex)


-def build_msg_avaproofs(proofs: List[AvalancheProof], prefilled_proofs: Optional[List[AvalancheProof]]
-                        = None, key_pair: Optional[List[int]] = None) -> msg_avaproofs:
+def build_msg_avaproofs(
+    proofs: List[AvalancheProof],
+    prefilled_proofs: Optional[List[AvalancheProof]] = None,
+    key_pair: Optional[List[int]] = None,
+) -> msg_avaproofs:
     if key_pair is None:
         key_pair = [random.randint(0, 2**64 - 1)] * 2
@@ -424,10 +446,8 @@
     msg.key1 = key_pair[1]
     msg.prefilled_proofs = prefilled_proofs or []
     msg.shortids = [
-        calculate_shortid(
-            msg.key0,
-            msg.key1,
-            proof.proofid) for proof in proofs]
+        calculate_shortid(msg.key0, msg.key1, proof.proofid) for proof in proofs
+    ]

     return msg
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -36,41 +36,42 @@
 # Genesis block data (regtest)
 TIME_GENESIS_BLOCK = 1296688602
-GENESIS_BLOCK_HASH = '0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206'
-GENESIS_CB_TXID = '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b'
+GENESIS_BLOCK_HASH = "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206"
+GENESIS_CB_TXID = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"
 GENESIS_CB_PK = (
-    '04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38'
-    'c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f'
+    "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38"
+    "c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"
 )
-GENESIS_CB_SCRIPT_PUBKEY = CScript.fromhex(f'41{GENESIS_CB_PK}ac')
+GENESIS_CB_SCRIPT_PUBKEY = CScript.fromhex(f"41{GENESIS_CB_PK}ac")
 GENESIS_CB_SCRIPT_SIG = CScript(
-    b'\x04\xff\xff\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink of '
-    b'second bailout for banks'
+    b"\x04\xff\xff\x00\x1d\x01\x04EThe Times 03/Jan/2009 Chancellor on brink of "
+    b"second bailout for banks"
 )

 MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60


 def create_block(
-        hashprev: Optional[int] = None,
-        coinbase: Optional[CTransaction] = None,
-        ntime: Optional[int] = None,
-        *,
-        version: Optional[int] = None,
-        tmpl: Optional[dict] = None) -> CBlock:
+    hashprev: Optional[int] = None,
+    coinbase: Optional[CTransaction] = None,
+    ntime: Optional[int] = None,
+    *,
+    version: Optional[int] = None,
+    tmpl: Optional[dict] = None,
+) -> CBlock:
     """Create a block (with regtest difficulty)."""
     block = CBlock()
     if tmpl is None:
         tmpl = {}
-    block.nVersion = version or tmpl.get('version', 1)
-    block.nTime = ntime or tmpl.get('curtime', int(time.time() + 600))
-    block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
-    if tmpl.get('bits') is not None:
-        block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
+    block.nVersion = version or tmpl.get("version", 1)
+    block.nTime = ntime or tmpl.get("curtime", int(time.time() + 600))
+    block.hashPrevBlock = hashprev or int(tmpl["previousblockhash"], 0x10)
+    if tmpl.get("bits") is not None:
+        block.nBits = struct.unpack(">I", bytes.fromhex(tmpl["bits"]))[0]
     else:
         # difficulty retargeting is disabled in REGTEST chainparams
-        block.nBits = 0x207fffff
-    block.vtx.append(coinbase or create_coinbase(height=tmpl['height']))
+        block.nBits = 0x207FFFFF
+    block.vtx.append(coinbase or create_coinbase(height=tmpl["height"]))
     block.hashMerkleRoot = block.calc_merkle_root()
     block.calc_sha256()
     return block
@@ -79,8 +80,7 @@
 def make_conform_to_ctor(block: CBlock):
     for tx in block.vtx:
         tx.rehash()
-    block.vtx = [block.vtx[0]] + \
-        sorted(block.vtx[1:], key=lambda tx: tx.get_id())
+    block.vtx = [block.vtx[0]] + sorted(block.vtx[1:], key=lambda tx: tx.get_id())


 def script_BIP34_coinbase_height(height: int) -> CScript:
@@ -93,16 +93,18 @@


 def create_coinbase(
-    height: int, pubkey: Optional[bytes] = None,
-        nValue: int = 50_000_000) -> CTransaction:
+    height: int, pubkey: Optional[bytes] = None, nValue: int = 50_000_000
+) -> CTransaction:
     """Create a coinbase transaction, assuming no miner fees.

     If pubkey is passed in, the coinbase output will be a P2PK output;
     otherwise an anyone-can-spend output."""
     coinbase = CTransaction()
-    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
-                        script_BIP34_coinbase_height(height),
-                        0xffffffff))
+    coinbase.vin.append(
+        CTxIn(
+            COutPoint(0, 0xFFFFFFFF), script_BIP34_coinbase_height(height), 0xFFFFFFFF
+        )
+    )
     coinbaseoutput = CTxOut()
     coinbaseoutput.nValue = nValue * XEC
     if nValue == 50_000_000:
@@ -121,16 +123,17 @@
     return coinbase


-def create_tx_with_script(prevtx, n, script_sig=b"", *,
-                          amount, script_pub_key=CScript()):
+def create_tx_with_script(
+    prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()
+):
     """Return one-input, one-output transaction object
-       spending the prevtx's n-th output with the given amount.
+    spending the prevtx's n-th output with the given amount.

-       Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
+    Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
     """
     tx = CTransaction()
     assert n < len(prevtx.vout)
-    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff))
+    tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xFFFFFFFF))
     tx.vout.append(CTxOut(amount, script_pub_key))
     pad_tx(tx)
     tx.calc_sha256()
@@ -138,10 +141,10 @@


 def create_transaction(node, txid, to_address, *, amount):
-    """ Return signed transaction spending the first output of the
-        input txid. Note that the node must be able to sign for the
-        output that is being spent, and the node must not be running
-        multiple wallets.
+    """Return signed transaction spending the first output of the
+    input txid. Note that the node must be able to sign for the
+    output that is being spent, and the node must not be running
+    multiple wallets.
     """
     raw_tx = create_raw_transaction(node, txid, to_address, amount=amount)
     tx = FromHex(CTransaction(), raw_tx)
@@ -149,16 +152,17 @@


 def create_raw_transaction(node, txid, to_address, *, amount):
-    """ Return raw signed transaction spending the first output of the
-        input txid. Note that the node must be able to sign for the
-        output that is being spent, and the node must not be running
-        multiple wallets.
+    """Return raw signed transaction spending the first output of the
+    input txid. Note that the node must be able to sign for the
+    output that is being spent, and the node must not be running
+    multiple wallets.
     """
     rawtx = node.createrawtransaction(
-        inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
+        inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount}
+    )
     signresult = node.signrawtransactionwithwallet(rawtx)
     assert_equal(signresult["complete"], True)
-    return signresult['hex']
+    return signresult["hex"]


 def create_confirmed_utxos(test_framework, node, count, age=101, **kwargs):
@@ -180,8 +184,8 @@
         inputs = []
         inputs.append({"txid": t["txid"], "vout": t["vout"]})
         outputs = {}
-        outputs[addr1] = satoshi_round(t['amount'] / 2)
-        outputs[addr2] = satoshi_round(t['amount'] / 2)
+        outputs[addr1] = satoshi_round(t["amount"] / 2)
+        outputs[addr2] = satoshi_round(t["amount"] / 2)
         raw_tx = node.createrawtransaction(inputs, outputs)
         ctx = FromHex(CTransaction(), raw_tx)
         fee = node.calculate_fee(ctx) // 2
@@ -192,7 +196,7 @@
         signed_tx = node.signrawtransactionwithwallet(ToHex(ctx))["hex"]
         node.sendrawtransaction(signed_tx)

-    while (node.getmempoolinfo()['size'] > 0):
+    while node.getmempoolinfo()["size"] > 0:
         test_framework.generate(node, 1, **kwargs)

     utxos = node.listunspent()
@@ -214,6 +218,7 @@

 def send_big_transactions(node, utxos, num, fee_multiplier):
     from .cashaddr import decode
+
     txids = []
     padding = "1" * 512
     addrHash = decode(node.getnewaddress())[2]
@@ -221,18 +226,19 @@
     for _ in range(num):
         ctx = CTransaction()
         utxo = utxos.pop()
-        txid = int(utxo['txid'], 16)
+        txid = int(utxo["txid"], 16)
         ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b""))
         ctx.vout.append(
-            CTxOut(int(satoshi_round(utxo['amount'] * XEC)),
-                   CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG])))
+            CTxOut(
+                int(satoshi_round(utxo["amount"] * XEC)),
+                CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]),
+            )
+        )
         for i in range(0, 127):
-            ctx.vout.append(CTxOut(0, CScript(
-                [OP_RETURN, bytes(padding, 'utf-8')])))
+            ctx.vout.append(CTxOut(0, CScript([OP_RETURN, bytes(padding, "utf-8")])))
         # Create a proper fee for the transaction to be mined
         ctx.vout[0].nValue -= int(fee_multiplier * node.calculate_fee(ctx))
-        signresult = node.signrawtransactionwithwallet(
-            ToHex(ctx), None, "NONE|FORKID")
+        signresult = node.signrawtransactionwithwallet(ToHex(ctx), None, "NONE|FORKID")
         txid = node.sendrawtransaction(signresult["hex"], 0)
         txids.append(txid)
     return txids
diff --git a/test/functional/test_framework/cashaddr.py b/test/functional/test_framework/cashaddr.py
--- a/test/functional/test_framework/cashaddr.py
+++ b/test/functional/test_framework/cashaddr.py
@@ -15,24 +15,24 @@
     c = 1
     for d in values:
         c0 = c >> 35
-        c = ((c & 0x07ffffffff) << 5) ^ d
-        if (c0 & 0x01):
-            c ^= 0x98f2bc8e61
-        if (c0 & 0x02):
-            c ^= 0x79b76d99e2
-        if (c0 & 0x04):
-            c ^= 0xf33e5fb3c4
-        if (c0 & 0x08):
-            c ^= 0xae2eabe2a8
-        if (c0 & 0x10):
-            c ^= 0x1e4f43e470
+        c = ((c & 0x07FFFFFFFF) << 5) ^ d
+        if c0 & 0x01:
+            c ^= 0x98F2BC8E61
+        if c0 & 0x02:
+            c ^= 0x79B76D99E2
+        if c0 & 0x04:
+            c ^= 0xF33E5FB3C4
+        if c0 & 0x08:
+            c ^= 0xAE2EABE2A8
+        if c0 & 0x10:
+            c ^= 0x1E4F43E470
     retval = c ^ 1
     return retval


 def _prefix_expand(prefix):
     """Expand the prefix into values for checksum computation."""
-    retval = bytearray(ord(x) & 0x1f for x in prefix)
+    retval = bytearray(ord(x) & 0x1F for x in prefix)
     # Append null separator
     retval.append(0)
     return retval
@@ -78,9 +78,8 @@
         encoded_size |= (len(addr_hash) - 20 * offset) // (4 * offset)

     # invalid size?
-    if ((len(addr_hash) - 20 * offset) % (4 * offset) != 0
-            or not 0 <= encoded_size <= 7):
-        raise ValueError(f'invalid address hash size {addr_hash}')
+    if (len(addr_hash) - 20 * offset) % (4 * offset) != 0 or not 0 <= encoded_size <= 7:
+        raise ValueError(f"invalid address hash size {addr_hash}")

     version_byte |= encoded_size
@@ -100,26 +99,26 @@
     """
     lower = addr.lower()
     if lower != addr and addr.upper() != addr:
-        raise ValueError(f'mixed case in address: {addr}')
+        raise ValueError(f"mixed case in address: {addr}")

-    parts = lower.split(':', 1)
+    parts = lower.split(":", 1)
     if len(parts) != 2:
         raise ValueError(f"address missing ':' separator: {addr}")

     prefix, payload = parts
     if not prefix:
-        raise ValueError(f'address prefix is missing: {addr}')
+        raise ValueError(f"address prefix is missing: {addr}")
     if not all(33 <= ord(x) <= 126 for x in prefix):
-        raise ValueError(f'invalid address prefix: {prefix}')
+        raise ValueError(f"invalid address prefix: {prefix}")
     if not (8 <= len(payload) <= 124):
-        raise ValueError(f'address payload has invalid length: {len(addr)}')
+        raise ValueError(f"address payload has invalid length: {len(addr)}")
     try:
         data = bytes(_CHARSET.find(x) for x in payload)
     except ValueError:
-        raise ValueError(f'invalid characters in address: {payload}')
+        raise ValueError(f"invalid characters in address: {payload}")

     if _polymod(_prefix_expand(prefix) + data):
-        raise ValueError(f'invalid checksum in address: {addr}')
+        raise ValueError(f"invalid checksum in address: {addr}")

     if lower != addr:
         prefix = prefix.upper()
@@ -127,6 +126,7 @@
     # Drop the 40 bit checksum
     return prefix, data[:-8]

+
 #
 # External Interface
 #
@@ -137,23 +137,23 @@


 def decode(address):
-    '''Given a cashaddr address, return a tuple
+    """Given a cashaddr address, return a tuple

-          (prefix, kind, hash)
-    '''
+    (prefix, kind, hash)
+    """
     if not isinstance(address, str):
-        raise TypeError('address must be a string')
+        raise TypeError("address must be a string")

     prefix, payload = _decode_payload(address)

     # Ensure there isn't extra padding
     extrabits = len(payload) * 5 % 8
     if extrabits >= 5:
-        raise ValueError(f'excess padding in address {address}')
+        raise ValueError(f"excess padding in address {address}")

     # Ensure extrabits are zeros
     if payload[-1] & ((1 << extrabits) - 1):
-        raise ValueError(f'non-zero padding in address {address}')
+        raise ValueError(f"non-zero padding in address {address}")

     decoded = _convertbits(payload, 5, 8, False)
     version = decoded[0]
@@ -164,11 +164,12 @@
         size <<= 1
     if size != len(addr_hash):
         raise ValueError(
-            f'address hash has length {len(addr_hash)} but expected {size}')
+            f"address hash has length {len(addr_hash)} but expected {size}"
+        )

     kind = version >> 3
     if kind not in (SCRIPT_TYPE, PUBKEY_TYPE):
-        raise ValueError(f'unrecognised address type {kind}')
+        raise ValueError(f"unrecognised address type {kind}")

     return prefix, kind, addr_hash

@@ -176,19 +177,19 @@
 def encode(prefix, kind, addr_hash):
     """Encode a cashaddr address without prefix and separator."""
     if not isinstance(prefix, str):
-        raise TypeError('prefix must be a string')
+        raise TypeError("prefix must be a string")

     if not isinstance(addr_hash, (bytes, bytearray)):
-        raise TypeError('addr_hash must be binary bytes')
+        raise TypeError("addr_hash must be binary bytes")

     if kind not in (SCRIPT_TYPE, PUBKEY_TYPE):
-        raise ValueError(f'unrecognised address type {kind}')
+        raise ValueError(f"unrecognised address type {kind}")

     payload = _pack_addr_data(kind, addr_hash)
     checksum = _create_checksum(prefix, payload)
-    return ''.join([_CHARSET[d] for d in (payload + checksum)])
+    return "".join([_CHARSET[d] for d in (payload + checksum)])


 def encode_full(prefix, kind, addr_hash):
     """Encode a full cashaddr address, with prefix and separator."""
-    return ':'.join([prefix, encode(prefix, kind, addr_hash)])
+    return ":".join([prefix, encode(prefix, kind, addr_hash)])
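A hedged round trip through the codec above; PUBKEY_TYPE is defined near the top of this module (not shown in these hunks), so treat that import as an assumption:

from test_framework.cashaddr import PUBKEY_TYPE, decode, encode_full

addr = encode_full("ecregtest", PUBKEY_TYPE, bytes(20))
prefix, kind, addr_hash = decode(addr)
assert prefix == "ecregtest" and kind == PUBKEY_TYPE
assert bytes(addr_hash) == bytes(20)  # a 20-byte hash round-trips unchanged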
diff --git a/test/functional/test_framework/cdefs.py b/test/functional/test_framework/cdefs.py
--- a/test/functional/test_framework/cdefs.py
+++ b/test/functional/test_framework/cdefs.py
@@ -20,24 +20,26 @@

     Returns None if it cannot find a suitable folder.
     """
+
     def contains_src(path_to_check):
         if not path_to_check:
             return False
         else:
-            cand_path = os.path.join(path_to_check, 'src')
+            cand_path = os.path.join(path_to_check, "src")
             return os.path.exists(cand_path) and os.path.isdir(cand_path)

-    srcdir = os.environ.get('SRCDIR', '')
+    srcdir = os.environ.get("SRCDIR", "")
     if contains_src(srcdir):
         return srcdir

     # Try to work it based out on main module
     import sys
-    mainmod = sys.modules['__main__']
-    mainmod_path = getattr(mainmod, '__file__', '')
-    if mainmod_path and mainmod_path.endswith('.py'):
+
+    mainmod = sys.modules["__main__"]
+    mainmod_path = getattr(mainmod, "__file__", "")
+    if mainmod_path and mainmod_path.endswith(".py"):
         maybe_top = mainmod_path
-        while maybe_top != '/':
+        while maybe_top != "/":
             maybe_top = os.path.abspath(os.path.dirname(maybe_top))
             if contains_src(maybe_top):
                 return maybe_top
@@ -47,8 +49,11 @@


 # Slurp in consensus.h contents
-_consensus_h_fh = open(os.path.join(get_srcdir(), 'src', 'consensus',
-                                    'consensus.h'), 'rt', encoding='utf-8')
+_consensus_h_fh = open(
+    os.path.join(get_srcdir(), "src", "consensus", "consensus.h"),
+    "rt",
+    encoding="utf-8",
+)
 _consensus_h_contents = _consensus_h_fh.read()
 _consensus_h_fh.close()
@@ -61,7 +66,7 @@
 LEGACY_MAX_BLOCK_SIZE = ONE_MEGABYTE

 # Default setting for maximum allowed size for a block, in bytes
-match = re.search(r'DEFAULT_MAX_BLOCK_SIZE = (.+);', _consensus_h_contents)
+match = re.search(r"DEFAULT_MAX_BLOCK_SIZE = (.+);", _consensus_h_contents)
 if match is None:
     raise RuntimeError("DEFAULT_MAX_BLOCK_SIZE value not found in consensus.h")
 DEFAULT_MAX_BLOCK_SIZE = eval(match.group(1))
diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py
--- a/test/functional/test_framework/coverage.py
+++ b/test/functional/test_framework/coverage.py
@@ -10,7 +10,7 @@

 import os

-REFERENCE_FILENAME = 'rpc_interface.txt'
+REFERENCE_FILENAME = "rpc_interface.txt"


 class AuthServiceProxyWrapper:
@@ -52,12 +52,13 @@
         rpc_method = self.auth_service_proxy_instance._service_name

         if self.coverage_logfile:
-            with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
+            with open(self.coverage_logfile, "a+", encoding="utf8") as f:
                 f.write(f"{rpc_method}\n")

     def __truediv__(self, relative_uri):
-        return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
-                                       self.coverage_logfile)
+        return AuthServiceProxyWrapper(
+            self.auth_service_proxy_instance / relative_uri, self.coverage_logfile
+        )

     def get_request(self, *args, **kwargs):
         self._log_call()
@@ -71,8 +72,7 @@
     This file will contain a list of RPC commands covered.
     """
     pid = str(os.getpid())
-    return os.path.join(
-        dirname, f"coverage.pid{pid}.node{str(n_node)}.txt")
+    return os.path.join(dirname, f"coverage.pid{pid}.node{str(n_node)}.txt")


 def write_all_rpc_commands(dirname, node):
@@ -94,17 +94,17 @@
     if os.path.isfile(filename):
         return False

-    help_output = node.help().split('\n')
+    help_output = node.help().split("\n")
     commands = set()

     for line in help_output:
         line = line.strip()

         # Ignore blanks and headers
-        if line and not line.startswith('='):
+        if line and not line.startswith("="):
             commands.add(f"{line.split()[0]}\n")

-    with open(filename, 'w', encoding='utf8') as f:
+    with open(filename, "w", encoding="utf8") as f:
         f.writelines(list(commands))

     return True
diff --git a/test/functional/test_framework/descriptors.py b/test/functional/test_framework/descriptors.py
--- a/test/functional/test_framework/descriptors.py
+++ b/test/functional/test_framework/descriptors.py
@@ -6,14 +6,11 @@

 import re

-INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
+INPUT_CHARSET = (
+    "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
+)
 CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
-GENERATOR = [
-    0xf5dee51989,
-    0xa9fdca3312,
-    0x1bab10e32d,
-    0x3706b1677a,
-    0x644d626ffd]
+GENERATOR = [0xF5DEE51989, 0xA9FDCA3312, 0x1BAB10E32D, 0x3706B1677A, 0x644D626FFD]


 def descsum_polymod(symbols):
@@ -21,7 +18,7 @@
     chk = 1
     for value in symbols:
         top = chk >> 35
-        chk = (chk & 0x7ffffffff) << 5 ^ value
+        chk = (chk & 0x7FFFFFFFF) << 5 ^ value
         for i in range(5):
             chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
     return chk
@@ -51,27 +48,28 @@
     """Add a checksum to a descriptor without"""
     symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
     checksum = descsum_polymod(symbols) ^ 1
-    return s + '#' + \
-        ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31]
-                for i in range(8))
+    return (
+        s
+        + "#"
+        + "".join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
+    )


 def descsum_check(s, require=True):
     """Verify that the checksum is correct in a descriptor"""
-    if '#' not in s:
+    if "#" not in s:
         return not require
-    if s[-9] != '#':
+    if s[-9] != "#":
         return False
     if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
         return False
-    symbols = descsum_expand(
-        s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
+    symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
     return descsum_polymod(symbols) == 1


 def drop_origins(s):
-    '''Drop the key origins from a descriptor'''
-    desc = re.sub(r'\[.+?\]', '', s)
-    if '#' in s:
-        desc = desc[:desc.index('#')]
+    """Drop the key origins from a descriptor"""
+    desc = re.sub(r"\[.+?\]", "", s)
+    if "#" in s:
+        desc = desc[: desc.index("#")]

     return descsum_create(desc)
diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py
--- a/test/functional/test_framework/key.py
+++ b/test/functional/test_framework/key.py
@@ -27,9 +27,9 @@
         while n & 1 == 0:
             n >>= 1
             r = k & 7
-            t ^= (r == 3 or r == 5)
+            t ^= r == 3 or r == 5
         n, k = k, n
-        t ^= (n & k & 3 == 3)
+        t ^= n & k & 3 == 3
         n = n % k
     if k == 1:
         return -1 if t else 1
@@ -75,8 +75,17 @@
         x1, y1, z1 = p1
         z2 = pow(z1, 2, self.p)
         z4 = pow(z2, 2, self.p)
-        return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 *
-                            z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0
+        return (
+            z1 != 0
+            and (
+                pow(x1, 3, self.p)
+                + self.a * x1 * z4
+                + self.b * z2 * z4
+                - pow(y1, 2, self.p)
+            )
+            % self.p
+            == 0
+        )

     def is_x_coord(self, x):
         """Test whether x is a valid X coordinate on the curve."""
@@ -122,7 +131,7 @@
         u2 = (x2 * z1_2) % self.p
         s2 = (y2 * z1_3) % self.p
         if x1 == u2:
-            if (y1 != s2):
+            if y1 != s2:
                 return (0, 1, 0)
             return self.double(p1)
         h = u2 - x1
@@ -156,7 +165,7 @@
         s1 = (y1 * z2_3) % self.p
         s2 = (y2 * z1_3) % self.p
         if u1 == u2:
-            if (s1 != s2):
+            if s1 != s2:
                 return (0, 1, 0)
             return self.double(p1)
         h = u2 - u1
@@ -177,8 +186,8 @@
         r = (0, 1, 0)
         for i in range(255, -1, -1):
             r = self.double(r)
-            for (p, n) in ps:
-                if ((n >> i) & 1):
+            for p, n in ps:
+                if (n >> i) & 1:
                     r = self.add(r, p)
         return r
@@ -187,7 +196,8 @@
 SECP256K1_G = (
     0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
     0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8,
-    1)
+    1,
+)
 SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
@@ -201,15 +211,18 @@

     def set(self, data):
         """Construct a public key from a serialization in compressed or uncompressed format"""
-        if (len(data) == 65 and data[0] == 0x04):
-            p = (int.from_bytes(data[1:33], 'big'),
-                 int.from_bytes(data[33:65], 'big'), 1)
+        if len(data) == 65 and data[0] == 0x04:
+            p = (
+                int.from_bytes(data[1:33], "big"),
+                int.from_bytes(data[33:65], "big"),
+                1,
+            )
             self.valid = SECP256K1.on_curve(p)
             if self.valid:
                 self.p = p
                 self.compressed = False
-        elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)):
-            x = int.from_bytes(data[1:33], 'big')
+        elif len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03):
+            x = int.from_bytes(data[1:33], "big")
             if SECP256K1.is_x_coord(x):
                 p = SECP256K1.lift_x(x)
                 if (p[1] & 1) != (data[0] & 1):
@@ -236,49 +249,48 @@
         if p is None:
             return None
         if self.compressed:
-            return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big')
+            return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, "big")
         else:
-            return bytes([0x04]) + p[0].to_bytes(32, 'big') + \
-                p[1].to_bytes(32, 'big')
+            return bytes([0x04]) + p[0].to_bytes(32, "big") + p[1].to_bytes(32, "big")

     def verify_ecdsa(self, sig, msg, low_s=True):
         """Verify a strictly DER-encoded ECDSA signature against this pubkey."""
         assert self.valid
-        if (sig[1] + 2 != len(sig)):
+        if sig[1] + 2 != len(sig):
             return False
-        if (len(sig) < 4):
+        if len(sig) < 4:
             return False
-        if (sig[0] != 0x30):
+        if sig[0] != 0x30:
             return False
-        if (sig[2] != 0x02):
+        if sig[2] != 0x02:
             return False
         rlen = sig[3]
-        if (len(sig) < 6 + rlen):
+        if len(sig) < 6 + rlen:
             return False
         if rlen < 1 or rlen > 33:
             return False
         if sig[4] >= 0x80:
             return False
-        if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)):
+        if rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80):
             return False
-        r = int.from_bytes(sig[4:4 + rlen], 'big')
-        if (sig[4 + rlen] != 0x02):
+        r = int.from_bytes(sig[4 : 4 + rlen], "big")
+        if sig[4 + rlen] != 0x02:
             return False
         slen = sig[5 + rlen]
         if slen < 1 or slen > 33:
             return False
-        if (len(sig) != 6 + rlen + slen):
+        if len(sig) != 6 + rlen + slen:
             return False
         if sig[6 + rlen] >= 0x80:
             return False
-        if (slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80)):
+        if slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80):
             return False
-        s = int.from_bytes(sig[6 + rlen:6 + rlen + slen], 'big')
+        s = int.from_bytes(sig[6 + rlen : 6 + rlen + slen], "big")
         if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER:
             return False
         if low_s and s >= SECP256K1_ORDER_HALF:
             return False
-        z = int.from_bytes(msg, 'big')
+        z = int.from_bytes(msg, "big")
         w = modinv(s, SECP256K1_ORDER)
         u1 = z * w % SECP256K1_ORDER
         u2 = r * w % SECP256K1_ORDER
@@ -293,13 +305,10 @@
         assert len(msg32) == 32

         Rx = sig[:32]
-        s = int.from_bytes(sig[32:], 'big')
+        s = int.from_bytes(sig[32:], "big")
         e = int.from_bytes(
-            hashlib.sha256(
-                Rx +
-                self.get_bytes() +
-                msg32).digest(),
-            'big')
+            hashlib.sha256(Rx + self.get_bytes() + msg32).digest(), "big"
+        )

         nege = SECP256K1_ORDER - e
         R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, s), (self.p, nege)]))
@@ -309,7 +318,7 @@
         if jacobi_symbol(R[1], SECP256K1.p) == -1:
             return False

-        return R[0] == int.from_bytes(Rx, 'big')
+        return R[0] == int.from_bytes(Rx, "big")


 class ECKey:
@@ -321,26 +330,20 @@
     def set(self, secret, compressed):
         """Construct a private key object with given 32-byte secret and compressed flag."""
         assert len(secret) == 32
-        secret = int.from_bytes(secret, 'big')
-        self.valid = (secret > 0 and secret < SECP256K1_ORDER)
+        secret = int.from_bytes(secret, "big")
+        self.valid = secret > 0 and secret < SECP256K1_ORDER
         if self.valid:
             self.secret = secret
             self.compressed = compressed

     def generate(self, compressed=True):
         """Generate a random private key (compressed or uncompressed)."""
-        self.set(
-            random.randrange(
-                1,
-                SECP256K1_ORDER).to_bytes(
-                32,
-                'big'),
-            compressed)
+        self.set(random.randrange(1, SECP256K1_ORDER).to_bytes(32, "big"), compressed)

     def get_bytes(self):
         """Retrieve the 32-byte representation of this key."""
         assert self.valid
-        return self.secret.to_bytes(32, 'big')
+        return self.secret.to_bytes(32, "big")

     @property
     def is_valid(self):
@@ -363,7 +366,7 @@
     def sign_ecdsa(self, msg, low_s=True):
         """Construct a DER-encoded ECDSA signature with this key."""
         assert self.valid
-        z = int.from_bytes(msg, 'big')
+        z = int.from_bytes(msg, "big")
         # Note: no RFC6979, but a simple random nonce (some tests rely on
         # distinct transactions for the same operation)
         k = random.randrange(1, SECP256K1_ORDER)
@@ -372,11 +375,15 @@
         s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER
         if low_s and s > SECP256K1_ORDER_HALF:
             s = SECP256K1_ORDER - s
-        rb = r.to_bytes((r.bit_length() + 8) // 8, 'big')
-        sb = s.to_bytes((s.bit_length() + 8) // 8, 'big')
-        return b'\x30' + \
-            bytes([4 + len(rb) + len(sb), 2, len(rb)]) + \
-            rb + bytes([2, len(sb)]) + sb
+        rb = r.to_bytes((r.bit_length() + 8) // 8, "big")
+        sb = s.to_bytes((s.bit_length() + 8) // 8, "big")
+        return (
+            b"\x30"
+            + bytes([4 + len(rb) + len(sb), 2, len(rb)])
+            + rb
+            + bytes([2, len(sb)])
+            + sb
+        )

     def sign_schnorr(self, msg32):
         """Create Schnorr signature (BIP-Schnorr convention)."""
@@ -393,15 +400,12 @@
         if jacobi_symbol(R[1], SECP256K1.p) == -1:
             k = SECP256K1_ORDER - k

-        Rx = R[0].to_bytes(32, 'big')
+        Rx = R[0].to_bytes(32, "big")
         e = int.from_bytes(
-            hashlib.sha256(
-                Rx +
-                pubkey.get_bytes() +
-                msg32).digest(),
-            'big')
-        s = (k + e * int.from_bytes(self.get_bytes(), 'big')) % SECP256K1_ORDER
-        sig = Rx + s.to_bytes(32, 'big')
+            hashlib.sha256(Rx + pubkey.get_bytes() + msg32).digest(), "big"
+        )
+        s = (k + e * int.from_bytes(self.get_bytes(), "big")) % SECP256K1_ORDER
+        sig = Rx + s.to_bytes(32, "big")
         assert pubkey.verify_schnorr(sig, msg32)
         return sig
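A hedged sketch exercising the signing paths above; get_pubkey() lives in the untouched part of ECKey, so treat that name as an assumption:

import hashlib
from test_framework.key import ECKey

key = ECKey()
key.generate()  # random 32-byte secret, compressed by default
msg = hashlib.sha256(b"something").digest()  # both paths expect a 32-byte digest
assert key.get_pubkey().verify_ecdsa(key.sign_ecdsa(msg), msg)  # DER, low-S
assert key.get_pubkey().verify_schnorr(key.sign_schnorr(msg), msg)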
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -49,21 +49,21 @@
 MAX_HEADERS_RESULTS = 2000  # Number of headers sent in one getheaders result
 MAX_INV_SIZE = 50000  # Maximum number of entries in an 'inv' protocol message

-NODE_NETWORK = (1 << 0)
-NODE_GETUTXO = (1 << 1)
-NODE_BLOOM = (1 << 2)
+NODE_NETWORK = 1 << 0
+NODE_GETUTXO = 1 << 1
+NODE_BLOOM = 1 << 2
 # NODE_WITNESS = (1 << 3)
 # NODE_XTHIN = (1 << 4) # removed in v0.22.12
-NODE_COMPACT_FILTERS = (1 << 6)
-NODE_NETWORK_LIMITED = (1 << 10)
-NODE_AVALANCHE = (1 << 24)
+NODE_COMPACT_FILTERS = 1 << 6
+NODE_NETWORK_LIMITED = 1 << 10
+NODE_AVALANCHE = 1 << 24

 MSG_TX = 1
 MSG_BLOCK = 2
 MSG_FILTERED_BLOCK = 3
 MSG_CMPCT_BLOCK = 4
-MSG_AVA_PROOF = 0x1f000001
-MSG_TYPE_MASK = 0xffffffff >> 2
+MSG_AVA_PROOF = 0x1F000001
+MSG_TYPE_MASK = 0xFFFFFFFF >> 2

 FILTER_TYPE_BASIC = 0
@@ -71,7 +71,7 @@

 def sha256(s):
-    return hashlib.new('sha256', s).digest()
+    return hashlib.new("sha256", s).digest()


 def hash256(s):
@@ -214,19 +214,16 @@

 # Objects that map to bitcoind objects, which can be serialized/deserialized

+
 class CAddress:
     __slots__ = ("net", "ip", "nServices", "port", "time")

     # see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki
     NET_IPV4 = 1

-    ADDRV2_NET_NAME = {
-        NET_IPV4: "IPv4"
-    }
+    ADDRV2_NET_NAME = {NET_IPV4: "IPv4"}

-    ADDRV2_ADDRESS_LENGTH = {
-        NET_IPV4: 4
-    }
+    ADDRV2_ADDRESS_LENGTH = {NET_IPV4: 4}

     def __init__(self):
         self.time = 0
@@ -326,8 +323,11 @@
         return f"CInv(type={self.typemap[self.type]} hash={uint256_hex(self.hash)})"

     def __eq__(self, other):
-        return isinstance(
-            other, CInv) and self.hash == other.hash and self.type == other.type
+        return (
+            isinstance(other, CInv)
+            and self.hash == other.hash
+            and self.type == other.type
+        )


 class CBlockLocator:
@@ -501,8 +501,16 @@


 class CBlockHeader:
-    __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
-                 "nTime", "nVersion", "sha256")
+    __slots__ = (
+        "hash",
+        "hashMerkleRoot",
+        "hashPrevBlock",
+        "nBits",
+        "nNonce",
+        "nTime",
+        "nVersion",
+        "sha256",
+    )

     def __init__(self, header=None):
         if header is None:
@@ -665,8 +673,14 @@

 # This is what we send on the wire, in a cmpctblock message.
 class P2PHeaderAndShortIDs:
-    __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
-                 "shortids", "shortids_length")
+    __slots__ = (
+        "header",
+        "nonce",
+        "prefilled_txn",
+        "prefilled_txn_length",
+        "shortids",
+        "shortids_length",
+    )

     def __init__(self):
         self.header = CBlockHeader()
@@ -683,8 +697,7 @@
         for _ in range(self.shortids_length):
             # shortids are defined to be 6 bytes in the spec, so append
             # two zero bytes and read it in as an 8-byte number
-            self.shortids.append(
-                struct.unpack("
 bytes:
         r = self.utxo.serialize()
         height_ser = self.height << 1 | int(self.is_coinbase)
-        r += struct.pack('
 int:
-        return uint256_from_str(hash256(
-            ser_uint256(self.limited_proofid) + ser_string(self.proof_master)))
+        return uint256_from_str(
+            hash256(ser_uint256(self.limited_proofid) + ser_string(self.proof_master))
+        )

     def deserialize(self, f):
         self.limited_proofid = deser_uint256(f)
@@ -1169,11 +1197,13 @@
         return r

     def __repr__(self):
-        return f"AvalancheDelegation(" \
-            f"limitedProofId={uint256_hex(self.limited_proofid)}, " \
-            f"proofMaster={self.proof_master.hex()}, " \
-            f"proofid={uint256_hex(self.proofid)}, " \
-            f"levels={self.levels})"
+        return (
+            "AvalancheDelegation("
+            f"limitedProofId={uint256_hex(self.limited_proofid)}, "
+            f"proofMaster={self.proof_master.hex()}, "
+            f"proofid={uint256_hex(self.proofid)}, "
+            f"levels={self.levels})"
+        )

     def getid(self):
         h = ser_uint256(self.proofid)
@@ -1267,9 +1297,20 @@

 # Objects that correspond to messages on the wire

+
 class msg_version:
-    __slots__ = ("addrFrom", "addrTo", "nNonce", "relay", "nServices",
-                 "nStartingHeight", "nTime", "nVersion", "strSubVer", "nExtraEntropy")
+    __slots__ = (
+        "addrFrom",
+        "addrTo",
+        "nNonce",
+        "relay",
+        "nServices",
+        "nStartingHeight",
+        "nTime",
+        "nVersion",
+        "strSubVer",
+        "nExtraEntropy",
+    )
     msgtype = b"version"

     def __init__(self):
@@ -1279,7 +1320,7 @@
         self.addrTo = CAddress()
         self.addrFrom = CAddress()
         self.nNonce = random.getrandbits(64)
-        self.strSubVer = ''
+        self.strSubVer = ""
         self.nStartingHeight = -1
         self.relay = 0
         self.nExtraEntropy = random.getrandbits(64)
@@ -1294,7 +1335,7 @@
         self.addrFrom = CAddress()
         self.addrFrom.deserialize(f, with_time=False)
         self.nNonce = struct.unpack("
> (32 - bits))
+    return ((v << bits) & 0xFFFFFFFF) | (v >> (32 - bits))


 def chacha20_doubleround(s):
@@ -23,23 +25,25 @@
     See https://cr.yp.to/chacha/chacha-20080128.pdf and https://tools.ietf.org/html/rfc8439
     """
-    QUARTER_ROUNDS = [(0, 4, 8, 12),
-                      (1, 5, 9, 13),
-                      (2, 6, 10, 14),
-                      (3, 7, 11, 15),
-                      (0, 5, 10, 15),
-                      (1, 6, 11, 12),
-                      (2, 7, 8, 13),
-                      (3, 4, 9, 14)]
+    QUARTER_ROUNDS = [
+        (0, 4, 8, 12),
+        (1, 5, 9, 13),
+        (2, 6, 10, 14),
+        (3, 7, 11, 15),
+        (0, 5, 10, 15),
+        (1, 6, 11, 12),
+        (2, 7, 8, 13),
+        (3, 4, 9, 14),
+    ]

     for a, b, c, d in QUARTER_ROUNDS:
-        s[a] = (s[a] + s[b]) & 0xffffffff
+        s[a] = (s[a] + s[b]) & 0xFFFFFFFF
         s[d] = rot32(s[d] ^ s[a], 16)
-        s[c] = (s[c] + s[d]) & 0xffffffff
+        s[c] = (s[c] + s[d]) & 0xFFFFFFFF
         s[b] = rot32(s[b] ^ s[c], 12)
-        s[a] = (s[a] + s[b]) & 0xffffffff
+        s[a] = (s[a] + s[b]) & 0xFFFFFFFF
         s[d] = rot32(s[d] ^ s[a], 8)
-        s[c] = (s[c] + s[d]) & 0xffffffff
+        s[c] = (s[c] + s[d]) & 0xFFFFFFFF
         s[b] = rot32(s[b] ^ s[c], 7)
@@ -47,11 +49,11 @@
     """Specialized ChaCha20 implementation with 32-byte key, 0 IV, 384-byte output."""
     # See RFC 8439 section 2.3 for chacha20 parameters
-    CONSTANTS = [0x61707865, 0x3320646e, 0x79622d32, 0x6b206574]
+    CONSTANTS = [0x61707865, 0x3320646E, 0x79622D32, 0x6B206574]

     key_bytes = [0] * 8
     for i in range(8):
-        key_bytes[i] = int.from_bytes(key32[(4 * i):(4 * (i + 1))], 'little')
+        key_bytes[i] = int.from_bytes(key32[(4 * i) : (4 * (i + 1))], "little")

     INITIALIZATION_VECTOR = [0] * 4
     init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR
@@ -62,7 +64,7 @@
     for _ in range(10):
         chacha20_doubleround(s)
     for i in range(16):
-        out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little'))
+        out.extend(((s[i] + init[i]) & 0xFFFFFFFF).to_bytes(4, "little"))
     return bytes(out)
@@ -70,7 +72,7 @@
     """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations."""
     bytes384 = chacha20_32_to_384(data)
-    return int.from_bytes(bytes384, 'little')
+    return int.from_bytes(bytes384, "little")


 class MuHash3072:
@@ -90,34 +92,33 @@
     def insert(self, data):
         """Insert a byte array data in the set."""
         data_hash = hashlib.sha256(data).digest()
-        self.numerator = (
-            self.numerator * data_to_num3072(data_hash)) % self.MODULUS
+        self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS

     def remove(self, data):
         """Remove a byte array from the set."""
         data_hash = hashlib.sha256(data).digest()
         self.denominator = (
-            self.denominator * data_to_num3072(data_hash)) % self.MODULUS
+            self.denominator * data_to_num3072(data_hash)
+        ) % self.MODULUS

     def digest(self):
         """Extract the final hash. Does not modify this object."""
-        val = (self.numerator *
-               modinv(self.denominator, self.MODULUS)) % self.MODULUS
-        bytes384 = val.to_bytes(384, 'little')
+        val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS
+        bytes384 = val.to_bytes(384, "little")
         return hashlib.sha256(bytes384).digest()


 class TestFrameworkMuhash(unittest.TestCase):
     def test_muhash(self):
         muhash = MuHash3072()
-        muhash.insert(b'\x00' * 32)
-        muhash.insert((b'\x01' + b'\x00' * 31))
-        muhash.remove((b'\x02' + b'\x00' * 31))
+        muhash.insert(b"\x00" * 32)
+        muhash.insert((b"\x01" + b"\x00" * 31))
+        muhash.remove((b"\x02" + b"\x00" * 31))
         finalized = muhash.digest()
         # This mirrors the result in the C++ MuHash3072 unit test
         self.assertEqual(
             finalized[::-1].hex(),
-            "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863"
+            "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863",
         )

     def test_chacha20(self):
@@ -129,7 +130,9 @@
         # vectors.
         chacha_check(
             [0] * 32,
-            "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586")
+            "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586",
+        )
         chacha_check(
             [0] * 31 + [1],
-            "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963")
+            "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963",
+        )
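The MuHash3072 rework above preserves the property the unit test leans on: the set hash is a product of per-element hashes modulo MODULUS, so a removal cancels an insertion. A small grounded check:

from test_framework.muhash import MuHash3072

a = MuHash3072()
a.insert(b"\x01" * 32)

b = MuHash3072()
b.insert(b"\x01" * 32)
b.insert(b"\x02" * 32)
b.remove(b"\x02" * 32)  # divides the accumulator by the same 3072-bit factor

assert a.digest() == b.digest()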
int.from_bytes(key32[(4 * i):(4 * (i + 1))], 'little') + key_bytes[i] = int.from_bytes(key32[(4 * i) : (4 * (i + 1))], "little") INITIALIZATION_VECTOR = [0] * 4 init = CONSTANTS + key_bytes + INITIALIZATION_VECTOR @@ -62,7 +64,7 @@ for _ in range(10): chacha20_doubleround(s) for i in range(16): - out.extend(((s[i] + init[i]) & 0xffffffff).to_bytes(4, 'little')) + out.extend(((s[i] + init[i]) & 0xFFFFFFFF).to_bytes(4, "little")) return bytes(out) @@ -70,7 +72,7 @@ """Hash a 32-byte array data to a 3072-bit number using 6 Chacha20 operations.""" bytes384 = chacha20_32_to_384(data) - return int.from_bytes(bytes384, 'little') + return int.from_bytes(bytes384, "little") class MuHash3072: @@ -90,34 +92,33 @@ def insert(self, data): """Insert a byte array data in the set.""" data_hash = hashlib.sha256(data).digest() - self.numerator = ( - self.numerator * data_to_num3072(data_hash)) % self.MODULUS + self.numerator = (self.numerator * data_to_num3072(data_hash)) % self.MODULUS def remove(self, data): """Remove a byte array from the set.""" data_hash = hashlib.sha256(data).digest() self.denominator = ( - self.denominator * data_to_num3072(data_hash)) % self.MODULUS + self.denominator * data_to_num3072(data_hash) + ) % self.MODULUS def digest(self): """Extract the final hash. Does not modify this object.""" - val = (self.numerator * - modinv(self.denominator, self.MODULUS)) % self.MODULUS - bytes384 = val.to_bytes(384, 'little') + val = (self.numerator * modinv(self.denominator, self.MODULUS)) % self.MODULUS + bytes384 = val.to_bytes(384, "little") return hashlib.sha256(bytes384).digest() class TestFrameworkMuhash(unittest.TestCase): def test_muhash(self): muhash = MuHash3072() - muhash.insert(b'\x00' * 32) - muhash.insert((b'\x01' + b'\x00' * 31)) - muhash.remove((b'\x02' + b'\x00' * 31)) + muhash.insert(b"\x00" * 32) + muhash.insert((b"\x01" + b"\x00" * 31)) + muhash.remove((b"\x02" + b"\x00" * 31)) finalized = muhash.digest() # This mirrors the result in the C++ MuHash3072 unit test self.assertEqual( finalized[::-1].hex(), - "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863" + "10d312b100cbd32ada024a6646e40d3482fcff103668d2625f10002a607d5863", ) def test_chacha20(self): @@ -129,7 +130,9 @@ # vectors. chacha_check( [0] * 32, - "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586") + "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586", + ) chacha_check( [0] * 31 + [1], - "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963") + "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea817e9ad275ae546963", + ) diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py --- a/test/functional/test_framework/netutil.py +++ b/test/functional/test_framework/netutil.py @@ -23,15 +23,15 @@ # STATE_CLOSE = '07' # STATE_CLOSE_WAIT = '08' # STATE_LAST_ACK = '09' -STATE_LISTEN = '0A' +STATE_LISTEN = "0A" # STATE_CLOSING = '0B' def get_socket_inodes(pid): - ''' + """ Get list of socket inodes for process pid. 
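
# [Illustrative aside, not part of the diff] The MuHash3072 class above tracks
# set membership as a running product modulo a prime: insert() multiplies the
# numerator, remove() multiplies the denominator, and digest() divides via a
# modular inverse. A toy restatement with a small prime (the real class uses a
# 3072-bit modulus and ChaCha20-derived elements):
MOD = 2**61 - 1  # small Mersenne prime, for illustration only

def elem(data: bytes) -> int:
    return (int.from_bytes(data, "little") % (MOD - 1)) + 1  # nonzero element

num = den = 1
num = (num * elem(b"a")) % MOD
num = (num * elem(b"b")) % MOD
den = (den * elem(b"b")) % MOD          # "remove" b from the set
val = (num * pow(den, -1, MOD)) % MOD   # pow(x, -1, m): modular inverse
assert val == elem(b"a")                # only "a" remains
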
- ''' - base = f'/proc/{pid}/fd' + """ + base = f"/proc/{pid}/fd" inodes = [] for item in os.listdir(base): try: @@ -46,40 +46,40 @@ else: raise else: - if target.startswith('socket:'): + if target.startswith("socket:"): inodes.append(int(target[8:-1])) return inodes def _remove_empty(array): - return [x for x in array if x != ''] + return [x for x in array if x != ""] def _convert_ip_port(array): - host, port = array.split(':') + host, port = array.split(":") # convert host from mangled-per-four-bytes form as used by kernel host = bytes.fromhex(host) - host_out = '' + host_out = "" for x in range(0, len(host) // 4): - (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4]) - host_out += f'{val:08x}' + (val,) = struct.unpack("=I", host[x * 4 : (x + 1) * 4]) + host_out += f"{val:08x}" return host_out, int(port, 16) -def netstat(typ='tcp'): - ''' +def netstat(typ="tcp"): + """ Function to return a list with status of tcp connections at linux systems To get pid of all network process running on system, you must run this script as superuser - ''' - with open(f"/proc/net/{typ}", 'r', encoding='utf8') as f: + """ + with open(f"/proc/net/{typ}", "r", encoding="utf8") as f: content = f.readlines() content.pop(0) result = [] for line in content: # Split lines and remove empty spaces. - line_array = _remove_empty(line.split(' ')) + line_array = _remove_empty(line.split(" ")) tcp_id = line_array[0] l_addr = _convert_ip_port(line_array[1]) r_addr = _convert_ip_port(line_array[2]) @@ -92,23 +92,24 @@ def get_bind_addrs(pid): - ''' + """ Get bind addresses as (host,port) tuples for process pid. - ''' + """ inodes = get_socket_inodes(pid) bind_addrs = [] - for conn in netstat('tcp') + netstat('tcp6'): + for conn in netstat("tcp") + netstat("tcp6"): if conn[3] == STATE_LISTEN and conn[4] in inodes: bind_addrs.append(conn[1]) return bind_addrs + # from: http://code.activestate.com/recipes/439093/ def all_interfaces(): - ''' + """ Return all interfaces that are up - ''' + """ # Linux only, so only import when required import fcntl @@ -118,36 +119,43 @@ max_possible = 8 # initial value while True: inbytes = max_possible * struct_size - names = array.array('B', b'\0' * inbytes) - outbytes = struct.unpack('iL', fcntl.ioctl( - s.fileno(), - 0x8912, # SIOCGIFCONF - struct.pack('iL', inbytes, names.buffer_info()[0]) - ))[0] + names = array.array("B", b"\0" * inbytes) + outbytes = struct.unpack( + "iL", + fcntl.ioctl( + s.fileno(), + 0x8912, # SIOCGIFCONF + struct.pack("iL", inbytes, names.buffer_info()[0]), + ), + )[0] if outbytes == inbytes: max_possible *= 2 else: break namestr = names.tobytes() - return [(namestr[i:i + 16].split(b'\0', 1)[0], - socket.inet_ntoa(namestr[i + 20:i + 24])) - for i in range(0, outbytes, struct_size)] + return [ + ( + namestr[i : i + 16].split(b"\0", 1)[0], + socket.inet_ntoa(namestr[i + 20 : i + 24]), + ) + for i in range(0, outbytes, struct_size) + ] def addr_to_hex(addr): - ''' + """ Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. - ''' - if '.' in addr: # IPv4 - addr = [int(x) for x in addr.split('.')] - elif ':' in addr: # IPv6 + """ + if "." 
in addr: # IPv4 + addr = [int(x) for x in addr.split(".")] + elif ":" in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 - addr = addr.split(':') + addr = addr.split(":") for i, comp in enumerate(addr): - if comp == '': + if comp == "": # skip empty component at beginning or end if i == 0 or i == (len(addr) - 1): continue @@ -156,19 +164,19 @@ else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) - sub[x].append(val & 0xff) + sub[x].append(val & 0xFF) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0) addr = sub[0] + ([0] * nullbytes) + sub[1] else: - raise ValueError(f'Could not parse address {addr}') + raise ValueError(f"Could not parse address {addr}") return bytearray(addr).hex() def test_ipv6_local(): - ''' + """ Check for (local) IPv6 support. - ''' + """ import socket # By using SOCK_DGRAM this will not actually make a connection, but it will @@ -176,7 +184,7 @@ have_ipv6 = True try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) - s.connect(('::1', 0)) + s.connect(("::1", 0)) except socket.error: have_ipv6 = False return have_ipv6 diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -178,24 +178,25 @@ self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) loop = NetworkThread.network_event_loop - logger.debug( - f'Connecting to Bitcoin ABC Node: {self.dstaddr}:{self.dstport}') + logger.debug(f"Connecting to Bitcoin ABC Node: {self.dstaddr}:{self.dstport}") coroutine = loop.create_connection( - lambda: self, host=self.dstaddr, port=self.dstport) + lambda: self, host=self.dstaddr, port=self.dstport + ) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) def peer_accept_connection( - self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): - self.peer_connect_helper('0', 0, net, timeout_factor) + self, connect_id, connect_cb=lambda: None, *, net, timeout_factor + ): + self.peer_connect_helper("0", 0, net, timeout_factor) - logger.debug( - f'Listening for Bitcoin ABC Node with id: {connect_id}') + logger.debug(f"Listening for Bitcoin ABC Node with id: {connect_id}") return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) def peer_disconnect(self): # Connection could have already been closed by other end. NetworkThread.network_event_loop.call_soon_threadsafe( - lambda: self._transport and self._transport.abort()) + lambda: self._transport and self._transport.abort() + ) # Connection and disconnection methods @@ -217,7 +218,8 @@ """asyncio callback when a connection is closed.""" if exc: logger.warning( - f"Connection lost to {self.dstaddr}:{self.dstport} due to {exc}") + f"Connection lost to {self.dstaddr}:{self.dstport} due to {exc}" + ) else: logger.debug(f"Closed connection to: {self.dstaddr}:{self.dstport}") self._transport = None @@ -250,32 +252,33 @@ return None if self.recvbuf[:4] != self.magic_bytes: raise ValueError( - f"magic bytes mismatch: " - f"{self.magic_bytes!r} != {self.recvbuf!r}") + "magic bytes mismatch: " + f"{self.magic_bytes!r} != {self.recvbuf!r}" + ) if len(self.recvbuf) < 4 + 12 + 4 + 4: return None - msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] - msglen = struct.unpack( - "= MIN_P2P_VERSION_SUPPORTED, \ - f"Version {message.nVersion} received. Test framework only supports " \ + assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, ( + f"Version {message.nVersion} received. 
Test framework only supports " f"versions greater than {MIN_P2P_VERSION_SUPPORTED}" + ) self.send_message(msg_verack()) if self.support_addrv2: self.send_message(msg_sendaddrv2()) @@ -519,44 +557,53 @@ # Connection helper methods - def wait_until(self, test_function_in, *, timeout=60, - check_connected=True): + def wait_until(self, test_function_in, *, timeout=60, check_connected=True): def test_function(): if check_connected: assert self.is_connected return test_function_in() - wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, - timeout_factor=self.timeout_factor) + wait_until_helper( + test_function, + timeout=timeout, + lock=p2p_lock, + timeout_factor=self.timeout_factor, + ) def wait_for_connect(self, timeout=60): - def test_function(): return self.is_connected + def test_function(): + return self.is_connected + self.wait_until(test_function, timeout=timeout, check_connected=False) def wait_for_disconnect(self, timeout=60): - def test_function(): return not self.is_connected + def test_function(): + return not self.is_connected + self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): - if not self.last_message.get('tx'): + if not self.last_message.get("tx"): return False - return self.last_message['tx'].tx.rehash() == txid + return self.last_message["tx"].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): - return self.last_message.get( - "block") and self.last_message["block"].block.rehash() == blockhash + return ( + self.last_message.get("block") + and self.last_message["block"].block.rehash() == blockhash + ) self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): - last_headers = self.last_message.get('headers') + last_headers = self.last_message.get("headers") if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) @@ -565,7 +612,7 @@ def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): - last_filtered_block = self.last_message.get('merkleblock') + last_filtered_block = self.last_message.get("merkleblock") if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) @@ -576,6 +623,7 @@ """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" + def test_function(): last_data = self.last_message.get("getdata") if not last_data: @@ -591,6 +639,7 @@ value must be explicitly cleared before calling this method, or this will return immediately with success. 
TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" + def test_function(): return self.last_message.get("getheaders") @@ -600,12 +649,15 @@ """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( - "wait_for_inv() will only verify the first inv object") + "wait_for_inv() will only verify the first inv object" + ) def test_function(): - return self.last_message.get("inv") and \ - self.last_message["inv"].inv[0].type == expected_inv[0].type and \ - self.last_message["inv"].inv[0].hash == expected_inv[0].hash + return ( + self.last_message.get("inv") + and self.last_message["inv"].inv[0].type == expected_inv[0].type + and self.last_message["inv"].inv[0].hash == expected_inv[0].hash + ) self.wait_until(test_function, timeout=timeout) @@ -634,8 +686,10 @@ self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): - return self.last_message.get( - "pong") and self.last_message["pong"].nonce == self.ping_counter + return ( + self.last_message.get("pong") + and self.last_message["pong"].nonce == self.ping_counter + ) self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 @@ -668,10 +722,10 @@ def close(self, timeout=10): """Close the connections and network event loop.""" - self.network_event_loop.call_soon_threadsafe( - self.network_event_loop.stop) - wait_until_helper(lambda: not self.network_event_loop.is_running(), - timeout=timeout) + self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) + wait_until_helper( + lambda: not self.network_event_loop.is_running(), timeout=timeout + ) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. @@ -679,7 +733,7 @@ @classmethod def listen(cls, p2p, callback, port=None, addr=None, idx=1): - """ Ensure a listening server is running on the given port, and run the + """Ensure a listening server is running on the given port, and run the protocol specified by `p2p` on the next connection to it. Once ready for connections, call `callback`.""" @@ -687,11 +741,12 @@ assert 0 < idx <= MAX_NODES port = p2p_port(MAX_NODES - idx) if addr is None: - addr = '127.0.0.1' + addr = "127.0.0.1" coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe( - cls.network_event_loop.create_task, coroutine) + cls.network_event_loop.create_task, coroutine + ) @classmethod async def create_listen_server(cls, addr, port, callback, proto): @@ -712,9 +767,10 @@ # connections, we can accomplish this by providing different # `proto` functions - listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) - logger.debug( - f"Listening server on {addr}:{port} should be started") + listener = await cls.network_event_loop.create_server( + peer_protocol, addr, port + ) + logger.debug(f"Listening server on {addr}:{port} should be started") cls.listeners[(addr, port)] = listener cls.protos[(addr, port)] = proto @@ -724,13 +780,14 @@ class P2PDataStore(P2PInterface): """A P2P data store class. - Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" + Keeps a block and transaction store and responds correctly to getdata and getheaders requests. + """ def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} - self.last_block_hash = '' + self.last_block_hash = "" # store of txs. 
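
# [Illustrative aside, not part of the diff] The on_getdata handler below
# dispatches on the low bits of the inventory type. A standalone toy using the
# conventional P2P constants (MSG_TX=1, MSG_BLOCK=2, mask = low 30 bits) --
# treat the values as assumptions restated from memory, not framework API:
MSG_TX, MSG_BLOCK = 1, 2
MSG_TYPE_MASK = 0xFFFFFFFF >> 2

tx_store = {0xAA: "tx-payload"}
block_store = {0xBB: "block-payload"}

def serve_getdata(inv_type, inv_hash):
    if (inv_type & MSG_TYPE_MASK) == MSG_TX and inv_hash in tx_store:
        return tx_store[inv_hash]
    if (inv_type & MSG_TYPE_MASK) == MSG_BLOCK and inv_hash in block_store:
        return block_store[inv_hash]
    return None  # unknown request: the real handler just logs it

assert serve_getdata(MSG_TX, 0xAA) == "tx-payload"
assert serve_getdata(MSG_BLOCK, 0xBB) == "block-payload"
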
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] @@ -739,13 +796,16 @@ """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) - if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): + if ( + inv.type & MSG_TYPE_MASK + ) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) - elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): + elif ( + inv.type & MSG_TYPE_MASK + ) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: - logger.debug( - f'getdata message type {hex(inv.type)} received.') + logger.debug(f"getdata message type {hex(inv.type)} received.") def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" @@ -762,36 +822,45 @@ # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: - prev_block_header = CBlockHeader( - self.block_store[prev_block_hash]) + prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug( - f'block hash {hex(prev_block_hash)} not found in block store') + f"block hash {hex(prev_block_hash)} not found in block store" + ) break # Truncate the list if there are too many headers - headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] + headers_list = headers_list[: -MAX_HEADERS_RESULTS - 1 : -1] response = msg_headers(headers_list) if response is not None: self.send_message(response) - def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, - reject_reason=None, expect_disconnect=False, timeout=60): + def send_blocks_and_test( + self, + blocks, + node, + *, + success=True, + force_send=False, + reject_reason=None, + expect_disconnect=False, + timeout=60, + ): """Send blocks to test node and test whether the tip advances. - - add all blocks to our block_store - - send a headers message for the final block - - the on_getheaders handler will ensure that any getheaders are responded to - - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will - ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - - if success is True: assert that the node's tip advances to the most recent block - - if success is False: assert that the node's tip doesn't advance - - if reject_reason is set: assert that the correct reject message is logged""" + - add all blocks to our block_store + - send a headers message for the final block + - the on_getheaders handler will ensure that any getheaders are responded to + - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will + ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. 
+ - if success is True: assert that the node's tip advances to the most recent block + - if success is False: assert that the node's tip doesn't advance + - if reject_reason is set: assert that the correct reject message is logged""" with p2p_lock: for block in blocks: @@ -805,7 +874,8 @@ else: self.send_message( - msg_headers([CBlockHeader(block) for block in blocks])) + msg_headers([CBlockHeader(block) for block in blocks]) + ) self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, @@ -818,8 +888,9 @@ self.sync_with_ping(timeout=timeout) if success: - self.wait_until(lambda: node.getbestblockhash() == - blocks[-1].hash, timeout=timeout) + self.wait_until( + lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout + ) else: assert node.getbestblockhash() != blocks[-1].hash @@ -829,15 +900,16 @@ else: test() - def send_txs_and_test(self, txs, node, *, success=True, - expect_disconnect=False, reject_reason=None): + def send_txs_and_test( + self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None + ): """Send txs to test node and test whether they're accepted to the mempool. - - add all txs to our tx_store - - send tx messages for all txs - - if success is True/False: assert that the txs are/are not accepted to the mempool - - if expect_disconnect is True: Skip the sync with ping - - if reject_reason is set: assert that the correct reject message is logged.""" + - add all txs to our tx_store + - send tx messages for all txs + - if success is True/False: assert that the txs are/are not accepted to the mempool + - if expect_disconnect is True: Skip the sync with ping + - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: for tx in txs: @@ -896,6 +968,7 @@ # Wait until invs have been received (and getdatas sent) for each txid. self.wait_until( lambda: set(self.tx_invs_received.keys()) == {int(tx, 16) for tx in txns}, - timeout=timeout) + timeout=timeout, + ) # Flush messages and wait for the getdatas to be processed self.sync_with_ping() diff --git a/test/functional/test_framework/ripemd160.py b/test/functional/test_framework/ripemd160.py --- a/test/functional/test_framework/ripemd160.py +++ b/test/functional/test_framework/ripemd160.py @@ -6,6 +6,7 @@ import unittest +# fmt: off # Message schedule indexes for the left path. ML = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, @@ -41,12 +42,13 @@ 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 ] +# fmt: on # K constants for the left path. -KL = [0, 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xa953fd4e] +KL = [0, 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC, 0xA953FD4E] # K constants for the right path. -KR = [0x50a28be6, 0x5c4dd124, 0x6d703ef3, 0x7a6d76e9, 0] +KR = [0x50A28BE6, 0x5C4DD124, 0x6D703EF3, 0x7A6D76E9, 0] def fi(x, y, z, i): @@ -67,7 +69,7 @@ def rol(x, i): """Rotate the bottom 32 bits of x left by i bits.""" - return ((x << i) | ((x & 0xffffffff) >> (32 - i))) & 0xffffffff + return ((x << i) | ((x & 0xFFFFFFFF) >> (32 - i))) & 0xFFFFFFFF def compress(h0, h1, h2, h3, h4, block): @@ -77,7 +79,7 @@ # Right path variables. ar, br, cr, dr, er = h0, h1, h2, h3, h4 # Message variables. - x = [int.from_bytes(block[4 * i:4 * (i + 1)], 'little') for i in range(16)] + x = [int.from_bytes(block[4 * i : 4 * (i + 1)], "little") for i in range(16)] # Iterate over the 80 rounds of the compression. 
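
# [Illustrative aside, not part of the diff] Each of the 80 rounds below leans
# on rol(), the 32-bit left rotation defined above. Two quick sanity checks of
# its wrap-around behaviour:
def _rol(x, i):
    return ((x << i) | ((x & 0xFFFFFFFF) >> (32 - i))) & 0xFFFFFFFF

assert _rol(0x80000001, 1) == 0x00000003            # high bit wraps to the low end
assert _rol(_rol(0xDEADBEEF, 7), 25) == 0xDEADBEEF  # rotations by i and 32-i cancel
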
for j in range(80): @@ -96,18 +98,18 @@ def ripemd160(data: bytes) -> bytes: """Compute the RIPEMD-160 hash of data.""" # Initialize state. - state = (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0) + state = (0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0) # Process full 64-byte blocks in the input. for b in range(len(data) >> 6): - state = compress(*state, data[64 * b:64 * (b + 1)]) + state = compress(*state, data[64 * b : 64 * (b + 1)]) # Construct final blocks (with padding and size). pad = b"\x80" + b"\x00" * ((119 - len(data)) & 63) - fin = data[len(data) & ~63:] + pad + (8 * len(data)).to_bytes(8, 'little') + fin = data[len(data) & ~63 :] + pad + (8 * len(data)).to_bytes(8, "little") # Process final blocks. for b in range(len(fin) >> 6): - state = compress(*state, fin[64 * b:64 * (b + 1)]) + state = compress(*state, fin[64 * b : 64 * (b + 1)]) # Produce output. - return b"".join((h & 0xffffffff).to_bytes(4, 'little') for h in state) + return b"".join((h & 0xFFFFFFFF).to_bytes(4, "little") for h in state) class TestFrameworkKey(unittest.TestCase): @@ -119,13 +121,16 @@ (b"a", "0bdc9d2d256b3ee9daae347be6f4dc835a467ffe"), (b"abc", "8eb208f7e05d987a9b044a8e98c6b087f15a0bfc"), (b"message digest", "5d0689ef49d2fae572b881b123a85ffa21595f36"), - (b"abcdefghijklmnopqrstuvwxyz", - "f71c27109c692c1b56bbdceb5b9d2865b3708dbc"), - (b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", - "12a053384a9c0c88e405a06c27dcf49ada62eb2b"), - (b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - "b0e20b6e3116640286ed3a87a5713079b21f5189"), + (b"abcdefghijklmnopqrstuvwxyz", "f71c27109c692c1b56bbdceb5b9d2865b3708dbc"), + ( + b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", + "12a053384a9c0c88e405a06c27dcf49ada62eb2b", + ), + ( + b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", + "b0e20b6e3116640286ed3a87a5713079b21f5189", + ), (b"1234567890" * 8, "9b752e45573d4b39f4dbd3323cab82bf63326bfb"), - (b"a" * 1000000, "52783243c1697bdbe16d37f97f68f08325dc1528") + (b"a" * 1000000, "52783243c1697bdbe16d37f97f68f08325dc1528"), ]: self.assertEqual(ripemd160(msg).hex(), hexout) diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -39,7 +39,7 @@ # Convert number to absolute value + sign in top bit. encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1)) # Serialize to bytes - return encoded_v.to_bytes(n_bytes, 'little') + return encoded_v.to_bytes(n_bytes, "little") _opcode_instances: List["CScriptOp"] = [] @@ -47,23 +47,24 @@ class CScriptOp(int): """A single script opcode""" + __slots__ = () @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" - if len(d) < 0x4c: + if len(d) < 0x4C: # OP_PUSHDATA - return b'' + bytes([len(d)]) + d - elif len(d) <= 0xff: + return b"" + bytes([len(d)]) + d + elif len(d) <= 0xFF: # OP_PUSHDATA1 - return b'\x4c' + bytes([len(d)]) + d - elif len(d) <= 0xffff: + return b"\x4c" + bytes([len(d)]) + d + elif len(d) <= 0xFFFF: # OP_PUSHDATA2 - return b'\x4d' + struct.pack(b'>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) @@ -439,7 +442,7 @@ result |= int(byte) << 8 * i if value[-1] >= 0x80: # Mask for all but the highest result bit - num_mask = (2**(len(value) * 8) - 1) >> 1 + num_mask = (2 ** (len(value) * 8) - 1) >> 1 result &= num_mask result *= -1 return result @@ -455,6 +458,7 @@ iter(script) however does iterate by opcode. 
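
# [Illustrative aside, not part of the diff] encode_op_pushdata() above picks
# the smallest push encoding: a direct length byte below 0x4c, otherwise
# OP_PUSHDATA1/2/4 with a 1-, 2- or 4-byte little-endian length. A standalone
# restatement of those size rules:
import struct

def encode_pushdata(d: bytes) -> bytes:
    if len(d) < 0x4C:
        return bytes([len(d)]) + d
    if len(d) <= 0xFF:
        return b"\x4c" + bytes([len(d)]) + d            # OP_PUSHDATA1
    if len(d) <= 0xFFFF:
        return b"\x4d" + struct.pack("<H", len(d)) + d  # OP_PUSHDATA2
    return b"\x4e" + struct.pack("<I", len(d)) + d      # OP_PUSHDATA4

assert encode_pushdata(b"\x01\x02") == b"\x02\x01\x02"
assert encode_pushdata(b"\x00" * 0x50)[:2] == b"\x4c\x50"
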
""" + __slots__ = () @classmethod @@ -463,7 +467,7 @@ if isinstance(other, CScriptOp): other = bytes([other]) elif isinstance(other, CScriptNum): - if (other.value == 0): + if other.value == 0: other = bytes([CScriptOp(OP_0)]) else: other = CScriptNum.encode(other) @@ -486,17 +490,18 @@ # join makes no sense for a CScript() raise NotImplementedError - def __new__(cls, value=b''): + def __new__(cls, value=b""): if isinstance(value, bytes) or isinstance(value, bytearray): return super().__new__(cls, value) else: + def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) + # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. - return super().__new__( - cls, b''.join(coerce_iterable(value))) + return super().__new__(cls, b"".join(coerce_iterable(value))) def raw_iter(self): """Raw iteration @@ -517,43 +522,45 @@ datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: - pushdata_type = f'PUSHDATA({opcode})' + pushdata_type = f"PUSHDATA({opcode})" datasize = opcode elif opcode == OP_PUSHDATA1: - pushdata_type = 'PUSHDATA1' + pushdata_type = "PUSHDATA1" if i >= len(self): - raise CScriptInvalidError( - 'PUSHDATA1: missing data length') + raise CScriptInvalidError("PUSHDATA1: missing data length") datasize = self[i] i += 1 elif opcode == OP_PUSHDATA2: - pushdata_type = 'PUSHDATA2' + pushdata_type = "PUSHDATA2" if i + 1 >= len(self): - raise CScriptInvalidError( - 'PUSHDATA2: missing data length') + raise CScriptInvalidError("PUSHDATA2: missing data length") datasize = self[i] + (self[i + 1] << 8) i += 2 elif opcode == OP_PUSHDATA4: - pushdata_type = 'PUSHDATA4' + pushdata_type = "PUSHDATA4" if i + 3 >= len(self): - raise CScriptInvalidError( - 'PUSHDATA4: missing data length') - datasize = self[i] + (self[i + 1] << 8) + \ - (self[i + 2] << 16) + (self[i + 3] << 24) + raise CScriptInvalidError("PUSHDATA4: missing data length") + datasize = ( + self[i] + + (self[i + 1] << 8) + + (self[i + 2] << 16) + + (self[i + 3] << 24) + ) i += 4 else: assert False # shouldn't happen - data = bytes(self[i:i + datasize]) + data = bytes(self[i : i + datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError( - f'{pushdata_type}: truncated data', data) + f"{pushdata_type}: truncated data", data + ) i += datasize @@ -568,7 +575,7 @@ See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. """ - for (opcode, data, sop_idx) in self.raw_iter(): + for opcode, data, sop_idx in self.raw_iter(): if data is not None: yield data else: @@ -593,10 +600,10 @@ try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: - op = f'{_repr(err.data)}...' + op = f"{_repr(err.data)}..." break except CScriptInvalidError as err: - op = f'' + op = f"" break except StopIteration: break @@ -616,14 +623,14 @@ def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" - r = b'' + r = b"" last_sop_idx = sop_idx = 0 skip = True - for (opcode, data, sop_idx) in script.raw_iter(): + for opcode, data, sop_idx in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx - if script[sop_idx:sop_idx + len(sig)] == sig: + if script[sop_idx : sop_idx + len(sig)] == sig: skip = True else: skip = False @@ -638,25 +645,24 @@ Returns (sighash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ - HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' + HASH_ONE = b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" if inIdx >= len(txTo.vin): return (HASH_ONE, f"inIdx {inIdx} out of range ({len(txTo.vin)})") txtmp = CTransaction(txTo) for txin in txtmp.vin: - txin.scriptSig = b'' - txtmp.vin[inIdx].scriptSig = FindAndDelete( - script, CScript([OP_CODESEPARATOR])) + txin.scriptSig = b"" + txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR])) - if (hashtype & 0x1f) == SIGHASH_NONE: + if (hashtype & 0x1F) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 - elif (hashtype & 0x1f) == SIGHASH_SINGLE: + elif (hashtype & 0x1F) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, f"outIdx {outIdx} out of range ({len(txtmp.vout)})") @@ -683,12 +689,12 @@ return (sighash, None) + # TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided. # Performance optimization probably not necessary for python tests, however. def SignatureHashForkId(script, txTo, inIdx, hashtype, amount): - hashPrevouts = 0 hashSequence = 0 hashOutputs = 0 @@ -699,20 +705,22 @@ serialize_prevouts += i.prevout.serialize() hashPrevouts = uint256_from_str(hash256(serialize_prevouts)) - if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) - != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE): + if ( + not (hashtype & SIGHASH_ANYONECANPAY) + and (hashtype & 0x1F) != SIGHASH_SINGLE + and (hashtype & 0x1F) != SIGHASH_NONE + ): serialize_sequence = bytes() for i in txTo.vin: serialize_sequence += struct.pack("> 64) & ((1 << 64) - 1) n2 = (h >> 128) & ((1 << 64) - 1) n3 = (h >> 192) & ((1 << 64) - 1) - v0 = 0x736f6d6570736575 ^ k0 - v1 = 0x646f72616e646f6d ^ k1 - v2 = 0x6c7967656e657261 ^ k0 + v0 = 0x736F6D6570736575 ^ k0 + v1 = 0x646F72616E646F6D ^ k1 + v2 = 0x6C7967656E657261 ^ k0 v3 = 0x7465646279746573 ^ k1 ^ n0 v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3) diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py --- a/test/functional/test_framework/socks5.py +++ b/test/functional/test_framework/socks5.py @@ -23,6 +23,7 @@ DOMAINNAME = 0x03 IPV6 = 0x04 + # Utility functions @@ -32,11 +33,12 @@ while n > 0: d = s.recv(n) if not d: - raise IOError('Unexpected end of stream') + raise IOError("Unexpected end of stream") rv.extend(d) n -= len(d) return rv + # Implementation classes @@ -62,7 +64,7 @@ self.password = password def __repr__(self): - return f'Socks5Command({self.cmd},{self.atyp},{self.addr},{self.port},{self.username},{self.password})' + return f"Socks5Command({self.cmd},{self.atyp},{self.addr},{self.port},{self.username},{self.password})" class Socks5Connection: @@ -76,7 +78,7 @@ # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: - raise IOError(f'Invalid socks version {ver}') + raise IOError(f"Invalid socks version {ver}") # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) @@ -86,7 +88,7 @@ elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: - raise IOError('No supported authentication method was offered') + raise IOError("No supported authentication 
method was offered") # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) @@ -95,7 +97,7 @@ if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: - raise IOError(f'Invalid auth packet version {ver}') + raise IOError(f"Invalid auth packet version {ver}") ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] @@ -106,11 +108,9 @@ # Read connect request ver, cmd, _, atyp = recvall(self.conn, 4) if ver != 0x05: - raise IOError( - f'Invalid socks version {ver} in connect request') + raise IOError(f"Invalid socks version {ver} in connect request") if cmd != Command.CONNECT: - raise IOError( - f'Unhandled command {cmd} in connect request') + raise IOError(f"Unhandled command {cmd} in connect request") if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) @@ -120,17 +120,18 @@ elif atyp == AddressType.IPV6: addr = recvall(self.conn, 16) else: - raise IOError(f'Unknown address type {atyp}') + raise IOError(f"Unknown address type {atyp}") port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( - bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) + bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]) + ) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) - logger.info(f'Proxy: {cmdin}') + logger.info(f"Proxy: {cmdin}") # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -72,13 +72,17 @@ those standards are violated, a ``TypeError`` is raised.""" def __new__(cls, clsname, bases, dct): - if not clsname == 'BitcoinTestFramework': - if not ('run_test' in dct and 'set_test_params' in dct): - raise TypeError("BitcoinTestFramework subclasses must override " - "'run_test' and 'set_test_params'") - if '__init__' in dct or 'main' in dct: - raise TypeError("BitcoinTestFramework subclasses may not override " - "'__init__' or 'main'") + if not clsname == "BitcoinTestFramework": + if not ("run_test" in dct and "set_test_params" in dct): + raise TypeError( + "BitcoinTestFramework subclasses must override " + "'run_test' and 'set_test_params'" + ) + if "__init__" in dct or "main" in dct: + raise TypeError( + "BitcoinTestFramework subclasses may not override " + "'__init__' or 'main'" + ) return super().__new__(cls, clsname, bases, dct) @@ -101,7 +105,7 @@ def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" - self.chain: str = 'regtest' + self.chain: str = "regtest" self.setup_clean_chain: bool = False self.nodes: List[TestNode] = [] self.network_thread = None @@ -130,7 +134,8 @@ def main(self): """Main function. 
This should not be overridden by the subclass test scripts.""" assert hasattr( - self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" + self, "num_nodes" + ), "Test must set self.num_nodes in set_test_params()" try: self.setup() @@ -162,45 +167,136 @@ def parse_args(self): parser = argparse.ArgumentParser(usage="%(prog)s [options]") - parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", - help="Leave bitcoinds and test.* datadir on exit or error") - parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", - help="Don't stop bitcoinds after the test execution") - parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(f"{os.path.dirname(os.path.realpath(__file__))}/../../cache"), - help="Directory for caching pregenerated datadirs (default: %(default)s)") - parser.add_argument("--tmpdir", dest="tmpdir", - help="Root directory for datadirs") - parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", - help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") - parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", - help="Print out all RPC calls as they are made") - parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, - help="The seed to use for assigning port numbers (default: current process id)") - parser.add_argument("--coveragedir", dest="coveragedir", - help="Write tested RPC commands into this directory") - parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath( - __file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)") - parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", - help="Attach a python debugger if test fails") - parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", - help="use bitcoin-cli instead of RPC for all commands") - parser.add_argument("--perf", dest="perf", default=False, action="store_true", - help="profile running nodes with perf for the duration of the test") - parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", - help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required") - parser.add_argument("--randomseed", type=int, - help="set a random seed for deterministically reproducing a previous test run") - parser.add_argument("--descriptors", default=False, action="store_true", - help="Run test using a descriptor wallet") - parser.add_argument("--with-wellingtonactivation", dest="wellingtonactivation", default=False, action="store_true", - help=f"Activate wellington update on timestamp {TIMESTAMP_IN_THE_PAST}") parser.add_argument( - '--timeout-factor', + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error", + ) + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution", + ) + parser.add_argument( + "--cachedir", + dest="cachedir", + default=os.path.abspath( + 
f"{os.path.dirname(os.path.realpath(__file__))}/../../cache" + ), + help="Directory for caching pregenerated datadirs (default: %(default)s)", + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", help="Root directory for datadirs" + ) + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="INFO", + help=( + "log events at this level and higher to the console. Can be set to" + " DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG" + " will output all logs to console. Note that logs at all levels are" + " always written to the test_framework.log file in the temporary test" + " directory." + ), + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made", + ) + parser.add_argument( + "--portseed", + dest="port_seed", + default=os.getpid(), + type=int, + help=( + "The seed to use for assigning port numbers (default: current" + " process id)" + ), + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + help="Write tested RPC commands into this directory", + ) + parser.add_argument( + "--configfile", + dest="configfile", + default=os.path.abspath( + os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini" + ), + help="Location of the test framework config file (default: %(default)s)", + ) + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails", + ) + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands", + ) + parser.add_argument( + "--perf", + dest="perf", + default=False, + action="store_true", + help="profile running nodes with perf for the duration of the test", + ) + parser.add_argument( + "--valgrind", + dest="valgrind", + default=False, + action="store_true", + help=( + "run nodes under the valgrind memory error detector: expect at least a" + " ~10x slowdown, valgrind 3.14 or later required" + ), + ) + parser.add_argument( + "--randomseed", + type=int, + help=( + "set a random seed for deterministically reproducing a previous" + " test run" + ), + ) + parser.add_argument( + "--descriptors", + default=False, + action="store_true", + help="Run test using a descriptor wallet", + ) + parser.add_argument( + "--with-wellingtonactivation", + dest="wellingtonactivation", + default=False, + action="store_true", + help=f"Activate wellington update on timestamp {TIMESTAMP_IN_THE_PAST}", + ) + parser.add_argument( + "--timeout-factor", dest="timeout_factor", type=float, default=1.0, - help='adjust test timeouts by a factor. ' - 'Setting it to 0 disables all timeouts') + help=( + "adjust test timeouts by a factor. 
" + "Setting it to 0 disables all timeouts" + ), + ) self.add_options(parser) self.options = parser.parse_args() @@ -214,29 +310,34 @@ self.options.cachedir = os.path.abspath(self.options.cachedir) config = configparser.ConfigParser() - config.read_file(open(self.options.configfile, encoding='utf-8')) + config.read_file(open(self.options.configfile, encoding="utf-8")) self.config = config fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", - f"bitcoind{config['environment']['EXEEXT']}" + f"bitcoind{config['environment']['EXEEXT']}", ) fname_bitcoincli = os.path.join( config["environment"]["BUILDDIR"], "src", - f"bitcoin-cli{config['environment']['EXEEXT']}" + f"bitcoin-cli{config['environment']['EXEEXT']}", ) self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind) - self.options.bitcoincli = os.getenv( - "BITCOINCLI", default=fname_bitcoincli) + self.options.bitcoincli = os.getenv("BITCOINCLI", default=fname_bitcoincli) self.options.emulator = config["environment"]["EMULATOR"] or None - os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \ - config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \ - os.environ['PATH'] + os.environ["PATH"] = ( + config["environment"]["BUILDDIR"] + + os.pathsep + + config["environment"]["BUILDDIR"] + + os.path.sep + + "qt" + + os.pathsep + + os.environ["PATH"] + ) # Add test dir to sys.path (to access generated modules) - sys.path.append(os.path.join(config['environment']['BUILDDIR'], "test")) + sys.path.append(os.path.join(config["environment"]["BUILDDIR"], "test")) # Set up temp directory and start logging if self.options.tmpdir: @@ -262,14 +363,13 @@ random.seed(seed) self.log.debug(f"PRNG seed is: {seed}") - self.log.debug('Setting up network thread') + self.log.debug("Setting up network thread") self.network_thread = NetworkThread() self.network_thread.start() if self.options.usecli: if not self.supports_cli: - raise SkipTest( - "--usecli specified but test does not support using CLI") + raise SkipTest("--usecli specified but test does not support using CLI") self.skip_if_no_cli() self.skip_test_if_missing_module() self.setup_chain() @@ -283,7 +383,7 @@ print("Testcase failed. Attaching python debugger. Enter ? 
for help") pdb.set_trace() - self.log.debug('Closing down network thread') + self.log.debug("Closing down network thread") self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") @@ -292,25 +392,24 @@ else: for node in self.nodes: node.cleanup_on_exit = False - self.log.info( - "Note: bitcoinds were not stopped and may still be running") + self.log.info("Note: bitcoinds were not stopped and may still be running") should_clean_up = ( - not self.options.nocleanup and - not self.options.noshutdown and - self.success != TestStatus.FAILED and - not self.options.perf + not self.options.nocleanup + and not self.options.noshutdown + and self.success != TestStatus.FAILED + and not self.options.perf ) if should_clean_up: self.log.info(f"Cleaning up {self.options.tmpdir} on exit") cleanup_tree_on_exit = True elif self.options.perf: self.log.warning( - f"Not cleaning up dir {self.options.tmpdir} due to perf data") + f"Not cleaning up dir {self.options.tmpdir} due to perf data" + ) cleanup_tree_on_exit = False else: - self.log.warning( - f"Not cleaning up dir {self.options.tmpdir}") + self.log.warning(f"Not cleaning up dir {self.options.tmpdir}") cleanup_tree_on_exit = False if self.success == TestStatus.PASSED: @@ -322,18 +421,22 @@ else: self.log.error( f"Test failed. Test logging available at {self.options.tmpdir}" - f"/test_framework.log") + "/test_framework.log" + ) self.log.error("") combine_logs_path = os.path.normpath( - f'{os.path.dirname(os.path.realpath(__file__))}/../combine_logs.py') + f"{os.path.dirname(os.path.realpath(__file__))}/../combine_logs.py" + ) self.log.error( f"Hint: Call {combine_logs_path} '{self.options.tmpdir}' to " - f"consolidate all logs") + "consolidate all logs" + ) self.log.error("") self.log.error( "If this failure happened unexpectedly or intermittently, please" - " file a bug and provide a link or upload of the combined log.") - self.log.error(self.config['environment']['PACKAGE_BUGREPORT']) + " file a bug and provide a link or upload of the combined log." + ) + self.log.error(self.config["environment"]["PACKAGE_BUGREPORT"]) self.log.error("") exit_code = TEST_EXIT_FAILED # Logging.shutdown will not remove stream- and filehandlers, so we must @@ -410,9 +513,8 @@ assert_equal(n.getblockchaininfo()["blocks"], 199) # To ensure that all nodes are out of IBD, the most recent block # must have a timestamp not too old (see IsInitialBlockDownload()). 
- self.log.debug('Generate a block with current time') - block_hash = self.generate( - self.nodes[0], 1, sync_fun=self.no_op)[0] + self.log.debug("Generate a block with current time") + block_hash = self.generate(self.nodes[0], 1, sync_fun=self.no_op)[0] block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0) for n in self.nodes: n.submitblock(block) @@ -423,7 +525,8 @@ def import_deterministic_coinbase_privkeys(self): wallet_names = ( [self.default_wallet_name] * len(self.nodes) - if self.wallet_names is None else self.wallet_names + if self.wallet_names is None + else self.wallet_names ) assert len(wallet_names) <= len(self.nodes) for wallet_name, n in zip(wallet_names, self.nodes): @@ -431,10 +534,11 @@ n.createwallet( wallet_name=wallet_name, descriptors=self.options.descriptors, - load_on_startup=True) + load_on_startup=True, + ) n.importprivkey( - privkey=n.get_deterministic_priv_key().key, - label='coinbase') + privkey=n.get_deterministic_priv_key().key, label="coinbase" + ) def run_test(self): """Tests must override this method to define test logic""" @@ -443,8 +547,7 @@ # Public helper methods. These can be accessed by the subclass test # scripts. - def add_nodes(self, num_nodes: int, extra_args=None, - *, host=None, binary=None): + def add_nodes(self, num_nodes: int, extra_args=None, *, host=None, binary=None): """Instantiate TestNode objects. Should only be called once after the nodes have been specified in @@ -461,32 +564,35 @@ assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): - self.nodes.append(TestNode( - i, - get_datadir_path(self.options.tmpdir, i), - chain=self.chain, - host=host, - rpc_port=rpc_port(i), - p2p_port=p2p_port(i), - chronik_port=chronik_port(i), - timewait=self.rpc_timeout, - timeout_factor=self.options.timeout_factor, - bitcoind=binary[i], - bitcoin_cli=self.options.bitcoincli, - coverage_dir=self.options.coveragedir, - cwd=self.options.tmpdir, - extra_conf=extra_confs[i], - extra_args=extra_args[i], - use_cli=self.options.usecli, - emulator=self.options.emulator, - start_perf=self.options.perf, - use_valgrind=self.options.valgrind, - descriptors=self.options.descriptors, - )) + self.nodes.append( + TestNode( + i, + get_datadir_path(self.options.tmpdir, i), + chain=self.chain, + host=host, + rpc_port=rpc_port(i), + p2p_port=p2p_port(i), + chronik_port=chronik_port(i), + timewait=self.rpc_timeout, + timeout_factor=self.options.timeout_factor, + bitcoind=binary[i], + bitcoin_cli=self.options.bitcoincli, + coverage_dir=self.options.coveragedir, + cwd=self.options.tmpdir, + extra_conf=extra_confs[i], + extra_args=extra_args[i], + use_cli=self.options.usecli, + emulator=self.options.emulator, + start_perf=self.options.perf, + use_valgrind=self.options.valgrind, + descriptors=self.options.descriptors, + ) + ) if self.options.wellingtonactivation: self.nodes[i].extend_default_args( - [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) + [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"] + ) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" @@ -517,10 +623,9 @@ if self.options.coveragedir is not None: for node in self.nodes: - coverage.write_all_rpc_commands( - self.options.coveragedir, node.rpc) + coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) - def stop_node(self, i, expected_stderr='', wait=0): + def stop_node(self, i, expected_stderr="", wait=0): """Stop a bitcoind test node""" self.nodes[i].stop_node(expected_stderr, wait=wait) @@ -548,7 +653,7 @@ host 
= to_node.host if host is None: - host = '127.0.0.1' + host = "127.0.0.1" ip_port = f"{host}:{str(to_node.p2p_port)}" from_node.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions @@ -557,11 +662,14 @@ # * Must have a version message before anything else # * Must have a verack message before anything else wait_until_helper( - lambda: all(peer['version'] != 0 - for peer in from_node.getpeerinfo())) + lambda: all(peer["version"] != 0 for peer in from_node.getpeerinfo()) + ) wait_until_helper( - lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 - for peer in from_node.getpeerinfo())) + lambda: all( + peer["bytesrecv_per_msg"].pop("verack", 0) == 24 + for peer in from_node.getpeerinfo() + ) + ) def disconnect_nodes(self, a, b): from_node = self.nodes[a] @@ -570,15 +678,16 @@ def get_peer_ids(): result = [] for peer in from_node.getpeerinfo(): - if to_node.name in peer['subver']: - result.append(peer['id']) + if to_node.name in peer["subver"]: + result.append(peer["id"]) return result peer_ids = get_peer_ids() if not peer_ids: self.log.warning( f"disconnect_nodes: {from_node.index} and {to_node.index} were not " - "connected") + "connected" + ) return for peer_id in peer_ids: try: @@ -588,7 +697,7 @@ # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting # peers. - if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED + if e.error["code"] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect @@ -623,14 +732,12 @@ return blocks def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetoaddress( - *args, invalid_call=False, **kwargs) + blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetodescriptor( - *args, invalid_call=False, **kwargs) + blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks @@ -649,13 +756,12 @@ if best_hash.count(best_hash[0]) == len(rpc_connections): return # Check that each peer has at least one connection - assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) best_hashes = "".join(f"\n {b!r}" for b in best_hash) raise AssertionError(f"Block sync timed out after {timeout}s:{best_hashes}") - def sync_mempools(self, nodes=None, wait=1, timeout=60, - flush_scheduler=True): + def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools @@ -671,7 +777,7 @@ r.syncwithvalidationinterfacequeue() return # Check that each peer has at least one connection - assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) pool_str = "".join(f"\n {m!r}" for m in pool) raise AssertionError(f"Mempool sync timed out after {timeout}s:{pool_str}") @@ -689,16 +795,16 @@ return [uint256_hex(i) for i in id_list] while time.time() <= stop_time: - nodes_proofs = [ - set(format_ids(get_proof_ids(r))) for r in rpc_connections] + nodes_proofs = [set(format_ids(get_proof_ids(r))) for r in rpc_connections] if nodes_proofs.count(nodes_proofs[0]) == len(rpc_connections): return # Check that each peer has at least one connection - 
assert (all(len(x.getpeerinfo()) for x in rpc_connections)) + assert all(len(x.getpeerinfo()) for x in rpc_connections) time.sleep(wait) nodes_proofs_str = "".join(f"\n {m!r}" for m in nodes_proofs) raise AssertionError( - f"Proofs sync timed out after {timeout}s:{nodes_proofs_str}") + f"Proofs sync timed out after {timeout}s:{nodes_proofs_str}" + ) def sync_all(self, nodes=None): self.sync_blocks(nodes) @@ -706,32 +812,39 @@ self.sync_proofs(nodes) def wait_until(self, test_function, timeout=60): - return wait_until_helper(test_function, timeout=timeout, - timeout_factor=self.options.timeout_factor) + return wait_until_helper( + test_function, timeout=timeout, timeout_factor=self.options.timeout_factor + ) # Private helper methods. These should not be accessed by the subclass # test scripts. def _start_logging(self): # Add logger and logging handlers - self.log = logging.getLogger('TestFramework') + self.log = logging.getLogger("TestFramework") self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler( - f"{self.options.tmpdir}/test_framework.log", encoding='utf-8') + f"{self.options.tmpdir}/test_framework.log", encoding="utf-8" + ) fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel # was caught as a string, so try to convert it to an int - ll = int(self.options.loglevel) if self.options.loglevel.isdigit( - ) else self.options.loglevel.upper() + ll = ( + int(self.options.loglevel) + if self.options.loglevel.isdigit() + else self.options.loglevel.upper() + ) ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so # log files can be concatenated and sorted) formatter = logging.Formatter( - fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') + fmt="%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s", + datefmt="%Y-%m-%dT%H:%M:%S", + ) formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) @@ -758,8 +871,7 @@ assert self.num_nodes <= MAX_NODES if not os.path.isdir(cache_node_dir): - self.log.debug( - f"Creating cache directory {cache_node_dir}") + self.log.debug(f"Creating cache directory {cache_node_dir}") initialize_datadir( self.options.cachedir, @@ -773,7 +885,7 @@ cache_node_dir, chain=self.chain, extra_conf=["bind=127.0.0.1"], - extra_args=['-disablewallet'], + extra_args=["-disablewallet"], host=None, rpc_port=rpc_port(CACHE_NODE_ID), p2p_port=p2p_port(CACHE_NODE_ID), @@ -786,11 +898,13 @@ cwd=self.options.tmpdir, descriptors=self.options.descriptors, emulator=self.options.emulator, - )) + ) + ) if self.options.wellingtonactivation: self.nodes[CACHE_NODE_ID].extend_default_args( - [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) + [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"] + ) self.start_node(CACHE_NODE_ID) cache_node = self.nodes[CACHE_NODE_ID] @@ -800,8 +914,8 @@ # Set a time in the past, so that blocks don't end up in the future cache_node.setmocktime( - cache_node.getblockheader( - cache_node.getbestblockhash())['time']) + cache_node.getblockheader(cache_node.getbestblockhash())["time"] + ) # Create a 199-block-long chain; each of the 3 first nodes gets 25 # mature blocks and 25 immature. @@ -810,8 +924,9 @@ # old tip age). 
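
# [Illustrative aside, not part of the diff] _start_logging() above formats
# test-framework log lines to match bitcoind's debug.log (UTC timestamps, a
# microsecond field padded out from milliseconds) so the two logs can be
# concatenated and sorted together. Minimal standalone setup using the same
# format strings:
import logging
import sys
import time

formatter = logging.Formatter(
    fmt="%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s",
    datefmt="%Y-%m-%dT%H:%M:%S",
)
formatter.converter = time.gmtime  # force UTC, like bitcoind

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
log = logging.getLogger("TestFrameworkDemo")
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug("sortable, bitcoind-style timestamp")
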
# This is needed so that we are out of IBD when the test starts, # see the tip age check in IsInitialBlockDownload(). - gen_addresses = [ - k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_ECREG_P2SH_OP_TRUE] + gen_addresses = [k.address for k in TestNode.PRIV_KEYS][:3] + [ + ADDRESS_ECREG_P2SH_OP_TRUE + ] assert_equal(len(gen_addresses), 4) for i in range(8): self.generatetoaddress( @@ -830,15 +945,14 @@ return os.path.join(cache_node_dir, self.chain, *paths) # Remove empty wallets dir - os.rmdir(cache_path('wallets')) + os.rmdir(cache_path("wallets")) for entry in os.listdir(cache_path()): # Only keep indexes, chainstate and blocks folders - if entry not in ['chainstate', 'blocks', 'indexes']: + if entry not in ["chainstate", "blocks", "indexes"]: os.remove(cache_path(entry)) for i in range(self.num_nodes): - self.log.debug( - f"Copy cache directory {cache_node_dir} to node {i}") + self.log.debug(f"Copy cache directory {cache_node_dir} to node {i}") to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(cache_node_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf @@ -879,15 +993,16 @@ def skip_if_no_bitcoind_tracepoints(self): """Skip the running test if bitcoind has not been compiled with USDT tracepoint support.""" if not self.is_usdt_compiled(): - raise SkipTest( - "bitcoind has not been built with USDT tracepoints enabled.") + raise SkipTest("bitcoind has not been built with USDT tracepoints enabled.") def skip_if_no_bpf_permissions(self): """Skip the running test if we don't have permissions to do BPF syscalls and load BPF maps.""" # check for 'root' permissions if os.geteuid() != 0: raise SkipTest( - "no permissions to use BPF (please review the tests carefully before running them with higher privileges)") + "no permissions to use BPF (please review the tests carefully before" + " running them with higher privileges)" + ) def skip_if_platform_not_linux(self): """Skip the running test if we are not on a Linux platform""" diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -66,9 +66,30 @@ To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" - def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, chronik_port, timewait, timeout_factor, bitcoind, bitcoin_cli, - coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, - use_valgrind=False, descriptors=False): + def __init__( + self, + i, + datadir, + *, + chain, + host, + rpc_port, + p2p_port, + chronik_port, + timewait, + timeout_factor, + bitcoind, + bitcoin_cli, + coverage_dir, + cwd, + extra_conf=None, + extra_args=None, + use_cli=False, + emulator=None, + start_perf=False, + use_valgrind=False, + descriptors=False, + ): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as @@ -91,8 +112,9 @@ self.binary = bitcoind if not os.path.isfile(self.binary): raise FileNotFoundError( - f"Binary '{self.binary}' could not be found.\nTry setting it manually:\n" - f"\tBITCOIND= {sys.argv[0]}") + f"Binary '{self.binary}' could not be found.\nTry setting it" + f" manually:\n\tBITCOIND= {sys.argv[0]}" + ) self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors @@ -116,31 +138,41 @@ "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", - "-uacomment=" + self.name + "-uacomment=" + self.name, ] if 
use_valgrind: default_suppressions_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), - "..", "..", "..", "contrib", "valgrind.supp") - suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", - default_suppressions_file) + "..", + "..", + "..", + "contrib", + "valgrind.supp", + ) + suppressions_file = os.getenv( + "VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file + ) self.binary = "valgrind" self.bitcoind_args = [bitcoind] + self.default_args - self.default_args = [f"--suppressions={suppressions_file}", - "--gen-suppressions=all", "--exit-on-first-error=yes", - "--error-exitcode=1", "--quiet"] + self.bitcoind_args + self.default_args = [ + f"--suppressions={suppressions_file}", + "--gen-suppressions=all", + "--exit-on-first-error=yes", + "--error-exitcode=1", + "--quiet", + ] + self.bitcoind_args if emulator is not None: if not os.path.isfile(emulator): - raise FileNotFoundError( - f"Emulator '{emulator}' could not be found.") + raise FileNotFoundError(f"Emulator '{emulator}' could not be found.") self.emulator = emulator if use_cli and not os.path.isfile(bitcoin_cli): raise FileNotFoundError( - f"Binary '{bitcoin_cli}' could not be found.\nTry setting it manually:\n" - f"\tBITCOINCLI= {sys.argv[0]}") + f"Binary '{bitcoin_cli}' could not be found.\nTry setting it" + f" manually:\n\tBITCOINCLI= {sys.argv[0]}" + ) self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) self.use_cli = use_cli self.start_perf = start_perf @@ -151,7 +183,7 @@ self.rpc = None self.url = None self.relay_fee_cache = None - self.log = logging.getLogger(f'TestFramework.node{i}') + self.log = logging.getLogger(f"TestFramework.node{i}") # Whether to kill the node when this object goes away self.cleanup_on_exit = True # Cache perf subprocesses here by their data output filename. 
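A note on the valgrind branch reformatted above: it swaps the node binary for valgrind and splices the original bitcoind invocation in after the valgrind flags. A minimal sketch of the resulting argument list follows; the binary path, datadir and arguments are hypothetical, not taken from this diff:

    # Sketch only: how the valgrind branch rewrites the command line.
    # The paths and node arguments below are made up for illustration.
    bitcoind = "/usr/local/bin/bitcoind"
    default_args = ["-datadir=/tmp/node0", "-logtimemicros", "-debug"]
    suppressions_file = "contrib/valgrind.supp"

    binary = "valgrind"
    bitcoind_args = [bitcoind] + default_args
    default_args = [
        f"--suppressions={suppressions_file}",
        "--gen-suppressions=all",
        "--exit-on-first-error=yes",
        "--error-exitcode=1",
        "--quiet",
    ] + bitcoind_args

    # The node is later launched roughly as [binary] + default_args, i.e.:
    # valgrind --suppressions=... --quiet /usr/local/bin/bitcoind -datadir=...
    print([binary] + default_args)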
@@ -159,54 +191,66 @@ self.p2ps = [] self.timeout_factor = timeout_factor - AddressKeyPair = collections.namedtuple( - 'AddressKeyPair', ['address', 'key']) + AddressKeyPair = collections.namedtuple("AddressKeyPair", ["address", "key"]) PRIV_KEYS = [ # address , privkey AddressKeyPair( - 'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', - 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), + "mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z", + "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW", + ), AddressKeyPair( - 'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', - 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), + "msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg", + "cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE", + ), AddressKeyPair( - 'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', - 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), + "mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP", + "cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK", + ), AddressKeyPair( - 'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', - 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), + "mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR", + "cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim", + ), AddressKeyPair( - 'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', - 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), + "msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws", + "cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh", + ), AddressKeyPair( - 'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', - 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), + "n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi", + "cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq", + ), AddressKeyPair( - 'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', - 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), + "myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6", + "cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK", + ), AddressKeyPair( - 'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', - 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), + "mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8", + "cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy", + ), AddressKeyPair( - 'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', - 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), + "mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg", + "cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k", + ), AddressKeyPair( - 'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', - 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), + "mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf", + "cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik", + ), AddressKeyPair( - 'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', - 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), + "mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6", + "cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3", + ), AddressKeyPair( - 'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', - 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), + "mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7", + "cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ", + ), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" num_keys = len(self.PRIV_KEYS) - assert self.index < num_keys, \ - f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " \ - f"more are needed." + assert self.index < num_keys, ( + f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " + "more are needed." 
+ ) return self.PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: @@ -230,16 +274,13 @@ def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: - return getattr( - RPCOverloadWrapper(self.cli, True, self.descriptors), name) + return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name) else: - assert self.rpc is not None, self._node_msg( - "Error: RPC not initialized") - assert self.rpc_connected, self._node_msg( - "Error: No RPC connection") + assert self.rpc is not None, self._node_msg("Error: RPC not initialized") + assert self.rpc_connected, self._node_msg("Error: No RPC connection") return getattr( - RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), - name) + RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name + ) def clear_default_args(self): self.default_args.clear() @@ -254,22 +295,22 @@ # - if the arg is a value (-key=value) then the name must starts # with "-key=" (the '"' char is to avoid removing "-key_suffix" # arg is "-key" is the argument to remove). - self.default_args = [def_arg for def_arg in self.default_args - if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] + self.default_args = [ + def_arg + for def_arg in self.default_args + if rm_arg != def_arg and not def_arg.startswith(rm_arg + "=") + ] - def start(self, extra_args=None, *, cwd=None, stdout=None, - stderr=None, **kwargs): + def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: - stderr = tempfile.NamedTemporaryFile( - dir=self.stderr_dir, delete=False) + stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: - stdout = tempfile.NamedTemporaryFile( - dir=self.stdout_dir, delete=False) + stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout @@ -289,12 +330,8 @@ if self.emulator is not None: p_args = [self.emulator] + p_args self.process = subprocess.Popen( - p_args, - env=subp_env, - stdout=stdout, - stderr=stderr, - cwd=cwd, - **kwargs) + p_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs + ) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") @@ -308,27 +345,28 @@ poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: - raise FailedToStartError(self._node_msg( - f'bitcoind exited with status {self.process.returncode} during ' - f'initialization')) + raise FailedToStartError( + self._node_msg( + f"bitcoind exited with status {self.process.returncode} during " + "initialization" + ) + ) try: rpc = get_rpc_proxy( - rpc_url( - self.datadir, - self.chain, - self.host, - self.rpc_port), + rpc_url(self.datadir, self.chain, self.host, self.rpc_port), self.index, # Shorter timeout to allow for one retry in case of # ETIMEDOUT timeout=self.rpc_timeout // 2, - coveragedir=self.coverage_dir + coveragedir=self.coverage_dir, ) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC # connection is up - wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], - timeout_factor=self.timeout_factor) + wait_until_helper( + lambda: rpc.getmempoolinfo()["loaded"], + timeout_factor=self.timeout_factor, + ) # Wait for the node to finish reindex, block import, and # loading the mempool. 
Usually importing happens fast or # even "immediate" when the node is started. However, there @@ -357,7 +395,7 @@ except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error - if e.error['code'] != -28 and e.error['code'] != -342: + if e.error["code"] != -28 and e.error["code"] != -342: raise # unknown JSON RPC exception except ConnectionResetError: # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount @@ -380,7 +418,8 @@ raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error( - f"Unable to connect to bitcoind after {self.rpc_timeout}s") + f"Unable to connect to bitcoind after {self.rpc_timeout}s" + ) def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI @@ -400,29 +439,41 @@ pass time.sleep(1.0 / poll_per_s) self._raise_assertion_error( - f"Unable to retrieve cookie credentials after {self.rpc_timeout}s") + f"Unable to retrieve cookie credentials after {self.rpc_timeout}s" + ) def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug( - "TestNode.generate() dispatches `generate` call to `generatetoaddress`") + "TestNode.generate() dispatches `generate` call to `generatetoaddress`" + ) return self.generatetoaddress( - nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) + nblocks=nblocks, + address=self.get_deterministic_priv_key().address, + maxtries=maxtries, + **kwargs, + ) def generateblock(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generateblock')(*args, **kwargs) + return self.__getattr__("generateblock")(*args, **kwargs) def generatetoaddress(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generatetoaddress')(*args, **kwargs) + return self.__getattr__("generatetoaddress")(*args, **kwargs) def generatetodescriptor(self, *args, invalid_call, **kwargs): assert not invalid_call - return self.__getattr__('generatetodescriptor')(*args, **kwargs) - - def buildavalancheproof(self, sequence: int, expiration: int, master: str, - stakes: List[Dict[str, Any]], payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE) -> str: - return self.__getattr__('buildavalancheproof')( + return self.__getattr__("generatetodescriptor")(*args, **kwargs) + + def buildavalancheproof( + self, + sequence: int, + expiration: int, + master: str, + stakes: List[Dict[str, Any]], + payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE, + ) -> str: + return self.__getattr__("buildavalancheproof")( sequence=sequence, expiration=expiration, master=master, @@ -433,19 +484,17 @@ def get_wallet_rpc(self, wallet_name): if self.use_cli: return RPCOverloadWrapper( - self.cli(f"-rpcwallet={wallet_name}"), True, - self.descriptors) + self.cli(f"-rpcwallet={wallet_name}"), True, self.descriptors + ) else: - assert self.rpc is not None, self._node_msg( - "Error: RPC not initialized") - assert self.rpc_connected, self._node_msg( - "Error: RPC not connected") + assert self.rpc is not None, self._node_msg("Error: RPC not initialized") + assert self.rpc_connected, self._node_msg("Error: RPC not connected") wallet_path = f"wallet/{urllib.parse.quote(wallet_name)}" - return RPCOverloadWrapper(self.rpc / wallet_path, - descriptors=self.descriptors) + return RPCOverloadWrapper( + self.rpc / wallet_path, descriptors=self.descriptors + ) - def stop_node(self, expected_stderr='', *, wait=0, - 
wait_until_stopped=True): + def stop_node(self, expected_stderr="", *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return @@ -461,10 +510,9 @@ # Check that stderr is as expected self.stderr.seek(0) - stderr = self.stderr.read().decode('utf-8').strip() + stderr = self.stderr.read().decode("utf-8").strip() if stderr != expected_stderr: - raise AssertionError( - f"Unexpected stderr {stderr} != {expected_stderr}") + raise AssertionError(f"Unexpected stderr {stderr} != {expected_stderr}") self.stdout.close() self.stderr.close() @@ -487,7 +535,8 @@ # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( - f"Node returned non-zero exit code ({return_code}) when stopping") + f"Node returned non-zero exit code ({return_code}) when stopping" + ) self.running = False self.process = None self.rpc_connected = False @@ -497,9 +546,8 @@ def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until_helper( - self.is_node_stopped, - timeout=timeout, - timeout_factor=self.timeout_factor) + self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor + ) @property def chain_path(self) -> Path: @@ -507,10 +555,10 @@ @property def debug_log_path(self) -> Path: - return self.chain_path / 'debug.log' + return self.chain_path / "debug.log" def debug_log_bytes(self) -> int: - with open(self.debug_log_path, encoding='utf-8') as dl: + with open(self.debug_log_path, encoding="utf-8") as dl: dl.seek(0, 2) return dl.tell() @@ -537,19 +585,18 @@ while True: found = True - with open(self.debug_log_path, encoding='utf-8') as dl: + with open(self.debug_log_path, encoding="utf-8") as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for unexpected_msg in unexpected_msgs: - if re.search(re.escape(unexpected_msg), - log, flags=re.MULTILINE): + if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): self._raise_assertion_error( f'Unexpected message "{unexpected_msg}" partially matches ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) for expected_msg in expected_msgs: - if re.search(re.escape(expected_msg), log, - flags=re.MULTILINE) is None: + if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return @@ -558,11 +605,17 @@ time.sleep(0.05) self._raise_assertion_error( f'Expected messages "{expected_msgs}" does not partially match ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) @contextlib.contextmanager def wait_for_debug_log( - self, expected_msgs: List[bytes], timeout=60, interval=0.05, chatty_callable=None): + self, + expected_msgs: List[bytes], + timeout=60, + interval=0.05, + chatty_callable=None, + ): """ Block until we see all the debug log messages or until we exceed the timeout. If a chatty_callable is provided, it is repeated at every iteration. 
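The debug-log helpers reformatted above share one pattern: record the log's byte offset before an action, then read only what was appended afterwards. A self-contained sketch of that pattern, where a throwaway temp file and a hand-written line stand in for the node's debug.log and a real log message:

    # Standalone illustration of the offset-and-tail pattern behind
    # debug_log_bytes() and the assertion loop above; paths are throwaway.
    import os
    import tempfile

    log_path = os.path.join(tempfile.mkdtemp(), "debug.log")
    with open(log_path, "w", encoding="utf-8") as f:
        f.write("earlier output\n")

    def log_size(path):
        with open(path, encoding="utf-8") as f:
            f.seek(0, 2)  # jump to end of file
            return f.tell()

    prev_size = log_size(log_path)
    # A real test would trigger node behaviour here; we append by hand.
    with open(log_path, "a", encoding="utf-8") as f:
        f.write("expected message\n")

    with open(log_path, encoding="utf-8") as f:
        f.seek(prev_size)  # skip everything logged before the action
        assert "expected message" in f.read()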
@@ -592,15 +645,17 @@ return if time.time() >= time_end: - print_log = " - " + \ - "\n - ".join([f"\n - {line.decode()}" for line in log.splitlines()]) + print_log = " - " + "\n - ".join( + [f"\n - {line.decode()}" for line in log.splitlines()] + ) break time.sleep(interval) self._raise_assertion_error( f'Expected messages "{str(expected_msgs)}" does not partially match ' - f'log:\n\n{print_log}\n\n') + f"log:\n\n{print_log}\n\n" + ) @contextlib.contextmanager def profile_with_perf(self, profile_name: str): @@ -627,25 +682,32 @@ subp = None def test_success(cmd): - return subprocess.call( - # shell=True required for pipe use below - cmd, shell=True, - stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 + return ( + subprocess.call( + # shell=True required for pipe use below + cmd, + shell=True, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + == 0 + ) - if not sys.platform.startswith('linux'): + if not sys.platform.startswith("linux"): self.log.warning( - "Can't profile with perf; only availabe on Linux platforms") + "Can't profile with perf; only available on Linux platforms" + ) return None - if not test_success('which perf'): - self.log.warning( - "Can't profile with perf; must install perf-tools") + if not test_success("which perf"): + self.log.warning("Can't profile with perf; must install perf-tools") return None - if not test_success( - f'readelf -S {shlex.quote(self.binary)} | grep .debug_str'): + if not test_success(f"readelf -S {shlex.quote(self.binary)} | grep .debug_str"): self.log.warning( - "perf output won't be very useful without debug symbols compiled into bitcoind") + "perf output won't be very useful without debug symbols compiled into" + " bitcoind" + ) output_path = tempfile.NamedTemporaryFile( dir=self.datadir, @@ -654,18 +716,20 @@ ).name cmd = [ - 'perf', 'record', - '-g', # Record the callgraph. + "perf", + "record", + "-g", # Record the callgraph. # Compatibility for gcc's --fomit-frame-pointer. - '--call-graph', 'dwarf', - '-F', '101', # Sampling frequency in Hz. - '-p', str(self.process.pid), - '-o', output_path, + "--call-graph", + "dwarf", + "-F", + "101", # Sampling frequency in Hz. + "-p", + str(self.process.pid), + "-o", + output_path, ] - subp = subprocess.Popen( - cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp @@ -673,64 +737,81 @@ def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) - output_path = subp.args[subp.args.index('-o') + 1] + output_path = subp.args[subp.args.index("-o") + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() - if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: + if "Consider tweaking /proc/sys/kernel/perf_event_paranoid" in stderr: self.log.warning( "perf couldn't collect data! Try " - "'sudo sysctl -w kernel.perf_event_paranoid=-1'") + "'sudo sysctl -w kernel.perf_event_paranoid=-1'" + ) else: report_cmd = f"perf report -i {output_path}" self.log.info(f"See perf output by running '{report_cmd}'") def assert_start_raises_init_error( - self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): + self, + extra_args=None, + expected_msg=None, + match=ErrorMatch.FULL_TEXT, + *args, + **kwargs, + ): """Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. - Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" + Will throw if an expected_msg is provided and it does not match bitcoind's stderr. + """ + with tempfile.NamedTemporaryFile( + dir=self.stderr_dir, delete=False + ) as log_stderr, tempfile.NamedTemporaryFile( + dir=self.stdout_dir, delete=False + ) as log_stdout: try: - self.start(extra_args, stdout=log_stdout, - stderr=log_stderr, *args, **kwargs) + self.start( + extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs + ) ret = self.process.wait(timeout=self.rpc_timeout) - self.log.debug(self._node_msg( - f'bitcoind exited with status {ret} during initialization')) + self.log.debug( + self._node_msg( + f"bitcoind exited with status {ret} during initialization" + ) + ) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) - stderr = log_stderr.read().decode('utf-8').strip() + stderr = log_stderr.read().decode("utf-8").strip() if match == ErrorMatch.PARTIAL_REGEX: - if re.search(expected_msg, stderr, - flags=re.MULTILINE) is None: + if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not partially ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' - f'match stderr:\n"{stderr}"') + f'match stderr:\n"{stderr}"' + ) except subprocess.TimeoutExpired: self.process.kill() self.running = False self.process = None - assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s ' + assert_msg = f"bitcoind should have exited within {self.rpc_timeout}s " if expected_msg is None: assert_msg += "with an error" else: @@ -744,7 +825,7 @@ return self.relay_fee_cache def calculate_fee(self, tx): - """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: + """Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: - the current relayfee on node - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr - all inputs currently unsigned (empty scriptSig) @@ -766,19 +847,16 @@ This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" - if 'dstport' not in kwargs: - kwargs['dstport'] = p2p_port(self.index) - if 'dstaddr' not in kwargs: - kwargs['dstaddr'] = '127.0.0.1' + if "dstport" not in kwargs: + kwargs["dstport"] = p2p_port(self.index) + if "dstaddr" not in kwargs: + kwargs["dstaddr"] = "127.0.0.1" p2p_conn.peer_connect( - **kwargs, - net=self.chain, - timeout_factor=self.timeout_factor)() + **kwargs, net=self.chain, timeout_factor=self.timeout_factor + )() self.p2ps.append(p2p_conn) - p2p_conn.wait_until( - lambda: p2p_conn.is_connected, - check_connected=False) + p2p_conn.wait_until(lambda: p2p_conn.is_connected,
check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() @@ -799,12 +877,13 @@ # string. This checks the node's newest peer. It could be racy if # another Bitcoin ABC node has connected since we opened our # connection, but we don't expect that to happen. - assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) + assert_equal(self.getpeerinfo()[-1]["subver"], P2P_SUBVERSION) return p2p_conn def add_outbound_p2p_connection( - self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): + self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs + ): """Add an outbound p2p connection from node. Must be an "outbound-full-relay", "block-relay-only", "addr-fetch", "feeler" or "avalanche" connection. @@ -813,26 +892,26 @@ """ def addconnection_callback(address, port): - self.log.debug( - f"Connecting to {address}:{port} {connection_type}") - self.addconnection(f'{address}:{port}', connection_type) + self.log.debug(f"Connecting to {address}:{port} {connection_type}") + self.addconnection(f"{address}:{port}", connection_type) p2p_conn.peer_accept_connection( connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, - **kwargs)() + **kwargs, + )() if connection_type == "feeler": # feeler connections are closed as soon as the node receives a # `version` message p2p_conn.wait_until( - lambda: p2p_conn.message_count["version"] == 1, - check_connected=False) + lambda: p2p_conn.message_count["version"] == 1, check_connected=False + ) p2p_conn.wait_until( - lambda: not p2p_conn.is_connected, - check_connected=False) + lambda: not p2p_conn.is_connected, check_connected=False + ) else: p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) @@ -844,8 +923,9 @@ def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() - if peer['subver'] == P2P_SUBVERSION]) + return len( + [peer for peer in self.getpeerinfo() if peer["subver"] == P2P_SUBVERSION] + ) def disconnect_p2ps(self): """Close all p2p connections to the node.""" @@ -853,8 +933,10 @@ p.peer_disconnect() del self.p2ps[:] - wait_until_helper(lambda: self.num_test_p2p_connections() == 0, - timeout_factor=self.timeout_factor) + wait_until_helper( + lambda: self.num_test_p2p_connections() == 0, + timeout_factor=self.timeout_factor, + ) class TestNodeCLIAttr: @@ -873,7 +955,7 @@ if isinstance(arg, bool): return str(arg).lower() elif arg is None: - return 'null' + return "null" elif isinstance(arg, dict) or isinstance(arg, list): return json.dumps(arg, default=EncodeDecimal) else: @@ -888,7 +970,7 @@ self.binary = binary self.datadir = datadir self.input = None - self.log = logging.getLogger('TestFramework.bitcoincli') + self.log = logging.getLogger("TestFramework.bitcoincli") self.emulator = emulator def __call__(self, *options, cli_input=None): @@ -913,11 +995,13 @@ def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. 
Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] - named_args = [str(key) + "=" + arg_to_cli(value) - for (key, value) in kwargs.items()] - assert not (pos_args and named_args), \ - "Cannot use positional arguments and named arguments in the same " \ + named_args = [ + str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items() + ] + assert not (pos_args and named_args), ( + "Cannot use positional arguments and named arguments in the same " "bitcoin-cli call" + ) p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] @@ -927,19 +1011,24 @@ self.log.debug(f"Running bitcoin-cli {p_args[2:]}") if self.emulator is not None: p_args = [self.emulator] + p_args - process = subprocess.Popen(p_args, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + process = subprocess.Popen( + p_args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: - match = re.match( - r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) + match = re.match(r"error code: ([-0-9]+)\nerror message:\n(.*)", cli_stderr) if match: code, message = match.groups() raise JSONRPCException({"code": int(code), "message": message}) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( - returncode, self.binary, output=cli_stderr) + returncode, self.binary, output=cli_stderr + ) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): @@ -955,86 +1044,108 @@ def __getattr__(self, name): return getattr(self.rpc, name) - def createwallet(self, wallet_name, disable_private_keys=None, blank=None, - passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None): + def createwallet( + self, + wallet_name, + disable_private_keys=None, + blank=None, + passphrase="", + avoid_reuse=None, + descriptors=None, + load_on_startup=None, + ): if descriptors is None: descriptors = self.descriptors - return self.__getattr__('createwallet')( - wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup) + return self.__getattr__("createwallet")( + wallet_name, + disable_private_keys, + blank, + passphrase, + avoid_reuse, + descriptors, + load_on_startup, + ) def importprivkey(self, privkey, label=None, rescan=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importprivkey')(privkey, label, rescan) - desc = descsum_create('combo(' + privkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importprivkey")(privkey, label, rescan) + desc = descsum_create("combo(" + privkey + ")") + req = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) - def addmultisigaddress(self, nrequired, keys, - label=None): + def addmultisigaddress(self, 
nrequired, keys, label=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('addmultisigaddress')( - nrequired, keys, label) + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("addmultisigaddress")(nrequired, keys, label) cms = self.createmultisig(nrequired, keys) - req = [{ - 'desc': cms['descriptor'], - 'timestamp': 0, - 'label': label if label else '' - }] + req = [ + {"desc": cms["descriptor"], "timestamp": 0, "label": label if label else ""} + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) return cms def importpubkey(self, pubkey, label=None, rescan=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importpubkey')(pubkey, label, rescan) - desc = descsum_create('combo(' + pubkey + ')') - req = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importpubkey")(pubkey, label, rescan) + desc = descsum_create("combo(" + pubkey + ")") + req = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] import_res = self.importdescriptors(req) - if not import_res[0]['success']: - raise JSONRPCException(import_res[0]['error']) + if not import_res[0]["success"]: + raise JSONRPCException(import_res[0]["error"]) def importaddress(self, address, label=None, rescan=None, p2sh=None): wallet_info = self.getwalletinfo() - if 'descriptors' not in wallet_info or ( - 'descriptors' in wallet_info and not wallet_info['descriptors']): - return self.__getattr__('importaddress')( - address, label, rescan, p2sh) + if "descriptors" not in wallet_info or ( + "descriptors" in wallet_info and not wallet_info["descriptors"] + ): + return self.__getattr__("importaddress")(address, label, rescan, p2sh) is_hex = False try: int(address, 16) is_hex = True - desc = descsum_create('raw(' + address + ')') + desc = descsum_create("raw(" + address + ")") except BaseException: - desc = descsum_create('addr(' + address + ')') - reqs = [{ - 'desc': desc, - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }] + desc = descsum_create("addr(" + address + ")") + reqs = [ + { + "desc": desc, + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ] if is_hex and p2sh: - reqs.append({ - 'desc': descsum_create('p2sh(raw(' + address + '))'), - 'timestamp': 0 if rescan else 'now', - 'label': label if label else '' - }) + reqs.append( + { + "desc": descsum_create("p2sh(raw(" + address + "))"), + "timestamp": 0 if rescan else "now", + "label": label if label else "", + } + ) import_res = self.importdescriptors(reqs) for res in import_res: - if not res['success']: - raise JSONRPCException(res['error']) + if not res["success"]: + raise JSONRPCException(res["error"]) diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py --- a/test/functional/test_framework/txtools.py +++ b/test/functional/test_framework/txtools.py @@ -16,7 
+16,7 @@ return random.randbytes(size) # type: ignore[attr-defined] # slower workaround if not size: - return b'' + return b"" return bytes.fromhex(f"{random.randrange(2**(8*size)):0{2*size}x}") @@ -48,9 +48,9 @@ data_size = required_padding - VOUT_VALUE_SIZE - 3 was_op_pushdata1_used = True - if data_size <= 0x4c: + if data_size <= 0x4C: was_op_pushdata1_used = False - if data_size == 0x4c: + if data_size == 0x4C: # Adding one more byte to the data causes two more bytes to be # added to the tx size, because of the need for OP_PUSHDATA1. # So remove 10 bytes to add an empty OP_RETURN vout instead in @@ -72,9 +72,7 @@ required_padding -= data_size + VOUT_VALUE_SIZE + 3 - tx.vout.append( - CTxOut(0, CScript([OP_RETURN, get_random_bytes(data_size)])) - ) + tx.vout.append(CTxOut(0, CScript([OP_RETURN, get_random_bytes(data_size)]))) tx.rehash() @@ -103,8 +101,8 @@ def test_size(requested_size, expected_size): self.assertEqual( - rawtx_length(pad_raw_tx(raw_tx, requested_size)), - expected_size) + rawtx_length(pad_raw_tx(raw_tx, requested_size)), expected_size + ) self.assertEqual(rawtx_length(raw_tx), 85) @@ -117,10 +115,8 @@ # because a VOUT with an empty OP_RETURN is the minimum data we can # add. for size in [86, 87, 88, 89, 90, 91, 92, 93, 94]: - test_size(requested_size=size, - expected_size=95) + test_size(requested_size=size, expected_size=95) # After that, the size is exactly as expected. for size in range(95, 1000): - test_size(requested_size=size, - expected_size=size) + test_size(requested_size=size, expected_size=size) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -49,16 +49,19 @@ target_fee = satoshi_round(tx_size * fee_per_kB / 1000) if fee < (tx_size - wiggleroom) * fee_per_kB / 1000: raise AssertionError( - f"Fee of {str(fee)} XEC too low! (Should be {str(target_fee)} XEC)") + f"Fee of {str(fee)} XEC too low! (Should be {str(target_fee)} XEC)" + ) if fee > (tx_size + wiggleroom) * fee_per_kB / 1000: raise AssertionError( - f"Fee of {str(fee)} XEC too high! (Should be {str(target_fee)} XEC)") + f"Fee of {str(fee)} XEC too high! (Should be {str(target_fee)} XEC)" + ) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError( - f"not({' == '.join(str(arg) for arg in (thing1, thing2) + args)})") + f"not({' == '.join(str(arg) for arg in (thing1, thing2) + args)})" + ) def assert_greater_than(thing1, thing2): @@ -79,21 +82,22 @@ try: fun(*args, **kwds) except JSONRPCException: - raise AssertionError( - "Use assert_raises_rpc_error() to test RPC failures") + raise AssertionError("Use assert_raises_rpc_error() to test RPC failures") except exc as e: - if message is not None and message not in e.error['message']: + if message is not None and message not in e.error["message"]: raise AssertionError( - f"Expected substring not found in error message:\nsubstring: '{message}'\nerror message: '{e.error['message']}'.") + "Expected substring not found in error message:\nsubstring:" + f" '{message}'\nerror message: '{e.error['message']}'." 
+ ) except Exception as e: - raise AssertionError( - f"Unexpected exception raised: {type(e).__name__}") + raise AssertionError(f"Unexpected exception raised: {type(e).__name__}") else: raise AssertionError("No exception raised") def assert_raises_process_error( - returncode: int, output: str, fun: Callable, *args, **kwds): + returncode: int, output: str, fun: Callable, *args, **kwds +): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError @@ -111,8 +115,7 @@ fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: - raise AssertionError( - f"Unexpected returncode {e.returncode}") + raise AssertionError(f"Unexpected returncode {e.returncode}") if output not in e.output: raise AssertionError(f"Expected substring not found:{e.output}") else: @@ -120,7 +123,8 @@ def assert_raises_rpc_error( - code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds): + code: Optional[int], message: Optional[str], fun: Callable, *args, **kwds +): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException @@ -150,15 +154,15 @@ # JSONRPCException was thrown as expected. Check the code and message # values are correct. if (code is not None) and (code != e.error["code"]): + raise AssertionError(f"Unexpected JSONRPC error code {e.error['code']}") + if (message is not None) and (message not in e.error["message"]): raise AssertionError( - f"Unexpected JSONRPC error code {e.error['code']}") - if (message is not None) and (message not in e.error['message']): - raise AssertionError( - f"Expected substring not found in error message:\nsubstring: '{message}'\nerror message: '{e.error['message']}'.") + "Expected substring not found in error message:\nsubstring:" + f" '{message}'\nerror message: '{e.error['message']}'." + ) return True except Exception as e: - raise AssertionError( - f"Unexpected exception raised: {type(e).__name__}") + raise AssertionError(f"Unexpected exception raised: {type(e).__name__}") else: return False @@ -168,23 +172,22 @@ int(string, 16) except Exception as e: raise AssertionError( - f"Couldn't interpret {string!r} as hexadecimal; raised: {e}") + f"Couldn't interpret {string!r} as hexadecimal; raised: {e}" + ) def assert_is_hash_string(string, length=64): if not isinstance(string, str): - raise AssertionError( - f"Expected a string, got type {type(string)!r}") + raise AssertionError(f"Expected a string, got type {type(string)!r}") elif length and len(string) != length: + raise AssertionError(f"String of length {length} expected; got {len(string)}") + elif not re.match("[abcdef0-9]+$", string): raise AssertionError( - f"String of length {length} expected; got {len(string)}") - elif not re.match('[abcdef0-9]+$', string): - raise AssertionError( - f"String {string!r} contains invalid characters for a hash.") + f"String {string!r} contains invalid characters for a hash." 
+ ) -def assert_array_result(object_array, to_match, expected, - should_not_find=False): +def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value @@ -213,6 +216,7 @@ if num_matched > 0 and should_not_find: raise AssertionError(f"Objects were found {str(to_match)}") + # Utility functions ################### @@ -237,21 +241,27 @@ def str_to_b64str(string): - return b64encode(string.encode('utf-8')).decode('ascii') + return b64encode(string.encode("utf-8")).decode("ascii") def satoshi_round(amount): - return Decimal(amount).quantize(Decimal('0.01'), rounding=ROUND_DOWN) + return Decimal(amount).quantize(Decimal("0.01"), rounding=ROUND_DOWN) def iter_chunks(lst: list, n: int): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): - yield lst[i:i + n] + yield lst[i : i + n] -def wait_until_helper(predicate, *, attempts=float('inf'), - timeout=float('inf'), lock=None, timeout_factor=1.0): +def wait_until_helper( + predicate, + *, + attempts=float("inf"), + timeout=float("inf"), + lock=None, + timeout_factor=1.0, +): """Sleep until the predicate resolves to be True. Warning: Note that this method is not recommended to be used in tests as it is @@ -260,7 +270,7 @@ properly scaled. Furthermore, `wait_until()` from `P2PInterface` class in `p2p.py` has a preset lock. """ - if attempts == float('inf') and timeout == float('inf'): + if attempts == float("inf") and timeout == float("inf"): timeout = 60 timeout = timeout * timeout_factor attempt = 0 @@ -282,11 +292,14 @@ logger.error(f"wait_until() failed. Predicate: {predicate_source}") if attempt >= attempts: raise AssertionError( - f"Predicate {predicate_source} not true after {attempts} attempts") + f"Predicate {predicate_source} not true after {attempts} attempts" + ) elif time.time() >= time_end: raise AssertionError( - f"Predicate {predicate_source} not true after {timeout} seconds") - raise RuntimeError('Unreachable') + f"Predicate {predicate_source} not true after {timeout} seconds" + ) + raise RuntimeError("Unreachable") + # RPC/P2P connection constants and functions ############################################ @@ -302,7 +315,7 @@ MAX_NODES = 64 # Don't assign rpc or p2p ports lower than this (for example: 18333 is the # default testnet port) -PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=20000)) +PORT_MIN = int(os.getenv("TEST_RUNNER_PORT_MIN", default=20000)) # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 # The number of times we increment the port counters and test it again before @@ -340,13 +353,14 @@ """ proxy_kwargs = {} if timeout is not None: - proxy_kwargs['timeout'] = int(timeout) + proxy_kwargs["timeout"] = int(timeout) proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info - coverage_logfile = coverage.get_filename( - coveragedir, node_number) if coveragedir else None + coverage_logfile = ( + coverage.get_filename(coveragedir, node_number) if coveragedir else None + ) return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) @@ -358,9 +372,9 @@ global LAST_USED_PORT_MAP assert PortSeed.n is not None LAST_USED_PORT_MAP[port_name] = ( - PORT_MIN + - PORT_START_MAP[port_name] + - (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + PORT_MIN + + PORT_START_MAP[port_name] + + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) ) @@ -368,7 +382,7 @@ with 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) try: - sock.bind(('127.0.0.1', port)) + sock.bind(("127.0.0.1", port)) return True except OSError: return False @@ -390,7 +404,8 @@ return LAST_USED_PORT_MAP[port_name] raise RuntimeError( - f"Could not find available {port_name} port after {MAX_PORT_RETRY} attempts.") + f"Could not find available {port_name} port after {MAX_PORT_RETRY} attempts." + ) def p2p_port(n: int) -> int: @@ -408,9 +423,10 @@ def rpc_url(datadir, chain, host, port): rpc_u, rpc_p = get_auth_cookie(datadir, chain) if host is None: - host = '127.0.0.1' + host = "127.0.0.1" return f"http://{rpc_u}:{rpc_p}@{host}:{int(port)}" + # Node functions ################ @@ -420,13 +436,13 @@ if not os.path.isdir(datadir): os.makedirs(datadir) # Translate chain name to config name - if chain == 'testnet3': - chain_name_conf_arg = 'testnet' - chain_name_conf_section = 'test' + if chain == "testnet3": + chain_name_conf_arg = "testnet" + chain_name_conf_section = "test" else: chain_name_conf_arg = chain chain_name_conf_section = chain - with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "w", encoding="utf8") as f: f.write(f"{chain_name_conf_arg}=1\n") f.write(f"[{chain_name_conf_section}]\n") f.write(f"port={str(p2p_port(n))}\n") @@ -451,8 +467,8 @@ f.write("shrinkdebugfile=0\n") if disable_autoconnect: f.write("connect=0\n") - os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) - os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) + os.makedirs(os.path.join(datadir, "stderr"), exist_ok=True) + os.makedirs(os.path.join(datadir, "stdout"), exist_ok=True) return datadir @@ -461,7 +477,7 @@ def append_config(datadir, options): - with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "a", encoding="utf8") as f: for option in options: f.write(f"{option}\n") @@ -470,18 +486,20 @@ user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): - with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f: + with open(os.path.join(datadir, "bitcoin.conf"), "r", encoding="utf8") as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): - assert password is None # Ensure that there is only one rpcpassword line + assert ( + password is None + ) # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") try: - with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f: + with open(os.path.join(datadir, chain, ".cookie"), "r", encoding="ascii") as f: userpass = f.read() - split_userpass = userpass.split(':') + split_userpass = userpass.split(":") user = split_userpass[0] password = split_userpass[1] except OSError: @@ -540,6 +558,7 @@ # the txout for change txouts = [] from .messages import CTxOut + txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = bytes.fromhex(script_pubkey) @@ -547,6 +566,7 @@ txouts.append(txout) return txouts + # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. 
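As a worked example of the port-reservation formula reformatted a few hunks above (PORT_MIN, PORT_RANGE and MAX_NODES match this file; the map entry and seed value are hypothetical):

    # Worked example of the LAST_USED_PORT_MAP arithmetic; port_start and n
    # are invented for illustration (n plays the role of PortSeed.n).
    PORT_MIN = 20000
    PORT_RANGE = 5000
    MAX_NODES = 64
    port_start = 0  # hypothetical PORT_START_MAP entry
    n = 17          # hypothetical port seed

    port = PORT_MIN + port_start + (MAX_NODES * n) % (PORT_RANGE - 1 - MAX_NODES)
    print(port)  # 20000 + 0 + (1088 % 4935) == 21088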
@@ -555,11 +575,12 @@ addr = node.getnewaddress() txids = [] from .messages import CTransaction + for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} - change = t['amount'] - fee + change = t["amount"] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) tx = CTransaction() @@ -567,8 +588,7 @@ for txout in txouts: tx.vout.append(txout) newtx = tx.serialize().hex() - signresult = node.signrawtransactionwithwallet( - newtx, None, "NONE|FORKID") + signresult = node.signrawtransactionwithwallet(newtx, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], 0) txids.append(txid) return txids @@ -583,8 +603,7 @@ for i in range(len(tx["vout"])): if any(addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]): return i - raise RuntimeError( - f"Vout not found for address: txid={txid}, addr={addr}") + raise RuntimeError(f"Vout not found for address: txid={txid}, addr={addr}") def modinv(a, n): diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -49,8 +49,8 @@ self._utxos = [] self._address = ADDRESS_ECREG_P2SH_OP_TRUE self._scriptPubKey = bytes.fromhex( - self._test_node.validateaddress( - self._address)['scriptPubKey']) + self._test_node.validateaddress(self._address)["scriptPubKey"] + ) def _create_utxo(self, *, txid, vout, value, height): return {"txid": txid, "vout": vout, "value": value, "height": height} @@ -59,15 +59,18 @@ """Drop all utxos and rescan the utxo set""" self._utxos = [] res = self._test_node.scantxoutset( - action="start", scanobjects=[f'raw({self._scriptPubKey.hex()})']) - assert_equal(True, res['success']) - for utxo in res['unspents']: + action="start", scanobjects=[f"raw({self._scriptPubKey.hex()})"] + ) + assert_equal(True, res["success"]) + for utxo in res["unspents"]: self._utxos.append( self._create_utxo( txid=utxo["txid"], vout=utxo["vout"], value=utxo["amount"], - height=utxo["height"])) + height=utxo["height"], + ) + ) def scan_tx(self, tx): """Scan the tx and adjust the internal list of owned utxos""" @@ -80,19 +83,19 @@ self.get_utxo(txid=spent["txid"], vout=spent["vout"]) except StopIteration: pass - for out in tx['vout']: - if out['scriptPubKey']['hex'] == self._scriptPubKey.hex(): + for out in tx["vout"]: + if out["scriptPubKey"]["hex"] == self._scriptPubKey.hex(): self._utxos.append( self._create_utxo( - txid=tx["txid"], - vout=out["n"], - value=out["value"], - height=0)) + txid=tx["txid"], vout=out["n"], value=out["value"], height=0 + ) + ) def generate(self, num_blocks, **kwargs): """Generate blocks with coinbase outputs to the internal address, and call rescan_utxos""" blocks = self._test_node.generatetodescriptor( - num_blocks, f'raw({self._scriptPubKey.hex()})', **kwargs) + num_blocks, f"raw({self._scriptPubKey.hex()})", **kwargs + ) # Calling rescan_utxos here makes sure that after a generate the utxo # set is in a clean state. 
For example, the wallet will update # - if the caller consumed utxos, but never used them @@ -106,7 +109,7 @@ def get_scriptPubKey(self): return self._scriptPubKey - def get_utxo(self, *, txid: str = '', vout: Optional[int] = None): + def get_utxo(self, *, txid: str = "", vout: Optional[int] = None): """ Returns a utxo and marks it as spent (pops it from the internal list) @@ -114,18 +117,14 @@ txid: get the first utxo we find from a specific transaction """ # Put the largest utxo last - self._utxos = sorted( - self._utxos, key=lambda k: ( - k['value'], -k['height'])) + self._utxos = sorted(self._utxos, key=lambda k: (k["value"], -k["height"])) if txid: - utxo_filter: Any = filter( - lambda utxo: txid == utxo['txid'], self._utxos) + utxo_filter: Any = filter(lambda utxo: txid == utxo["txid"], self._utxos) else: # By default the largest utxo utxo_filter = reversed(self._utxos) if vout is not None: - utxo_filter = filter( - lambda utxo: vout == utxo['vout'], utxo_filter) + utxo_filter = filter(lambda utxo: vout == utxo["vout"], utxo_filter) index = self._utxos.index(next(utxo_filter)) return self._utxos.pop(index) @@ -133,9 +132,7 @@ def send_self_transfer(self, **kwargs): """Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed.""" tx = self.create_self_transfer(**kwargs) - self.sendrawtransaction( - from_node=kwargs['from_node'], - tx_hex=tx['hex']) + self.sendrawtransaction(from_node=kwargs["from_node"], tx_hex=tx["hex"]) return tx def send_to(self, *, from_node, scriptPubKey, amount, fee=1000): @@ -151,28 +148,29 @@ tx = self.create_self_transfer(from_node=from_node, fee_rate=0)["tx"] assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee) # change output -> MiniWallet - tx.vout[0].nValue -= (amount + fee) + tx.vout[0].nValue -= amount + fee # arbitrary output -> to be returned tx.vout.append(CTxOut(amount, scriptPubKey)) - txid = self.sendrawtransaction( - from_node=from_node, - tx_hex=tx.serialize().hex()) + txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex()) return txid, len(tx.vout) - 1 - def create_self_transfer(self, *, fee_rate=Decimal("3000.00"), - from_node, utxo_to_spend=None, locktime=0): + def create_self_transfer( + self, *, fee_rate=Decimal("3000.00"), from_node, utxo_to_spend=None, locktime=0 + ): """Create and return a tx with the specified fee_rate. 
Fee may be exact or at most one satoshi higher than needed.""" utxo_to_spend = utxo_to_spend or self.get_utxo() # The size will be enforced by pad_tx() size = 100 send_value = satoshi_round( - utxo_to_spend['value'] - fee_rate * (Decimal(size) / 1000)) + utxo_to_spend["value"] - fee_rate * (Decimal(size) / 1000) + ) assert send_value > 0 tx = CTransaction() - tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), - utxo_to_spend['vout']))] + tx.vin = [ + CTxIn(COutPoint(int(utxo_to_spend["txid"], 16), utxo_to_spend["vout"])) + ] tx.vout = [CTxOut(int(send_value * XEC), self._scriptPubKey)] tx.nLockTime = locktime tx.vin[0].scriptSig = SCRIPTSIG_OP_TRUE @@ -181,10 +179,10 @@ assert_equal(len(tx.serialize()), size) new_utxo = self._create_utxo( - txid=tx.rehash(), vout=0, value=send_value, height=0) + txid=tx.rehash(), vout=0, value=send_value, height=0 + ) - return {"txid": new_utxo["txid"], - "hex": tx_hex, "tx": tx, "new_utxo": new_utxo} + return {"txid": new_utxo["txid"], "hex": tx_hex, "tx": tx, "new_utxo": new_utxo} def sendrawtransaction(self, *, from_node, tx_hex): txid = from_node.sendrawtransaction(tx_hex) @@ -194,14 +192,15 @@ def getnewdestination(): """Generate a random destination and return the corresponding public key, - scriptPubKey and address. Can be used when a random destination is - needed, but no compiled wallet is available (e.g. as replacement to the - getnewaddress/getaddressinfo RPCs).""" + scriptPubKey and address. Can be used when a random destination is + needed, but no compiled wallet is available (e.g. as replacement to the + getnewaddress/getaddressinfo RPCs).""" key = ECKey() key.generate() pubkey = key.get_pubkey().get_bytes() scriptpubkey = CScript( - [OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG]) + [OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG] + ) return pubkey, scriptpubkey, key_to_p2pkh(pubkey) @@ -209,8 +208,7 @@ """Converts a given address to the corresponding output script (scriptPubKey).""" payload, version = base58_to_byte(address) if version == 111: # testnet pubkey hash - return CScript([OP_DUP, OP_HASH160, payload, - OP_EQUALVERIFY, OP_CHECKSIG]) + return CScript([OP_DUP, OP_HASH160, payload, OP_EQUALVERIFY, OP_CHECKSIG]) elif version == 196: # testnet script hash return CScript([OP_HASH160, payload, OP_EQUAL]) # TODO: also support other address formats @@ -218,8 +216,16 @@ assert False -def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, - parent_locking_script=None, fee=DEFAULT_FEE): +def make_chain( + node, + address, + privkeys, + parent_txid, + parent_value, + n=0, + parent_locking_script=None, + fee=DEFAULT_FEE, +): """Build a transaction that spends parent_txid.vout[n] and produces one output with amount = parent_value with a fee deducted. 
@@ -218,8 +216,16 @@
     assert False

-def make_chain(node, address, privkeys, parent_txid, parent_value, n=0,
-               parent_locking_script=None, fee=DEFAULT_FEE):
+def make_chain(
+    node,
+    address,
+    privkeys,
+    parent_txid,
+    parent_value,
+    n=0,
+    parent_locking_script=None,
+    fee=DEFAULT_FEE,
+):
     """Build a transaction that spends parent_txid.vout[n] and produces one
     output with amount = parent_value with a fee deducted.
     Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the
@@ -229,21 +235,29 @@
     my_value = parent_value - fee
     outputs = {address: my_value}
     rawtx = node.createrawtransaction(inputs, outputs)
-    prevtxs = [{
-        "txid": parent_txid,
-        "vout": n,
-        "scriptPubKey": parent_locking_script,
-        "amount": parent_value,
-    }] if parent_locking_script else None
+    prevtxs = (
+        [
+            {
+                "txid": parent_txid,
+                "vout": n,
+                "scriptPubKey": parent_locking_script,
+                "amount": parent_value,
+            }
+        ]
+        if parent_locking_script
+        else None
+    )
     signedtx = node.signrawtransactionwithkey(
-        hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
+        hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs
+    )
     assert signedtx["complete"]
     tx = FromHex(CTransaction(), signedtx["hex"])
     return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())

-def create_child_with_parents(node, address, privkeys, parents_tx, values,
-                              locking_scripts, fee=DEFAULT_FEE):
+def create_child_with_parents(
+    node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE
+):
     """Creates a transaction that spends the first output of each parent in parents_tx."""
     num_parents = len(parents_tx)
     total_value = sum(values)
@@ -253,10 +267,16 @@
     prevtxs = []
     for i in range(num_parents):
         prevtxs.append(
-            {"txid": parents_tx[i].get_id(), "vout": 0,
-             "scriptPubKey": locking_scripts[i], "amount": values[i]})
+            {
+                "txid": parents_tx[i].get_id(),
+                "vout": 0,
+                "scriptPubKey": locking_scripts[i],
+                "amount": values[i],
+            }
+        )
     signedtx_child = node.signrawtransactionwithkey(
-        hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs)
+        hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs
+    )
     assert signedtx_child["complete"]
     return signedtx_child["hex"]
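The two chain helpers compose: `make_chain` returns exactly the tuple that `create_child_with_parents` consumes. A hedged sketch, where `node`, `address`, `privkey`, `parent_txid` and `parent_value` are placeholder fixtures standing in for a real test's setup:

from test_framework.wallet import create_child_with_parents, make_chain

# Spend one confirmed parent output into a new tx, then build a child
# that spends the new tx's output 0.
tx, tx_hex, value, locking_script = make_chain(
    node, address, [privkey], parent_txid, parent_value
)
child_hex = create_child_with_parents(
    node, address, [privkey], [tx], [value], [locking_script]
)
node.sendrawtransaction(tx_hex)
node.sendrawtransaction(child_hex)
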
@@ -274,7 +294,8 @@
     for _ in range(chain_length):
         (tx, txhex, value, parent_locking_script) = make_chain(
-            node, address, privkeys, txid, value, 0, parent_locking_script)
+            node, address, privkeys, txid, value, 0, parent_locking_script
+        )
         txid = tx.get_id()
         chain_hex.append(txhex)
         chain_txns.append(tx)
@@ -283,7 +304,7 @@

 def bulk_transaction(
-        tx: CTransaction, node, target_size: int, privkeys=None, prevtxs=None
+    tx: CTransaction, node, target_size: int, privkeys=None, prevtxs=None
 ) -> CTransaction:
     """Return a padded and signed transaction. The original transaction is left
     unaltered.
@@ -294,8 +315,7 @@
     pad_tx(tx_heavy, target_size)
     assert_greater_than_or_equal(tx_heavy.billable_size(), target_size)
     if privkeys is not None:
-        signed_tx = node.signrawtransactionwithkey(
-            ToHex(tx_heavy), privkeys, prevtxs)
+        signed_tx = node.signrawtransactionwithkey(ToHex(tx_heavy), privkeys, prevtxs)
         return FromHex(CTransaction(), signed_tx["hex"])
     # OP_TRUE
     tx_heavy.vin[0].scriptSig = SCRIPTSIG_OP_TRUE
diff --git a/test/functional/test_framework/wallet_util.py b/test/functional/test_framework/wallet_util.py
--- a/test/functional/test_framework/wallet_util.py
+++ b/test/functional/test_framework/wallet_util.py
@@ -20,16 +20,11 @@
     hash160,
 )

-Key = namedtuple('Key', ['privkey',
-                         'pubkey',
-                         'p2pkh_script',
-                         'p2pkh_addr'])
+Key = namedtuple("Key", ["privkey", "pubkey", "p2pkh_script", "p2pkh_addr"])

-Multisig = namedtuple('Multisig', ['privkeys',
-                                   'pubkeys',
-                                   'p2sh_script',
-                                   'p2sh_addr',
-                                   'redeem_script'])
+Multisig = namedtuple(
+    "Multisig", ["privkeys", "pubkeys", "p2sh_script", "p2sh_addr", "redeem_script"]
+)

 def get_key(node):
@@ -37,13 +32,16 @@
     Returns a named tuple of privkey, pubkey and all address and scripts."""
     addr = node.getnewaddress()
-    pubkey = node.getaddressinfo(addr)['pubkey']
+    pubkey = node.getaddressinfo(addr)["pubkey"]
     pkh = hash160(bytes.fromhex(pubkey))
-    return Key(privkey=node.dumpprivkey(addr),
-               pubkey=pubkey,
-               p2pkh_script=CScript(
-                   [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
-               p2pkh_addr=key_to_p2pkh(pubkey))
+    return Key(
+        privkey=node.dumpprivkey(addr),
+        pubkey=pubkey,
+        p2pkh_script=CScript(
+            [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]
+        ).hex(),
+        p2pkh_addr=key_to_p2pkh(pubkey),
+    )

 def get_generate_key():
@@ -55,11 +53,14 @@
     privkey = bytes_to_wif(eckey.get_bytes())
     pubkey = eckey.get_pubkey().get_bytes().hex()
     pkh = hash160(bytes.fromhex(pubkey))
-    return Key(privkey=privkey,
-               pubkey=pubkey,
-               p2pkh_script=CScript(
-                   [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]).hex(),
-               p2pkh_addr=key_to_p2pkh(pubkey))
+    return Key(
+        privkey=privkey,
+        pubkey=pubkey,
+        p2pkh_script=CScript(
+            [OP_DUP, OP_HASH160, pkh, OP_EQUALVERIFY, OP_CHECKSIG]
+        ).hex(),
+        p2pkh_addr=key_to_p2pkh(pubkey),
+    )

 def get_multisig(node):
@@ -70,16 +71,20 @@
     pubkeys = []
     for _ in range(3):
         addr = node.getaddressinfo(node.getnewaddress())
-        addrs.append(addr['address'])
-        pubkeys.append(addr['pubkey'])
-    script_code = CScript([OP_2] + [bytes.fromhex(pubkey)
-                                    for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
-    return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
-                    pubkeys=pubkeys,
-                    p2sh_script=CScript(
-                        [OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
-                    p2sh_addr=script_to_p2sh(script_code),
-                    redeem_script=script_code.hex())
+        addrs.append(addr["address"])
+        pubkeys.append(addr["pubkey"])
+    script_code = CScript(
+        [OP_2]
+        + [bytes.fromhex(pubkey) for pubkey in pubkeys]
+        + [OP_3, OP_CHECKMULTISIG]
+    )
+    return Multisig(
+        privkeys=[node.dumpprivkey(addr) for addr in addrs],
+        pubkeys=pubkeys,
+        p2sh_script=CScript([OP_HASH160, hash160(script_code), OP_EQUAL]).hex(),
+        p2sh_addr=script_to_p2sh(script_code),
+        redeem_script=script_code.hex(),
+    )

 def test_address(node, address, **kwargs):
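`get_multisig` bundles everything a test needs to exercise a 2-of-3 P2SH multisig. A sketch, assuming `node` was built with wallet support (the amount is illustrative):

from test_framework.wallet_util import get_multisig

multisig = get_multisig(node)  # draws three fresh keys via getnewaddress
assert len(multisig.pubkeys) == 3

# Fund the P2SH wrapping of the 2-of-3 CHECKMULTISIG redeem script.
txid = node.sendtoaddress(multisig.p2sh_addr, 1_000_000)
# Spending later requires any two of multisig.privkeys plus
# multisig.redeem_script in the scriptSig.
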
@@ -89,15 +94,17 @@
         if value is None:
             if key in addr_info.keys():
                 raise AssertionError(
-                    f"key {key} unexpectedly returned in getaddressinfo.")
+                    f"key {key} unexpectedly returned in getaddressinfo."
+                )
         elif addr_info[key] != value:
             raise AssertionError(
-                f"key {key} value {addr_info[key]} did not match expected value {value}")
+                f"key {key} value {addr_info[key]} did not match expected value {value}"
+            )

 def bytes_to_wif(b, compressed=True):
     if compressed:
-        b += b'\x01'
+        b += b"\x01"
     return byte_to_base58(b, 239)
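Finally, `bytes_to_wif` pairs naturally with the framework's ECKey, just as `get_generate_key` above does internally; a small sketch:

from test_framework.key import ECKey
from test_framework.wallet_util import bytes_to_wif

eckey = ECKey()
eckey.generate()  # compressed by default
wif = bytes_to_wif(eckey.get_bytes())
# The appended b"\x01" marks a compressed-pubkey key; version byte 239 is
# the testnet/regtest WIF prefix, so the result lands in the 'c...' range.
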