Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_framework/test_node.py
Show First 20 Lines • Show All 60 Lines • ▼ Show 20 Lines | class TestNode: | ||||
- state about the node (whether it's running, etc) | - state about the node (whether it's running, etc) | ||||
- a Python subprocess.Popen object representing the running process | - a Python subprocess.Popen object representing the running process | ||||
- an RPC connection to the node | - an RPC connection to the node | ||||
- one or more P2P connections to the node | - one or more P2P connections to the node | ||||
To make things easier for the test writer, any unrecognised messages will | To make things easier for the test writer, any unrecognised messages will | ||||
be dispatched to the RPC connection.""" | be dispatched to the RPC connection.""" | ||||
def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, chronik_port, timewait, timeout_factor, bitcoind, bitcoin_cli, | def __init__( | ||||
coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, | self, | ||||
use_valgrind=False, descriptors=False): | i, | ||||
datadir, | |||||
*, | |||||
chain, | |||||
host, | |||||
rpc_port, | |||||
p2p_port, | |||||
chronik_port, | |||||
timewait, | |||||
timeout_factor, | |||||
bitcoind, | |||||
bitcoin_cli, | |||||
coverage_dir, | |||||
cwd, | |||||
extra_conf=None, | |||||
extra_args=None, | |||||
use_cli=False, | |||||
emulator=None, | |||||
start_perf=False, | |||||
use_valgrind=False, | |||||
descriptors=False, | |||||
): | |||||
""" | """ | ||||
Kwargs: | Kwargs: | ||||
start_perf (bool): If True, begin profiling the node with `perf` as soon as | start_perf (bool): If True, begin profiling the node with `perf` as soon as | ||||
the node starts. | the node starts. | ||||
""" | """ | ||||
self.index = i | self.index = i | ||||
self.p2p_conn_index = 1 | self.p2p_conn_index = 1 | ||||
self.datadir = datadir | self.datadir = datadir | ||||
self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf") | self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf") | ||||
self.stdout_dir = os.path.join(self.datadir, "stdout") | self.stdout_dir = os.path.join(self.datadir, "stdout") | ||||
self.stderr_dir = os.path.join(self.datadir, "stderr") | self.stderr_dir = os.path.join(self.datadir, "stderr") | ||||
self.chain = chain | self.chain = chain | ||||
self.host = host | self.host = host | ||||
self.rpc_port = rpc_port | self.rpc_port = rpc_port | ||||
self.p2p_port = p2p_port | self.p2p_port = p2p_port | ||||
self.chronik_port = chronik_port | self.chronik_port = chronik_port | ||||
self.name = f"testnode-{i}" | self.name = f"testnode-{i}" | ||||
self.rpc_timeout = timewait | self.rpc_timeout = timewait | ||||
self.binary = bitcoind | self.binary = bitcoind | ||||
if not os.path.isfile(self.binary): | if not os.path.isfile(self.binary): | ||||
raise FileNotFoundError( | raise FileNotFoundError( | ||||
f"Binary '{self.binary}' could not be found.\nTry setting it manually:\n" | f"Binary '{self.binary}' could not be found.\nTry setting it" | ||||
f"\tBITCOIND=<path/to/bitcoind> {sys.argv[0]}") | f" manually:\n\tBITCOIND=<path/to/bitcoind> {sys.argv[0]}" | ||||
) | |||||
self.coverage_dir = coverage_dir | self.coverage_dir = coverage_dir | ||||
self.cwd = cwd | self.cwd = cwd | ||||
self.descriptors = descriptors | self.descriptors = descriptors | ||||
if extra_conf is not None: | if extra_conf is not None: | ||||
append_config(datadir, extra_conf) | append_config(datadir, extra_conf) | ||||
# Most callers will just need to add extra args to the default list | # Most callers will just need to add extra args to the default list | ||||
# below. | # below. | ||||
# For those callers that need more flexibility, they can access the | # For those callers that need more flexibility, they can access the | ||||
# default args using the provided facilities. | # default args using the provided facilities. | ||||
# Note that common args are set in the config file (see | # Note that common args are set in the config file (see | ||||
# initialize_datadir) | # initialize_datadir) | ||||
self.extra_args = extra_args | self.extra_args = extra_args | ||||
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file. | # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. | ||||
# This means that starting a bitcoind using the temp dir to debug a failed test won't | # This means that starting a bitcoind using the temp dir to debug a failed test won't | ||||
# spam debug.log. | # spam debug.log. | ||||
self.default_args = [ | self.default_args = [ | ||||
"-datadir=" + self.datadir, | "-datadir=" + self.datadir, | ||||
"-logtimemicros", | "-logtimemicros", | ||||
"-logthreadnames", | "-logthreadnames", | ||||
"-logsourcelocations", | "-logsourcelocations", | ||||
"-debug", | "-debug", | ||||
"-debugexclude=libevent", | "-debugexclude=libevent", | ||||
"-debugexclude=leveldb", | "-debugexclude=leveldb", | ||||
"-uacomment=" + self.name | "-uacomment=" + self.name, | ||||
] | ] | ||||
if use_valgrind: | if use_valgrind: | ||||
default_suppressions_file = os.path.join( | default_suppressions_file = os.path.join( | ||||
os.path.dirname(os.path.realpath(__file__)), | os.path.dirname(os.path.realpath(__file__)), | ||||
"..", "..", "..", "contrib", "valgrind.supp") | "..", | ||||
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", | "..", | ||||
default_suppressions_file) | "..", | ||||
"contrib", | |||||
"valgrind.supp", | |||||
) | |||||
suppressions_file = os.getenv( | |||||
"VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file | |||||
) | |||||
self.binary = "valgrind" | self.binary = "valgrind" | ||||
self.bitcoind_args = [bitcoind] + self.default_args | self.bitcoind_args = [bitcoind] + self.default_args | ||||
self.default_args = [f"--suppressions={suppressions_file}", | self.default_args = [ | ||||
"--gen-suppressions=all", "--exit-on-first-error=yes", | f"--suppressions={suppressions_file}", | ||||
"--error-exitcode=1", "--quiet"] + self.bitcoind_args | "--gen-suppressions=all", | ||||
"--exit-on-first-error=yes", | |||||
"--error-exitcode=1", | |||||
"--quiet", | |||||
] + self.bitcoind_args | |||||
if emulator is not None: | if emulator is not None: | ||||
if not os.path.isfile(emulator): | if not os.path.isfile(emulator): | ||||
raise FileNotFoundError( | raise FileNotFoundError(f"Emulator '{emulator}' could not be found.") | ||||
f"Emulator '{emulator}' could not be found.") | |||||
self.emulator = emulator | self.emulator = emulator | ||||
if use_cli and not os.path.isfile(bitcoin_cli): | if use_cli and not os.path.isfile(bitcoin_cli): | ||||
raise FileNotFoundError( | raise FileNotFoundError( | ||||
f"Binary '{bitcoin_cli}' could not be found.\nTry setting it manually:\n" | f"Binary '{bitcoin_cli}' could not be found.\nTry setting it" | ||||
f"\tBITCOINCLI=<path/to/bitcoin-cli> {sys.argv[0]}") | f" manually:\n\tBITCOINCLI=<path/to/bitcoin-cli> {sys.argv[0]}" | ||||
) | |||||
self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) | self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) | ||||
self.use_cli = use_cli | self.use_cli = use_cli | ||||
self.start_perf = start_perf | self.start_perf = start_perf | ||||
self.running = False | self.running = False | ||||
self.process = None | self.process = None | ||||
self.rpc_connected = False | self.rpc_connected = False | ||||
self.rpc = None | self.rpc = None | ||||
self.url = None | self.url = None | ||||
self.relay_fee_cache = None | self.relay_fee_cache = None | ||||
self.log = logging.getLogger(f'TestFramework.node{i}') | self.log = logging.getLogger(f"TestFramework.node{i}") | ||||
# Whether to kill the node when this object goes away | # Whether to kill the node when this object goes away | ||||
self.cleanup_on_exit = True | self.cleanup_on_exit = True | ||||
# Cache perf subprocesses here by their data output filename. | # Cache perf subprocesses here by their data output filename. | ||||
self.perf_subprocesses = {} | self.perf_subprocesses = {} | ||||
self.p2ps = [] | self.p2ps = [] | ||||
self.timeout_factor = timeout_factor | self.timeout_factor = timeout_factor | ||||
AddressKeyPair = collections.namedtuple( | AddressKeyPair = collections.namedtuple("AddressKeyPair", ["address", "key"]) | ||||
'AddressKeyPair', ['address', 'key']) | |||||
PRIV_KEYS = [ | PRIV_KEYS = [ | ||||
# address , privkey | # address , privkey | ||||
AddressKeyPair( | AddressKeyPair( | ||||
'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', | "mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z", | ||||
'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), | "cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', | "msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg", | ||||
'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), | "cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', | "mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP", | ||||
'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), | "cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', | "mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR", | ||||
'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), | "cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', | "msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws", | ||||
'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), | "cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', | "n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi", | ||||
'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), | "cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', | "myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6", | ||||
'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), | "cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', | "mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8", | ||||
'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), | "cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', | "mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg", | ||||
'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), | "cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', | "mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf", | ||||
'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), | "cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', | "mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6", | ||||
'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), | "cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3", | ||||
), | |||||
AddressKeyPair( | AddressKeyPair( | ||||
'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', | "mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7", | ||||
'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), | "cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ", | ||||
), | |||||
] | ] | ||||
def get_deterministic_priv_key(self): | def get_deterministic_priv_key(self): | ||||
"""Return a deterministic priv key in base58, that only depends on the node's index""" | """Return a deterministic priv key in base58, that only depends on the node's index""" | ||||
num_keys = len(self.PRIV_KEYS) | num_keys = len(self.PRIV_KEYS) | ||||
assert self.index < num_keys, \ | assert self.index < num_keys, ( | ||||
f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " \ | f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " | ||||
f"more are needed." | "more are needed." | ||||
) | |||||
return self.PRIV_KEYS[self.index] | return self.PRIV_KEYS[self.index] | ||||
def _node_msg(self, msg: str) -> str: | def _node_msg(self, msg: str) -> str: | ||||
"""Return a modified msg that identifies this node by its index as a debugging aid.""" | """Return a modified msg that identifies this node by its index as a debugging aid.""" | ||||
return f"[node {self.index}] {msg}" | return f"[node {self.index}] {msg}" | ||||
def _raise_assertion_error(self, msg: str): | def _raise_assertion_error(self, msg: str): | ||||
"""Raise an AssertionError with msg modified to identify this node.""" | """Raise an AssertionError with msg modified to identify this node.""" | ||||
raise AssertionError(self._node_msg(msg)) | raise AssertionError(self._node_msg(msg)) | ||||
def __del__(self): | def __del__(self): | ||||
# Ensure that we don't leave any bitcoind processes lying around after | # Ensure that we don't leave any bitcoind processes lying around after | ||||
# the test ends | # the test ends | ||||
if self.process and self.cleanup_on_exit: | if self.process and self.cleanup_on_exit: | ||||
# Should only happen on test failure | # Should only happen on test failure | ||||
# Avoid using logger, as that may have already been shutdown when | # Avoid using logger, as that may have already been shutdown when | ||||
# this destructor is called. | # this destructor is called. | ||||
print(self._node_msg("Cleaning up leftover process")) | print(self._node_msg("Cleaning up leftover process")) | ||||
self.process.kill() | self.process.kill() | ||||
def __getattr__(self, name): | def __getattr__(self, name): | ||||
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" | """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" | ||||
if self.use_cli: | if self.use_cli: | ||||
return getattr( | return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name) | ||||
RPCOverloadWrapper(self.cli, True, self.descriptors), name) | |||||
else: | else: | ||||
assert self.rpc is not None, self._node_msg( | assert self.rpc is not None, self._node_msg("Error: RPC not initialized") | ||||
"Error: RPC not initialized") | assert self.rpc_connected, self._node_msg("Error: No RPC connection") | ||||
assert self.rpc_connected, self._node_msg( | |||||
"Error: No RPC connection") | |||||
return getattr( | return getattr( | ||||
RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), | RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name | ||||
name) | ) | ||||
def clear_default_args(self): | def clear_default_args(self): | ||||
self.default_args.clear() | self.default_args.clear() | ||||
def extend_default_args(self, args): | def extend_default_args(self, args): | ||||
self.default_args.extend(args) | self.default_args.extend(args) | ||||
def remove_default_args(self, args): | def remove_default_args(self, args): | ||||
for rm_arg in args: | for rm_arg in args: | ||||
# Remove all occurrences of rm_arg in self.default_args: | # Remove all occurrences of rm_arg in self.default_args: | ||||
# - if the arg is a flag (-flag), then the names must match | # - if the arg is a flag (-flag), then the names must match | ||||
# - if the arg is a value (-key=value) then the name must starts | # - if the arg is a value (-key=value) then the name must starts | ||||
# with "-key=" (the '"' char is to avoid removing "-key_suffix" | # with "-key=" (the '"' char is to avoid removing "-key_suffix" | ||||
# arg is "-key" is the argument to remove). | # arg is "-key" is the argument to remove). | ||||
self.default_args = [def_arg for def_arg in self.default_args | self.default_args = [ | ||||
if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] | def_arg | ||||
for def_arg in self.default_args | |||||
if rm_arg != def_arg and not def_arg.startswith(rm_arg + "=") | |||||
] | |||||
def start(self, extra_args=None, *, cwd=None, stdout=None, | def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): | ||||
stderr=None, **kwargs): | |||||
"""Start the node.""" | """Start the node.""" | ||||
if extra_args is None: | if extra_args is None: | ||||
extra_args = self.extra_args | extra_args = self.extra_args | ||||
# Add a new stdout and stderr file each time bitcoind is started | # Add a new stdout and stderr file each time bitcoind is started | ||||
if stderr is None: | if stderr is None: | ||||
stderr = tempfile.NamedTemporaryFile( | stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) | ||||
dir=self.stderr_dir, delete=False) | |||||
if stdout is None: | if stdout is None: | ||||
stdout = tempfile.NamedTemporaryFile( | stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) | ||||
dir=self.stdout_dir, delete=False) | |||||
self.stderr = stderr | self.stderr = stderr | ||||
self.stdout = stdout | self.stdout = stdout | ||||
if cwd is None: | if cwd is None: | ||||
cwd = self.cwd | cwd = self.cwd | ||||
# Delete any existing cookie file -- if such a file exists (eg due to | # Delete any existing cookie file -- if such a file exists (eg due to | ||||
# unclean shutdown), it will get overwritten anyway by bitcoind, and | # unclean shutdown), it will get overwritten anyway by bitcoind, and | ||||
# potentially interfere with our attempt to authenticate | # potentially interfere with our attempt to authenticate | ||||
delete_cookie_file(self.datadir, self.chain) | delete_cookie_file(self.datadir, self.chain) | ||||
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are | # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are | ||||
# written to stderr and not the terminal | # written to stderr and not the terminal | ||||
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") | subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") | ||||
p_args = [self.binary] + self.default_args + extra_args | p_args = [self.binary] + self.default_args + extra_args | ||||
if self.emulator is not None: | if self.emulator is not None: | ||||
p_args = [self.emulator] + p_args | p_args = [self.emulator] + p_args | ||||
self.process = subprocess.Popen( | self.process = subprocess.Popen( | ||||
p_args, | p_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs | ||||
env=subp_env, | ) | ||||
stdout=stdout, | |||||
stderr=stderr, | |||||
cwd=cwd, | |||||
**kwargs) | |||||
self.running = True | self.running = True | ||||
self.log.debug("bitcoind started, waiting for RPC to come up") | self.log.debug("bitcoind started, waiting for RPC to come up") | ||||
if self.start_perf: | if self.start_perf: | ||||
self._start_perf() | self._start_perf() | ||||
def wait_for_rpc_connection(self): | def wait_for_rpc_connection(self): | ||||
"""Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" | """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" | ||||
# Poll at a rate of four times per second | # Poll at a rate of four times per second | ||||
poll_per_s = 4 | poll_per_s = 4 | ||||
for _ in range(poll_per_s * self.rpc_timeout): | for _ in range(poll_per_s * self.rpc_timeout): | ||||
if self.process.poll() is not None: | if self.process.poll() is not None: | ||||
raise FailedToStartError(self._node_msg( | raise FailedToStartError( | ||||
f'bitcoind exited with status {self.process.returncode} during ' | self._node_msg( | ||||
f'initialization')) | f"bitcoind exited with status {self.process.returncode} during " | ||||
"initialization" | |||||
) | |||||
) | |||||
try: | try: | ||||
rpc = get_rpc_proxy( | rpc = get_rpc_proxy( | ||||
rpc_url( | rpc_url(self.datadir, self.chain, self.host, self.rpc_port), | ||||
self.datadir, | |||||
self.chain, | |||||
self.host, | |||||
self.rpc_port), | |||||
self.index, | self.index, | ||||
# Shorter timeout to allow for one retry in case of | # Shorter timeout to allow for one retry in case of | ||||
# ETIMEDOUT | # ETIMEDOUT | ||||
timeout=self.rpc_timeout // 2, | timeout=self.rpc_timeout // 2, | ||||
coveragedir=self.coverage_dir | coveragedir=self.coverage_dir, | ||||
) | ) | ||||
rpc.getblockcount() | rpc.getblockcount() | ||||
# If the call to getblockcount() succeeds then the RPC | # If the call to getblockcount() succeeds then the RPC | ||||
# connection is up | # connection is up | ||||
wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], | wait_until_helper( | ||||
timeout_factor=self.timeout_factor) | lambda: rpc.getmempoolinfo()["loaded"], | ||||
timeout_factor=self.timeout_factor, | |||||
) | |||||
# Wait for the node to finish reindex, block import, and | # Wait for the node to finish reindex, block import, and | ||||
# loading the mempool. Usually importing happens fast or | # loading the mempool. Usually importing happens fast or | ||||
# even "immediate" when the node is started. However, there | # even "immediate" when the node is started. However, there | ||||
# is no guarantee and sometimes ThreadImport might finish | # is no guarantee and sometimes ThreadImport might finish | ||||
# later. This is going to cause intermittent test failures, | # later. This is going to cause intermittent test failures, | ||||
# because generally the tests assume the node is fully | # because generally the tests assume the node is fully | ||||
# ready after being started. | # ready after being started. | ||||
# | # | ||||
Show All 12 Lines | def wait_for_rpc_connection(self): | ||||
return | return | ||||
self.rpc = rpc | self.rpc = rpc | ||||
self.rpc_connected = True | self.rpc_connected = True | ||||
self.url = self.rpc.url | self.url = self.rpc.url | ||||
return | return | ||||
except JSONRPCException as e: # Initialization phase | except JSONRPCException as e: # Initialization phase | ||||
# -28 RPC in warmup | # -28 RPC in warmup | ||||
# -342 Service unavailable, RPC server started but is shutting down due to error | # -342 Service unavailable, RPC server started but is shutting down due to error | ||||
if e.error['code'] != -28 and e.error['code'] != -342: | if e.error["code"] != -28 and e.error["code"] != -342: | ||||
raise # unknown JSON RPC exception | raise # unknown JSON RPC exception | ||||
except ConnectionResetError: | except ConnectionResetError: | ||||
# This might happen when the RPC server is in warmup, but shut down before the call to getblockcount | # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount | ||||
# succeeds. Try again to properly raise the FailedToStartError | # succeeds. Try again to properly raise the FailedToStartError | ||||
pass | pass | ||||
except OSError as e: | except OSError as e: | ||||
if e.errno == errno.ETIMEDOUT: | if e.errno == errno.ETIMEDOUT: | ||||
# Treat identical to ConnectionResetError | # Treat identical to ConnectionResetError | ||||
pass | pass | ||||
elif e.errno == errno.ECONNREFUSED: | elif e.errno == errno.ECONNREFUSED: | ||||
# Port not yet open? | # Port not yet open? | ||||
pass | pass | ||||
else: | else: | ||||
# unknown OS error | # unknown OS error | ||||
raise | raise | ||||
except ValueError as e: | except ValueError as e: | ||||
# cookie file not found and no rpcuser or rpcpassword; | # cookie file not found and no rpcuser or rpcpassword; | ||||
# bitcoind is still starting | # bitcoind is still starting | ||||
if "No RPC credentials" not in str(e): | if "No RPC credentials" not in str(e): | ||||
raise | raise | ||||
time.sleep(1.0 / poll_per_s) | time.sleep(1.0 / poll_per_s) | ||||
self._raise_assertion_error( | self._raise_assertion_error( | ||||
f"Unable to connect to bitcoind after {self.rpc_timeout}s") | f"Unable to connect to bitcoind after {self.rpc_timeout}s" | ||||
) | |||||
def wait_for_cookie_credentials(self): | def wait_for_cookie_credentials(self): | ||||
"""Ensures auth cookie credentials can be read, e.g. for testing CLI | """Ensures auth cookie credentials can be read, e.g. for testing CLI | ||||
with -rpcwait before RPC connection is up.""" | with -rpcwait before RPC connection is up.""" | ||||
self.log.debug("Waiting for cookie credentials") | self.log.debug("Waiting for cookie credentials") | ||||
# Poll at a rate of four times per second. | # Poll at a rate of four times per second. | ||||
poll_per_s = 4 | poll_per_s = 4 | ||||
for _ in range(poll_per_s * self.rpc_timeout): | for _ in range(poll_per_s * self.rpc_timeout): | ||||
try: | try: | ||||
get_auth_cookie(self.datadir, self.chain) | get_auth_cookie(self.datadir, self.chain) | ||||
self.log.debug("Cookie credentials successfully retrieved") | self.log.debug("Cookie credentials successfully retrieved") | ||||
return | return | ||||
except ValueError: | except ValueError: | ||||
# cookie file not found and no rpcuser or rpcpassword; | # cookie file not found and no rpcuser or rpcpassword; | ||||
# bitcoind is still starting so we continue polling until | # bitcoind is still starting so we continue polling until | ||||
# RPC credentials are retrieved | # RPC credentials are retrieved | ||||
pass | pass | ||||
time.sleep(1.0 / poll_per_s) | time.sleep(1.0 / poll_per_s) | ||||
self._raise_assertion_error( | self._raise_assertion_error( | ||||
f"Unable to retrieve cookie credentials after {self.rpc_timeout}s") | f"Unable to retrieve cookie credentials after {self.rpc_timeout}s" | ||||
) | |||||
def generate(self, nblocks, maxtries=1000000, **kwargs): | def generate(self, nblocks, maxtries=1000000, **kwargs): | ||||
self.log.debug( | self.log.debug( | ||||
"TestNode.generate() dispatches `generate` call to `generatetoaddress`") | "TestNode.generate() dispatches `generate` call to `generatetoaddress`" | ||||
) | |||||
return self.generatetoaddress( | return self.generatetoaddress( | ||||
nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) | nblocks=nblocks, | ||||
address=self.get_deterministic_priv_key().address, | |||||
maxtries=maxtries, | |||||
**kwargs, | |||||
) | |||||
def generateblock(self, *args, invalid_call, **kwargs): | def generateblock(self, *args, invalid_call, **kwargs): | ||||
assert not invalid_call | assert not invalid_call | ||||
return self.__getattr__('generateblock')(*args, **kwargs) | return self.__getattr__("generateblock")(*args, **kwargs) | ||||
def generatetoaddress(self, *args, invalid_call, **kwargs): | def generatetoaddress(self, *args, invalid_call, **kwargs): | ||||
assert not invalid_call | assert not invalid_call | ||||
return self.__getattr__('generatetoaddress')(*args, **kwargs) | return self.__getattr__("generatetoaddress")(*args, **kwargs) | ||||
def generatetodescriptor(self, *args, invalid_call, **kwargs): | def generatetodescriptor(self, *args, invalid_call, **kwargs): | ||||
assert not invalid_call | assert not invalid_call | ||||
return self.__getattr__('generatetodescriptor')(*args, **kwargs) | return self.__getattr__("generatetodescriptor")(*args, **kwargs) | ||||
def buildavalancheproof(self, sequence: int, expiration: int, master: str, | def buildavalancheproof( | ||||
stakes: List[Dict[str, Any]], payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE) -> str: | self, | ||||
return self.__getattr__('buildavalancheproof')( | sequence: int, | ||||
expiration: int, | |||||
master: str, | |||||
stakes: List[Dict[str, Any]], | |||||
payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE, | |||||
) -> str: | |||||
return self.__getattr__("buildavalancheproof")( | |||||
sequence=sequence, | sequence=sequence, | ||||
expiration=expiration, | expiration=expiration, | ||||
master=master, | master=master, | ||||
stakes=stakes, | stakes=stakes, | ||||
payoutAddress=payoutAddress, | payoutAddress=payoutAddress, | ||||
) | ) | ||||
def get_wallet_rpc(self, wallet_name): | def get_wallet_rpc(self, wallet_name): | ||||
if self.use_cli: | if self.use_cli: | ||||
return RPCOverloadWrapper( | return RPCOverloadWrapper( | ||||
self.cli(f"-rpcwallet={wallet_name}"), True, | self.cli(f"-rpcwallet={wallet_name}"), True, self.descriptors | ||||
self.descriptors) | ) | ||||
else: | else: | ||||
assert self.rpc is not None, self._node_msg( | assert self.rpc is not None, self._node_msg("Error: RPC not initialized") | ||||
"Error: RPC not initialized") | assert self.rpc_connected, self._node_msg("Error: RPC not connected") | ||||
assert self.rpc_connected, self._node_msg( | |||||
"Error: RPC not connected") | |||||
wallet_path = f"wallet/{urllib.parse.quote(wallet_name)}" | wallet_path = f"wallet/{urllib.parse.quote(wallet_name)}" | ||||
return RPCOverloadWrapper(self.rpc / wallet_path, | return RPCOverloadWrapper( | ||||
descriptors=self.descriptors) | self.rpc / wallet_path, descriptors=self.descriptors | ||||
) | |||||
def stop_node(self, expected_stderr='', *, wait=0, | def stop_node(self, expected_stderr="", *, wait=0, wait_until_stopped=True): | ||||
wait_until_stopped=True): | |||||
"""Stop the node.""" | """Stop the node.""" | ||||
if not self.running: | if not self.running: | ||||
return | return | ||||
self.log.debug("Stopping node") | self.log.debug("Stopping node") | ||||
try: | try: | ||||
self.stop(wait=wait) | self.stop(wait=wait) | ||||
except http.client.CannotSendRequest: | except http.client.CannotSendRequest: | ||||
self.log.exception("Unable to stop node.") | self.log.exception("Unable to stop node.") | ||||
# If there are any running perf processes, stop them. | # If there are any running perf processes, stop them. | ||||
for profile_name in tuple(self.perf_subprocesses.keys()): | for profile_name in tuple(self.perf_subprocesses.keys()): | ||||
self._stop_perf(profile_name) | self._stop_perf(profile_name) | ||||
# Check that stderr is as expected | # Check that stderr is as expected | ||||
self.stderr.seek(0) | self.stderr.seek(0) | ||||
stderr = self.stderr.read().decode('utf-8').strip() | stderr = self.stderr.read().decode("utf-8").strip() | ||||
if stderr != expected_stderr: | if stderr != expected_stderr: | ||||
raise AssertionError( | raise AssertionError(f"Unexpected stderr {stderr} != {expected_stderr}") | ||||
f"Unexpected stderr {stderr} != {expected_stderr}") | |||||
self.stdout.close() | self.stdout.close() | ||||
self.stderr.close() | self.stderr.close() | ||||
del self.p2ps[:] | del self.p2ps[:] | ||||
if wait_until_stopped: | if wait_until_stopped: | ||||
self.wait_until_stopped() | self.wait_until_stopped() | ||||
def is_node_stopped(self): | def is_node_stopped(self): | ||||
"""Checks whether the node has stopped. | """Checks whether the node has stopped. | ||||
Returns True if the node has stopped. False otherwise. | Returns True if the node has stopped. False otherwise. | ||||
This method is responsible for freeing resources (self.process).""" | This method is responsible for freeing resources (self.process).""" | ||||
if not self.running: | if not self.running: | ||||
return True | return True | ||||
return_code = self.process.poll() | return_code = self.process.poll() | ||||
if return_code is None: | if return_code is None: | ||||
return False | return False | ||||
# process has stopped. Assert that it didn't return an error code. | # process has stopped. Assert that it didn't return an error code. | ||||
assert return_code == 0, self._node_msg( | assert return_code == 0, self._node_msg( | ||||
f"Node returned non-zero exit code ({return_code}) when stopping") | f"Node returned non-zero exit code ({return_code}) when stopping" | ||||
) | |||||
self.running = False | self.running = False | ||||
self.process = None | self.process = None | ||||
self.rpc_connected = False | self.rpc_connected = False | ||||
self.rpc = None | self.rpc = None | ||||
self.log.debug("Node stopped") | self.log.debug("Node stopped") | ||||
return True | return True | ||||
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): | def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): | ||||
wait_until_helper( | wait_until_helper( | ||||
self.is_node_stopped, | self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor | ||||
timeout=timeout, | ) | ||||
timeout_factor=self.timeout_factor) | |||||
@property | @property | ||||
def chain_path(self) -> Path: | def chain_path(self) -> Path: | ||||
return Path(self.datadir) / self.chain | return Path(self.datadir) / self.chain | ||||
@property | @property | ||||
def debug_log_path(self) -> Path: | def debug_log_path(self) -> Path: | ||||
return self.chain_path / 'debug.log' | return self.chain_path / "debug.log" | ||||
def debug_log_bytes(self) -> int: | def debug_log_bytes(self) -> int: | ||||
with open(self.debug_log_path, encoding='utf-8') as dl: | with open(self.debug_log_path, encoding="utf-8") as dl: | ||||
dl.seek(0, 2) | dl.seek(0, 2) | ||||
return dl.tell() | return dl.tell() | ||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): | def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): | ||||
"""Assert that some debug messages are present within some timeout. | """Assert that some debug messages are present within some timeout. | ||||
Unexpected debug messages may be optionally provided to fail a test | Unexpected debug messages may be optionally provided to fail a test | ||||
if they appear before expected messages. | if they appear before expected messages. | ||||
Show All 10 Lines | def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): | ||||
unexpected_msgs = [] | unexpected_msgs = [] | ||||
time_end = time.time() + timeout * self.timeout_factor | time_end = time.time() + timeout * self.timeout_factor | ||||
prev_size = self.debug_log_bytes() | prev_size = self.debug_log_bytes() | ||||
yield | yield | ||||
while True: | while True: | ||||
found = True | found = True | ||||
with open(self.debug_log_path, encoding='utf-8') as dl: | with open(self.debug_log_path, encoding="utf-8") as dl: | ||||
dl.seek(prev_size) | dl.seek(prev_size) | ||||
log = dl.read() | log = dl.read() | ||||
print_log = " - " + "\n - ".join(log.splitlines()) | print_log = " - " + "\n - ".join(log.splitlines()) | ||||
for unexpected_msg in unexpected_msgs: | for unexpected_msg in unexpected_msgs: | ||||
if re.search(re.escape(unexpected_msg), | if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): | ||||
log, flags=re.MULTILINE): | |||||
self._raise_assertion_error( | self._raise_assertion_error( | ||||
f'Unexpected message "{unexpected_msg}" partially matches ' | f'Unexpected message "{unexpected_msg}" partially matches ' | ||||
f'log:\n\n{print_log}\n\n') | f"log:\n\n{print_log}\n\n" | ||||
) | |||||
for expected_msg in expected_msgs: | for expected_msg in expected_msgs: | ||||
if re.search(re.escape(expected_msg), log, | if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: | ||||
flags=re.MULTILINE) is None: | |||||
found = False | found = False | ||||
if found: | if found: | ||||
return | return | ||||
if time.time() >= time_end: | if time.time() >= time_end: | ||||
break | break | ||||
time.sleep(0.05) | time.sleep(0.05) | ||||
self._raise_assertion_error( | self._raise_assertion_error( | ||||
f'Expected messages "{expected_msgs}" does not partially match ' | f'Expected messages "{expected_msgs}" does not partially match ' | ||||
f'log:\n\n{print_log}\n\n') | f"log:\n\n{print_log}\n\n" | ||||
) | |||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def wait_for_debug_log( | def wait_for_debug_log( | ||||
self, expected_msgs: List[bytes], timeout=60, interval=0.05, chatty_callable=None): | self, | ||||
expected_msgs: List[bytes], | |||||
timeout=60, | |||||
interval=0.05, | |||||
chatty_callable=None, | |||||
): | |||||
""" | """ | ||||
Block until we see all the debug log messages or until we exceed the timeout. | Block until we see all the debug log messages or until we exceed the timeout. | ||||
If a chatty_callable is provided, it is repeated at every iteration. | If a chatty_callable is provided, it is repeated at every iteration. | ||||
""" | """ | ||||
time_end = time.time() + timeout * self.timeout_factor | time_end = time.time() + timeout * self.timeout_factor | ||||
prev_size = self.debug_log_bytes() | prev_size = self.debug_log_bytes() | ||||
yield | yield | ||||
Show All 13 Lines | ): | ||||
for expected_msg in expected_msgs: | for expected_msg in expected_msgs: | ||||
if expected_msg not in log: | if expected_msg not in log: | ||||
found = False | found = False | ||||
if found: | if found: | ||||
return | return | ||||
if time.time() >= time_end: | if time.time() >= time_end: | ||||
print_log = " - " + \ | print_log = " - " + "\n - ".join( | ||||
"\n - ".join([f"\n - {line.decode()}" for line in log.splitlines()]) | [f"\n - {line.decode()}" for line in log.splitlines()] | ||||
) | |||||
break | break | ||||
time.sleep(interval) | time.sleep(interval) | ||||
self._raise_assertion_error( | self._raise_assertion_error( | ||||
f'Expected messages "{str(expected_msgs)}" does not partially match ' | f'Expected messages "{str(expected_msgs)}" does not partially match ' | ||||
f'log:\n\n{print_log}\n\n') | f"log:\n\n{print_log}\n\n" | ||||
) | |||||
@contextlib.contextmanager | @contextlib.contextmanager | ||||
def profile_with_perf(self, profile_name: str): | def profile_with_perf(self, profile_name: str): | ||||
""" | """ | ||||
Context manager that allows easy profiling of node activity using `perf`. | Context manager that allows easy profiling of node activity using `perf`. | ||||
See `test/functional/README.md` for details on perf usage. | See `test/functional/README.md` for details on perf usage. | ||||
Show All 10 Lines | class TestNode: | ||||
def _start_perf(self, profile_name=None): | def _start_perf(self, profile_name=None): | ||||
"""Start a perf process to profile this node. | """Start a perf process to profile this node. | ||||
Returns the subprocess running perf.""" | Returns the subprocess running perf.""" | ||||
subp = None | subp = None | ||||
def test_success(cmd): | def test_success(cmd): | ||||
return subprocess.call( | return ( | ||||
subprocess.call( | |||||
# shell=True required for pipe use below | # shell=True required for pipe use below | ||||
cmd, shell=True, | cmd, | ||||
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 | shell=True, | ||||
stderr=subprocess.DEVNULL, | |||||
stdout=subprocess.DEVNULL, | |||||
) | |||||
== 0 | |||||
) | |||||
if not sys.platform.startswith('linux'): | if not sys.platform.startswith("linux"): | ||||
self.log.warning( | self.log.warning( | ||||
"Can't profile with perf; only availabe on Linux platforms") | "Can't profile with perf; only availabe on Linux platforms" | ||||
) | |||||
return None | return None | ||||
if not test_success('which perf'): | if not test_success("which perf"): | ||||
self.log.warning( | self.log.warning("Can't profile with perf; must install perf-tools") | ||||
"Can't profile with perf; must install perf-tools") | |||||
return None | return None | ||||
if not test_success( | if not test_success(f"readelf -S {shlex.quote(self.binary)} | grep .debug_str"): | ||||
f'readelf -S {shlex.quote(self.binary)} | grep .debug_str'): | |||||
self.log.warning( | self.log.warning( | ||||
"perf output won't be very useful without debug symbols compiled into bitcoind") | "perf output won't be very useful without debug symbols compiled into" | ||||
" bitcoind" | |||||
) | |||||
output_path = tempfile.NamedTemporaryFile( | output_path = tempfile.NamedTemporaryFile( | ||||
dir=self.datadir, | dir=self.datadir, | ||||
prefix=f"{profile_name or 'test'}.perf.data.", | prefix=f"{profile_name or 'test'}.perf.data.", | ||||
delete=False, | delete=False, | ||||
).name | ).name | ||||
cmd = [ | cmd = [ | ||||
'perf', 'record', | "perf", | ||||
'-g', # Record the callgraph. | "record", | ||||
"-g", # Record the callgraph. | |||||
# Compatibility for gcc's --fomit-frame-pointer. | # Compatibility for gcc's --fomit-frame-pointer. | ||||
'--call-graph', 'dwarf', | "--call-graph", | ||||
'-F', '101', # Sampling frequency in Hz. | "dwarf", | ||||
'-p', str(self.process.pid), | "-F", | ||||
'-o', output_path, | "101", # Sampling frequency in Hz. | ||||
"-p", | |||||
str(self.process.pid), | |||||
"-o", | |||||
output_path, | |||||
] | ] | ||||
subp = subprocess.Popen( | subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) | ||||
cmd, | |||||
stdout=subprocess.PIPE, | |||||
stderr=subprocess.PIPE) | |||||
self.perf_subprocesses[profile_name] = subp | self.perf_subprocesses[profile_name] = subp | ||||
return subp | return subp | ||||
def _stop_perf(self, profile_name): | def _stop_perf(self, profile_name): | ||||
"""Stop (and pop) a perf subprocess.""" | """Stop (and pop) a perf subprocess.""" | ||||
subp = self.perf_subprocesses.pop(profile_name) | subp = self.perf_subprocesses.pop(profile_name) | ||||
output_path = subp.args[subp.args.index('-o') + 1] | output_path = subp.args[subp.args.index("-o") + 1] | ||||
subp.terminate() | subp.terminate() | ||||
subp.wait(timeout=10) | subp.wait(timeout=10) | ||||
stderr = subp.stderr.read().decode() | stderr = subp.stderr.read().decode() | ||||
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: | if "Consider tweaking /proc/sys/kernel/perf_event_paranoid" in stderr: | ||||
self.log.warning( | self.log.warning( | ||||
"perf couldn't collect data! Try " | "perf couldn't collect data! Try " | ||||
"'sudo sysctl -w kernel.perf_event_paranoid=-1'") | "'sudo sysctl -w kernel.perf_event_paranoid=-1'" | ||||
) | |||||
else: | else: | ||||
report_cmd = f"perf report -i {output_path}" | report_cmd = f"perf report -i {output_path}" | ||||
self.log.info(f"See perf output by running '{report_cmd}'") | self.log.info(f"See perf output by running '{report_cmd}'") | ||||
    def assert_start_raises_init_error(
        self,
        extra_args=None,
        expected_msg=None,
        match=ErrorMatch.FULL_TEXT,
        *args,
        **kwargs,
    ):
        """Attempt to start the node and expect it to raise an error.

        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails
        match: how expected_msg is compared against stderr (partial regex,
            full regex, or exact text)

        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stdout.
        """
        # Capture stdout/stderr in named temp files; delete=False keeps them
        # around for post-mortem inspection after a failed assertion.
        with tempfile.NamedTemporaryFile(
            dir=self.stderr_dir, delete=False
        ) as log_stderr, tempfile.NamedTemporaryFile(
            dir=self.stdout_dir, delete=False
        ) as log_stdout:
            try:
                self.start(
                    extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs
                )
                # Expected path: bitcoind exits on its own during init.
                ret = self.process.wait(timeout=self.rpc_timeout)
                self.log.debug(
                    self._node_msg(
                        f"bitcoind exited with status {ret} during initialization"
                    )
                )
                # The process is gone; clear the bookkeeping state.
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode("utf-8").strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                f'Expected message "{expected_msg}" does not partially '
                                f'match stderr:\n"{stderr}"'
                            )
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                f'Expected message "{expected_msg}" does not fully '
                                f'match stderr:\n"{stderr}"'
                            )
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                f'Expected message "{expected_msg}" does not fully '
                                f'match stderr:\n"{stderr}"'
                            )
            except subprocess.TimeoutExpired:
                # Failure path: bitcoind kept running instead of erroring out.
                self.process.kill()
                self.running = False
                self.process = None
                assert_msg = f"bitcoind should have exited within {self.rpc_timeout}s "
                if expected_msg is None:
                    assert_msg += "with an error"
                else:
                    assert_msg += "with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
def relay_fee(self, cached=True): | def relay_fee(self, cached=True): | ||||
if not self.relay_fee_cache or not cached: | if not self.relay_fee_cache or not cached: | ||||
self.relay_fee_cache = self.getnetworkinfo()["relayfee"] | self.relay_fee_cache = self.getnetworkinfo()["relayfee"] | ||||
return self.relay_fee_cache | return self.relay_fee_cache | ||||
def calculate_fee(self, tx): | def calculate_fee(self, tx): | ||||
""" Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: | """Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: | ||||
- the current relayfee on node | - the current relayfee on node | ||||
- all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr | - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr | ||||
- all inputs currently unsigned (empty scriptSig) | - all inputs currently unsigned (empty scriptSig) | ||||
""" | """ | ||||
billable_size_estimate = tx.billable_size() | billable_size_estimate = tx.billable_size() | ||||
# Add some padding for signatures / public keys | # Add some padding for signatures / public keys | ||||
# 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes) | # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes) | ||||
billable_size_estimate += len(tx.vin) * 107 | billable_size_estimate += len(tx.vin) * 107 | ||||
# relay_fee gives a value in XEC per kB. | # relay_fee gives a value in XEC per kB. | ||||
return int(self.relay_fee() / 1000 * billable_size_estimate * XEC) | return int(self.relay_fee() / 1000 * billable_size_estimate * XEC) | ||||
def calculate_fee_from_txid(self, txid): | def calculate_fee_from_txid(self, txid): | ||||
ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) | ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) | ||||
return self.calculate_fee(ctx) | return self.calculate_fee(ctx) | ||||
    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add an inbound p2p connection to the node.

        p2p_conn: the P2PInterface-style object to connect (any extra kwargs
            are forwarded to its peer_connect).
        wait_for_verack: when True, block until the version/verack handshake
            and a ping round-trip have completed.

        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        # Default destination: this node's p2p port on localhost.
        if "dstport" not in kwargs:
            kwargs["dstport"] = p2p_port(self.index)
        if "dstaddr" not in kwargs:
            kwargs["dstaddr"] = "127.0.0.1"

        p2p_conn.peer_connect(
            **kwargs, net=self.chain, timeout_factor=self.timeout_factor
        )()
        self.p2ps.append(p2p_conn)
        # check_connected=False: the connection is not yet established at
        # this point, which is exactly what we are waiting for.
        p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
        if wait_for_verack:
            # Wait for the node to send us the version and verack
            p2p_conn.wait_for_verack()
            # At this point we have sent our version message and received the version and verack, however the full node
            # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
            # established (fSuccessfullyConnected).
            #
            # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
            # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
            # transaction that will be added to the mempool as soon as we return here.
            #
            # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
            # in comparison to the upside of making tests less fragile and
            # unexpected intermittent errors less likely.
            p2p_conn.sync_with_ping()

            # Consistency check that the Bitcoin ABC has received our user agent
            # string. This checks the node's newest peer. It could be racy if
            # another Bitcoin ABC node has connected since we opened our
            # connection, but we don't expect that to happen.
            assert_equal(self.getpeerinfo()[-1]["subver"], P2P_SUBVERSION)

        return p2p_conn
    def add_outbound_p2p_connection(
        self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs
    ):
        """Add an outbound p2p connection from node. Must be an
        "outbound-full-relay", "block-relay-only", "addr-fetch", "feeler" or "avalanche" connection.

        p2p_idx: index used to derive a unique connect_id for this connection.

        This method adds the p2p connection to the self.p2ps list and returns
        the connection to the caller.
        """

        def addconnection_callback(address, port):
            # Invoked by the test peer once it is listening: ask the node
            # (via RPC) to dial out to it.
            self.log.debug(f"Connecting to {address}:{port} {connection_type}")
            self.addconnection(f"{address}:{port}", connection_type)

        p2p_conn.peer_accept_connection(
            connect_cb=addconnection_callback,
            connect_id=p2p_idx + 1,
            net=self.chain,
            timeout_factor=self.timeout_factor,
            **kwargs,
        )()

        if connection_type == "feeler":
            # feeler connections are closed as soon as the node receives a
            # `version` message
            p2p_conn.wait_until(
                lambda: p2p_conn.message_count["version"] == 1, check_connected=False
            )
            p2p_conn.wait_until(
                lambda: not p2p_conn.is_connected, check_connected=False
            )
        else:
            # Non-feeler connections complete the full handshake and are
            # tracked in self.p2ps.
            p2p_conn.wait_for_connect()
            self.p2ps.append(p2p_conn)

            p2p_conn.wait_for_verack()
            p2p_conn.sync_with_ping()

        return p2p_conn
def num_test_p2p_connections(self): | def num_test_p2p_connections(self): | ||||
"""Return number of test framework p2p connections to the node.""" | """Return number of test framework p2p connections to the node.""" | ||||
return len([peer for peer in self.getpeerinfo() | return len( | ||||
if peer['subver'] == P2P_SUBVERSION]) | [peer for peer in self.getpeerinfo() if peer["subver"] == P2P_SUBVERSION] | ||||
) | |||||
def disconnect_p2ps(self): | def disconnect_p2ps(self): | ||||
"""Close all p2p connections to the node.""" | """Close all p2p connections to the node.""" | ||||
for p in self.p2ps: | for p in self.p2ps: | ||||
p.peer_disconnect() | p.peer_disconnect() | ||||
del self.p2ps[:] | del self.p2ps[:] | ||||
wait_until_helper(lambda: self.num_test_p2p_connections() == 0, | wait_until_helper( | ||||
timeout_factor=self.timeout_factor) | lambda: self.num_test_p2p_connections() == 0, | ||||
timeout_factor=self.timeout_factor, | |||||
) | |||||
class TestNodeCLIAttr:
    """Callable proxy for one bitcoin-cli command, bound to a TestNodeCLI."""

    def __init__(self, cli, command):
        # The owning CLI wrapper and the command name this proxy represents.
        self.cli = cli
        self.command = command

    def __call__(self, *args, **kwargs):
        """Execute the command immediately via the owning CLI."""
        return self.cli.send_cli(self.command, *args, **kwargs)

    def get_request(self, *args, **kwargs):
        """Return a zero-argument callable that runs the command later
        (used e.g. for batching)."""
        return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
    """Convert a Python value into its bitcoin-cli command-line representation.

    Booleans become "true"/"false", None becomes "null", dicts and lists are
    JSON-encoded, and everything else is passed through str().
    """
    # bool must be tested before anything else: bool is a subclass of int,
    # and True/False must render lowercase for the CLI.
    if isinstance(arg, bool):
        return str(arg).lower()
    elif arg is None:
        return "null"
    elif isinstance(arg, (dict, list)):
        # Single idiomatic isinstance check (was two chained isinstance calls).
        # EncodeDecimal handles Decimal amounts inside the container.
        return json.dumps(arg, default=EncodeDecimal)
    else:
        return str(arg)
class TestNodeCLI:
    """Interface to bitcoin-cli for an individual node"""

    def __init__(self, binary, datadir, emulator=None):
        # Extra bitcoin-cli options (set via __call__), the binary path,
        # the node's datadir, optional stdin payload and optional emulator
        # (e.g. qemu) to prefix the command with.
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.input = None
        self.log = logging.getLogger("TestFramework.bitcoincli")
        self.emulator = emulator

    def __call__(self, *options, cli_input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options
        # Returns a fresh instance so per-call options don't leak.
        cli = TestNodeCLI(self.binary, self.datadir, self.emulator)
        cli.options = [str(o) for o in options]
        cli.input = cli_input
        return cli

    def __getattr__(self, command):
        # Any unknown attribute is treated as a bitcoin-cli command name.
        return TestNodeCLIAttr(self, command)

    def batch(self, requests):
        """Run a sequence of deferred requests, collecting results or errors
        per request instead of aborting on the first failure."""
        results = []
        for request in requests:
            try:
                results.append({"result": request()})
            except JSONRPCException as e:
                results.append({"error": e})
        return results

    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [
            str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()
        ]
        # bitcoin-cli's -named mode cannot mix positional and named params.
        assert not (pos_args and named_args), (
            "Cannot use positional arguments and named arguments in the same "
            "bitcoin-cli call"
        )
        p_args = [self.binary, "-datadir=" + self.datadir] + self.options
        if named_args:
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        # p_args[2:] skips the binary path and -datadir for readable logs.
        self.log.debug(f"Running bitcoin-cli {p_args[2:]}")
        if self.emulator is not None:
            p_args = [self.emulator] + p_args
        process = subprocess.Popen(
            p_args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # bitcoin-cli prints RPC errors in a fixed two-line format;
            # translate them back into JSONRPCException for the tests.
            match = re.match(r"error code: ([-0-9]+)\nerror message:\n(.*)", cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException({"code": int(code), "message": message})
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(
                returncode, self.binary, output=cli_stderr
            )
        try:
            # Decimal parsing preserves monetary amounts exactly.
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            # Not JSON (e.g. plain-text help output): return the raw string.
            return cli_stdout.rstrip("\n")
class RPCOverloadWrapper: | class RPCOverloadWrapper: | ||||
def __init__(self, rpc, cli=False, descriptors=False): | def __init__(self, rpc, cli=False, descriptors=False): | ||||
self.rpc = rpc | self.rpc = rpc | ||||
self.is_cli = cli | self.is_cli = cli | ||||
self.descriptors = descriptors | self.descriptors = descriptors | ||||
def __getattr__(self, name): | def __getattr__(self, name): | ||||
return getattr(self.rpc, name) | return getattr(self.rpc, name) | ||||
def createwallet(self, wallet_name, disable_private_keys=None, blank=None, | def createwallet( | ||||
passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None): | self, | ||||
wallet_name, | |||||
disable_private_keys=None, | |||||
blank=None, | |||||
passphrase="", | |||||
avoid_reuse=None, | |||||
descriptors=None, | |||||
load_on_startup=None, | |||||
): | |||||
if descriptors is None: | if descriptors is None: | ||||
descriptors = self.descriptors | descriptors = self.descriptors | ||||
return self.__getattr__('createwallet')( | return self.__getattr__("createwallet")( | ||||
wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup) | wallet_name, | ||||
disable_private_keys, | |||||
blank, | |||||
passphrase, | |||||
avoid_reuse, | |||||
descriptors, | |||||
load_on_startup, | |||||
) | |||||
def importprivkey(self, privkey, label=None, rescan=None): | def importprivkey(self, privkey, label=None, rescan=None): | ||||
wallet_info = self.getwalletinfo() | wallet_info = self.getwalletinfo() | ||||
if 'descriptors' not in wallet_info or ( | if "descriptors" not in wallet_info or ( | ||||
'descriptors' in wallet_info and not wallet_info['descriptors']): | "descriptors" in wallet_info and not wallet_info["descriptors"] | ||||
return self.__getattr__('importprivkey')(privkey, label, rescan) | ): | ||||
desc = descsum_create('combo(' + privkey + ')') | return self.__getattr__("importprivkey")(privkey, label, rescan) | ||||
req = [{ | desc = descsum_create("combo(" + privkey + ")") | ||||
'desc': desc, | req = [ | ||||
'timestamp': 0 if rescan else 'now', | { | ||||
'label': label if label else '' | "desc": desc, | ||||
}] | "timestamp": 0 if rescan else "now", | ||||
"label": label if label else "", | |||||
} | |||||
] | |||||
import_res = self.importdescriptors(req) | import_res = self.importdescriptors(req) | ||||
if not import_res[0]['success']: | if not import_res[0]["success"]: | ||||
raise JSONRPCException(import_res[0]['error']) | raise JSONRPCException(import_res[0]["error"]) | ||||
def addmultisigaddress(self, nrequired, keys,
                       label=None):
    """Add an nrequired-of-keys multisig, transparently supporting descriptor wallets.

    For legacy wallets this forwards to the addmultisigaddress RPC. For
    descriptor wallets it creates the multisig via createmultisig and
    imports the resulting descriptor.

    Returns the createmultisig result for descriptor wallets, or the RPC
    result for legacy wallets. Raises JSONRPCException if the descriptor
    import fails.
    """
    wallet_info = self.getwalletinfo()
    # dict.get covers both "key absent" and "key present but falsy" in one
    # test; the original `not in ... or (in ... and not ...)` was redundant.
    if not wallet_info.get('descriptors'):
        return self.__getattr__('addmultisigaddress')(nrequired, keys, label)
    cms = self.createmultisig(nrequired, keys)
    req = [{
        'desc': cms['descriptor'],
        # Multisig descriptors have no history yet; 0 keeps behavior of a
        # full (trivial) rescan from genesis.
        'timestamp': 0,
        'label': label or '',
    }]
    import_res = self.importdescriptors(req)
    if not import_res[0]['success']:
        raise JSONRPCException(import_res[0]['error'])
    return cms
def importpubkey(self, pubkey, label=None, rescan=None):
    """Import a watch-only public key, transparently supporting descriptor wallets.

    For legacy wallets this forwards to the importpubkey RPC. For
    descriptor wallets it imports an equivalent combo() descriptor.

    Raises JSONRPCException if the descriptor import fails.
    """
    wallet_info = self.getwalletinfo()
    # dict.get covers both "key absent" and "key present but falsy" in one
    # test; the original `not in ... or (in ... and not ...)` was redundant.
    if not wallet_info.get('descriptors'):
        return self.__getattr__('importpubkey')(pubkey, label, rescan)
    desc = descsum_create('combo(' + pubkey + ')')
    req = [{
        'desc': desc,
        # A timestamp of 0 forces a full rescan; 'now' skips rescanning.
        'timestamp': 0 if rescan else 'now',
        'label': label or '',
    }]
    import_res = self.importdescriptors(req)
    if not import_res[0]['success']:
        raise JSONRPCException(import_res[0]['error'])
def importaddress(self, address, label=None, rescan=None, p2sh=None):
    """Import a watch-only address or raw script, supporting descriptor wallets.

    For legacy wallets this forwards to the importaddress RPC. For
    descriptor wallets: a hex string is imported as a raw() script
    descriptor (optionally also wrapped in p2sh() when `p2sh` is set);
    anything else is imported as an addr() descriptor.

    Raises JSONRPCException if any descriptor import fails.
    """
    wallet_info = self.getwalletinfo()
    # dict.get covers both "key absent" and "key present but falsy" in one
    # test; the original `not in ... or (in ... and not ...)` was redundant.
    if not wallet_info.get('descriptors'):
        return self.__getattr__('importaddress')(
            address, label, rescan, p2sh)
    # Distinguish raw hex scripts from addresses via int(..., 16).
    # Catch only the exceptions int() can raise for bad input; the
    # previous `except BaseException` also swallowed KeyboardInterrupt,
    # SystemExit and unrelated errors from descsum_create.
    try:
        int(address, 16)
    except (ValueError, TypeError):
        is_hex = False
        desc = descsum_create('addr(' + address + ')')
    else:
        is_hex = True
        desc = descsum_create('raw(' + address + ')')
    # A timestamp of 0 forces a full rescan; 'now' skips rescanning.
    timestamp = 0 if rescan else 'now'
    reqs = [{
        'desc': desc,
        'timestamp': timestamp,
        'label': label or '',
    }]
    if is_hex and p2sh:
        # Also watch the P2SH wrapping of the raw script.
        reqs.append({
            'desc': descsum_create('p2sh(raw(' + address + '))'),
            'timestamp': timestamp,
            'label': label or '',
        })
    import_res = self.importdescriptors(reqs)
    for res in import_res:
        if not res['success']:
            raise JSONRPCException(res['error'])