Changeset View
Standalone View
test/functional/p2p_filter.py
Show All 10 Lines | from test_framework.messages import ( | ||||
MAX_BLOOM_FILTER_SIZE, | MAX_BLOOM_FILTER_SIZE, | ||||
MAX_BLOOM_HASH_FUNCS, | MAX_BLOOM_HASH_FUNCS, | ||||
MSG_BLOCK, | MSG_BLOCK, | ||||
MSG_FILTERED_BLOCK, | MSG_FILTERED_BLOCK, | ||||
msg_filteradd, | msg_filteradd, | ||||
msg_filterclear, | msg_filterclear, | ||||
msg_filterload, | msg_filterload, | ||||
msg_getdata, | msg_getdata, | ||||
msg_mempool, | |||||
msg_version, | |||||
) | ) | ||||
from test_framework.mininode import P2PInterface | from test_framework.mininode import P2PInterface, mininode_lock | ||||
from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE | from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE | ||||
from test_framework.test_framework import BitcoinTestFramework | from test_framework.test_framework import BitcoinTestFramework | ||||
class FilterNode(P2PInterface): | class P2PBloomFilter(P2PInterface): | ||||
# This is a P2SH watch-only wallet | # This is a P2SH watch-only wallet | ||||
watch_script_pubkey = 'a914ffffffffffffffffffffffffffffffffffffffff87' | watch_script_pubkey = 'a914ffffffffffffffffffffffffffffffffffffffff87' | ||||
# The initial filter (n=10, fp=0.000001) with just the above scriptPubKey | # The initial filter (n=10, fp=0.000001) with just the above scriptPubKey | ||||
# added | # added | ||||
watch_filter_init = msg_filterload( | watch_filter_init = msg_filterload( | ||||
data=b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00', | data=b'@\x00\x08\x00\x80\x00\x00 \x00\xc0\x00 \x04\x00\x08$\x00\x04\x80\x00\x00 \x00\x00\x00\x00\x80\x00\x00@\x00\x02@ \x00', | ||||
nHashFuncs=19, | nHashFuncs=19, | ||||
nTweak=0, | nTweak=0, | ||||
nFlags=1, | nFlags=1, | ||||
) | ) | ||||
def __init__(self): | |||||
super().__init__() | |||||
self._tx_received = False | |||||
self._merkleblock_received = False | |||||
def on_inv(self, message): | def on_inv(self, message): | ||||
want = msg_getdata() | want = msg_getdata() | ||||
for i in message.inv: | for i in message.inv: | ||||
# inv messages can only contain TX or BLOCK, so translate BLOCK to | # inv messages can only contain TX or BLOCK, so translate BLOCK to | ||||
# FILTERED_BLOCK | # FILTERED_BLOCK | ||||
if i.type == MSG_BLOCK: | if i.type == MSG_BLOCK: | ||||
want.inv.append(CInv(MSG_FILTERED_BLOCK, i.hash)) | want.inv.append(CInv(MSG_FILTERED_BLOCK, i.hash)) | ||||
else: | else: | ||||
want.inv.append(i) | want.inv.append(i) | ||||
if len(want.inv): | if len(want.inv): | ||||
self.send_message(want) | self.send_message(want) | ||||
def on_merkleblock(self, message): | def on_merkleblock(self, message): | ||||
self.merkleblock_received = True | self._merkleblock_received = True | ||||
def on_tx(self, message): | def on_tx(self, message): | ||||
self.tx_received = True | self._tx_received = True | ||||
@property | |||||
def tx_received(self): | |||||
with mininode_lock: | |||||
return self._tx_received | |||||
@tx_received.setter | |||||
def tx_received(self, value): | |||||
with mininode_lock: | |||||
self._tx_received = value | |||||
@property | |||||
def merkleblock_received(self): | |||||
with mininode_lock: | |||||
return self._merkleblock_received | |||||
@merkleblock_received.setter | |||||
def merkleblock_received(self, value): | |||||
with mininode_lock: | |||||
self._merkleblock_received = value | |||||
class FilterTest(BitcoinTestFramework): | class FilterTest(BitcoinTestFramework): | ||||
def set_test_params(self): | def set_test_params(self): | ||||
self.setup_clean_chain = False | self.setup_clean_chain = False | ||||
self.num_nodes = 1 | self.num_nodes = 1 | ||||
self.extra_args = [[ | self.extra_args = [[ | ||||
'-peerbloomfilters', | '-peerbloomfilters', | ||||
'-whitelist=noban@127.0.0.1', # immediate tx relay | '-whitelist=noban@127.0.0.1', # immediate tx relay | ||||
]] | ]] | ||||
def skip_test_if_missing_module(self): | def skip_test_if_missing_module(self): | ||||
self.skip_if_no_wallet() | self.skip_if_no_wallet() | ||||
def test_size_limits(self, filter_node): | def test_size_limits(self, filter_peer): | ||||
self.log.info('Check that too large filter is rejected') | self.log.info('Check that too large filter is rejected') | ||||
with self.nodes[0].assert_debug_log(['Misbehaving']): | with self.nodes[0].assert_debug_log(['Misbehaving']): | ||||
filter_node.send_and_ping(msg_filterload( | filter_peer.send_and_ping(msg_filterload( | ||||
data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE + 1))) | data=b'\xbb' * (MAX_BLOOM_FILTER_SIZE + 1))) | ||||
self.log.info('Check that max size filter is accepted') | self.log.info('Check that max size filter is accepted') | ||||
with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | ||||
filter_node.send_and_ping( | filter_peer.send_and_ping( | ||||
msg_filterload( | msg_filterload( | ||||
data=b'\xbb' * | data=b'\xbb' * | ||||
(MAX_BLOOM_FILTER_SIZE))) | (MAX_BLOOM_FILTER_SIZE))) | ||||
filter_node.send_and_ping(msg_filterclear()) | filter_peer.send_and_ping(msg_filterclear()) | ||||
self.log.info( | self.log.info( | ||||
'Check that filter with too many hash functions is rejected') | 'Check that filter with too many hash functions is rejected') | ||||
with self.nodes[0].assert_debug_log(['Misbehaving']): | with self.nodes[0].assert_debug_log(['Misbehaving']): | ||||
filter_node.send_and_ping( | filter_peer.send_and_ping( | ||||
msg_filterload( | msg_filterload( | ||||
data=b'\xaa', | data=b'\xaa', | ||||
nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1)) | nHashFuncs=MAX_BLOOM_HASH_FUNCS + 1)) | ||||
self.log.info('Check that filter with max hash functions is accepted') | self.log.info('Check that filter with max hash functions is accepted') | ||||
with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | ||||
filter_node.send_and_ping( | filter_peer.send_and_ping( | ||||
msg_filterload( | msg_filterload( | ||||
data=b'\xaa', | data=b'\xaa', | ||||
nHashFuncs=MAX_BLOOM_HASH_FUNCS)) | nHashFuncs=MAX_BLOOM_HASH_FUNCS)) | ||||
# Don't send filterclear until next two filteradd checks are done | # Don't send filterclear until next two filteradd checks are done | ||||
self.log.info( | self.log.info( | ||||
'Check that max size data element to add to the filter is accepted') | 'Check that max size data element to add to the filter is accepted') | ||||
with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | with self.nodes[0].assert_debug_log([""], unexpected_msgs=['Misbehaving']): | ||||
filter_node.send_and_ping( | filter_peer.send_and_ping( | ||||
msg_filteradd( | msg_filteradd( | ||||
data=b'\xcc' * | data=b'\xcc' * | ||||
(MAX_SCRIPT_ELEMENT_SIZE))) | (MAX_SCRIPT_ELEMENT_SIZE))) | ||||
self.log.info( | self.log.info( | ||||
'Check that too large data element to add to the filter is rejected') | 'Check that too large data element to add to the filter is rejected') | ||||
with self.nodes[0].assert_debug_log(['Misbehaving']): | with self.nodes[0].assert_debug_log(['Misbehaving']): | ||||
filter_node.send_and_ping(msg_filteradd( | filter_peer.send_and_ping(msg_filteradd( | ||||
data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE + 1))) | data=b'\xcc' * (MAX_SCRIPT_ELEMENT_SIZE + 1))) | ||||
filter_node.send_and_ping(msg_filterclear()) | filter_peer.send_and_ping(msg_filterclear()) | ||||
def run_test(self): | def test_msg_mempool(self): | ||||
filter_node = self.nodes[0].add_p2p_connection(FilterNode()) | self.log.info( | ||||
"Check that a node with bloom filters enabled services p2p mempool messages") | |||||
filter_peer = P2PBloomFilter() | |||||
self.test_size_limits(filter_node) | self.log.info("Create a tx relevant to the peer before connecting") | ||||
filter_address = self.nodes[0].decodescript( | |||||
filter_peer.watch_script_pubkey)['addresses'][0] | |||||
txid = self.nodes[0].sendtoaddress(filter_address, 90) | |||||
self.log.info('Add filtered P2P connection to the node') | self.log.info( | ||||
filter_node.send_and_ping(filter_node.watch_filter_init) | "Send a mempool msg after connecting and check that the tx is received") | ||||
self.nodes[0].add_p2p_connection(filter_peer) | |||||
filter_peer.send_and_ping(filter_peer.watch_filter_init) | |||||
self.nodes[0].p2p.send_message(msg_mempool()) | |||||
filter_peer.wait_for_tx(txid) | |||||
def test_frelay_false(self, filter_peer): | |||||
self.log.info( | |||||
"Check that a node with fRelay set to false does not receive invs until the filter is set") | |||||
filter_peer.tx_received = False | |||||
filter_address = self.nodes[0].decodescript( | |||||
filter_peer.watch_script_pubkey)['addresses'][0] | |||||
self.nodes[0].sendtoaddress(filter_address, 90) | |||||
# Sync to make sure the reason filter_peer doesn't receive the tx is | |||||
# not p2p delays | |||||
filter_peer.sync_with_ping() | |||||
assert not filter_peer.tx_received | |||||
# Clear the mempool so that this transaction does not impact subsequent | |||||
# tests | |||||
self.nodes[0].generate(1) | |||||
def test_filter(self, filter_peer): | |||||
# Set the bloomfilter using filterload | |||||
filter_peer.send_and_ping(filter_peer.watch_filter_init) | |||||
# If fRelay is not already True, sending filterload sets it to True | |||||
assert self.nodes[0].getpeerinfo()[0]['relaytxes'] | |||||
filter_address = self.nodes[0].decodescript( | filter_address = self.nodes[0].decodescript( | ||||
filter_node.watch_script_pubkey)['addresses'][0] | filter_peer.watch_script_pubkey)['addresses'][0] | ||||
self.log.info( | self.log.info( | ||||
'Check that we receive merkleblock and tx if the filter matches a tx in a block') | 'Check that we receive merkleblock and tx if the filter matches a tx in a block') | ||||
block_hash = self.nodes[0].generatetoaddress(1, filter_address)[0] | block_hash = self.nodes[0].generatetoaddress(1, filter_address)[0] | ||||
txid = self.nodes[0].getblock(block_hash)['tx'][0] | txid = self.nodes[0].getblock(block_hash)['tx'][0] | ||||
filter_node.wait_for_merkleblock(block_hash) | filter_peer.wait_for_merkleblock(block_hash) | ||||
filter_node.wait_for_tx(txid) | filter_peer.wait_for_tx(txid) | ||||
self.log.info( | self.log.info( | ||||
'Check that we only receive a merkleblock if the filter does not match a tx in a block') | 'Check that we only receive a merkleblock if the filter does not match a tx in a block') | ||||
filter_node.tx_received = False | filter_peer.tx_received = False | ||||
block_hash = self.nodes[0].generatetoaddress( | block_hash = self.nodes[0].generatetoaddress( | ||||
1, self.nodes[0].getnewaddress())[0] | 1, self.nodes[0].getnewaddress())[0] | ||||
filter_node.wait_for_merkleblock(block_hash) | filter_peer.wait_for_merkleblock(block_hash) | ||||
assert not filter_node.tx_received | assert not filter_peer.tx_received | ||||
self.log.info( | self.log.info( | ||||
'Check that we not receive a tx if the filter does not match a mempool tx') | 'Check that we not receive a tx if the filter does not match a mempool tx') | ||||
filter_node.merkleblock_received = False | filter_peer.merkleblock_received = False | ||||
filter_node.tx_received = False | filter_peer.tx_received = False | ||||
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 90) | self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 90) | ||||
filter_node.sync_with_ping() | filter_peer.sync_with_ping() | ||||
filter_node.sync_with_ping() | filter_peer.sync_with_ping() | ||||
assert not filter_node.merkleblock_received | assert not filter_peer.merkleblock_received | ||||
assert not filter_node.tx_received | assert not filter_peer.tx_received | ||||
self.log.info( | self.log.info( | ||||
'Check that we receive a tx in reply to a mempool msg if the filter matches a mempool tx') | 'Check that we receive a tx if the filter matches a mempool tx') | ||||
filter_node.merkleblock_received = False | filter_peer.merkleblock_received = False | ||||
txid = self.nodes[0].sendtoaddress(filter_address, 90) | txid = self.nodes[0].sendtoaddress(filter_address, 90) | ||||
filter_node.wait_for_tx(txid) | filter_peer.wait_for_tx(txid) | ||||
assert not filter_node.merkleblock_received | assert not filter_peer.merkleblock_received | ||||
self.log.info( | self.log.info( | ||||
'Check that after deleting filter all txs get relayed again') | 'Check that after deleting filter all txs get relayed again') | ||||
filter_node.send_and_ping(msg_filterclear()) | filter_peer.send_and_ping(msg_filterclear()) | ||||
for _ in range(5): | for _ in range(5): | ||||
txid = self.nodes[0].sendtoaddress( | txid = self.nodes[0].sendtoaddress( | ||||
self.nodes[0].getnewaddress(), 7) | self.nodes[0].getnewaddress(), 7) | ||||
filter_node.wait_for_tx(txid) | filter_peer.wait_for_tx(txid) | ||||
self.log.info( | self.log.info( | ||||
'Check that request for filtered blocks is ignored if no filter' | 'Check that request for filtered blocks is ignored if no filter' | ||||
' is set') | ' is set') | ||||
filter_node.merkleblock_received = False | filter_peer.merkleblock_received = False | ||||
filter_node.tx_received = False | filter_peer.tx_received = False | ||||
with self.nodes[0].assert_debug_log(expected_msgs=['received getdata']): | with self.nodes[0].assert_debug_log(expected_msgs=['received getdata']): | ||||
block_hash = self.nodes[0].generatetoaddress( | block_hash = self.nodes[0].generatetoaddress( | ||||
1, self.nodes[0].getnewaddress())[0] | 1, self.nodes[0].getnewaddress())[0] | ||||
filter_node.wait_for_inv([CInv(MSG_BLOCK, int(block_hash, 16))]) | filter_peer.wait_for_inv([CInv(MSG_BLOCK, int(block_hash, 16))]) | ||||
filter_node.sync_with_ping() | filter_peer.sync_with_ping() | ||||
assert not filter_node.merkleblock_received | assert not filter_peer.merkleblock_received | ||||
assert not filter_node.tx_received | assert not filter_peer.tx_received | ||||
self.log.info( | self.log.info( | ||||
'Check that sending "filteradd" if no filter is set is treated as ' | 'Check that sending "filteradd" if no filter is set is treated as ' | ||||
'misbehavior') | 'misbehavior') | ||||
with self.nodes[0].assert_debug_log(['Misbehaving']): | with self.nodes[0].assert_debug_log(['Misbehaving']): | ||||
filter_node.send_and_ping(msg_filteradd(data=b'letsmisbehave')) | filter_peer.send_and_ping(msg_filteradd(data=b'letsmisbehave')) | ||||
self.log.info( | self.log.info( | ||||
"Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed") | "Check that division-by-zero remote crash bug [CVE-2013-5700] is fixed") | ||||
filter_node.send_and_ping(msg_filterload(data=b'', nHashFuncs=1)) | filter_peer.send_and_ping(msg_filterload(data=b'', nHashFuncs=1)) | ||||
filter_node.send_and_ping( | filter_peer.send_and_ping( | ||||
msg_filteradd( | msg_filteradd( | ||||
data=b'letstrytocrashthisnode')) | data=b'letstrytocrashthisnode')) | ||||
self.nodes[0].disconnect_p2ps() | |||||
def run_test(self): | |||||
filter_peer = self.nodes[0].add_p2p_connection(P2PBloomFilter()) | |||||
self.log.info('Test filter size limits') | |||||
self.test_size_limits(filter_peer) | |||||
self.log.info('Test BIP 37 for a node with fRelay = True (default)') | |||||
self.test_filter(filter_peer) | |||||
self.nodes[0].disconnect_p2ps() | |||||
self.log.info('Test BIP 37 for a node with fRelay = False') | |||||
# Add peer but do not send version yet | |||||
filter_peer_without_nrelay = self.nodes[0].add_p2p_connection( | |||||
P2PBloomFilter(), send_version=False, wait_for_verack=False) | |||||
# Send version with fRelay=False | |||||
filter_peer_without_nrelay.wait_until( | |||||
lambda: filter_peer_without_nrelay.is_connected, timeout=10) | |||||
version_without_fRelay = msg_version() | |||||
version_without_fRelay.nRelay = 0 | |||||
filter_peer_without_nrelay.send_message(version_without_fRelay) | |||||
filter_peer_without_nrelay.wait_for_verack() | |||||
assert not self.nodes[0].getpeerinfo()[0]['relaytxes'] | |||||
self.test_frelay_false(filter_peer_without_nrelay) | |||||
self.test_filter(filter_peer_without_nrelay) | |||||
self.log.info('Test msg_mempool') | |||||
self.test_msg_mempool() | |||||
if __name__ == '__main__': | if __name__ == '__main__': | ||||
FilterTest().main() | FilterTest().main() |