Changeset View
Changeset View
Standalone View
Standalone View
test/functional/p2p_tx_download.py
Show All 12 Lines | from test_framework.messages import ( | ||||
FromHex, | FromHex, | ||||
MSG_TX, | MSG_TX, | ||||
MSG_TYPE_MASK, | MSG_TYPE_MASK, | ||||
msg_inv, | msg_inv, | ||||
msg_notfound, | msg_notfound, | ||||
) | ) | ||||
from test_framework.mininode import ( | from test_framework.mininode import ( | ||||
P2PInterface, | P2PInterface, | ||||
mininode_lock, | p2p_lock, | ||||
) | ) | ||||
from test_framework.test_framework import BitcoinTestFramework | from test_framework.test_framework import BitcoinTestFramework | ||||
from test_framework.util import ( | from test_framework.util import ( | ||||
assert_equal, | assert_equal, | ||||
wait_until, | wait_until, | ||||
) | ) | ||||
import time | import time | ||||
Show All 37 Lines | def test_tx_requests(self): | ||||
msg = msg_inv([CInv(t=MSG_TX, h=txid)]) | msg = msg_inv([CInv(t=MSG_TX, h=txid)]) | ||||
for p in self.nodes[0].p2ps: | for p in self.nodes[0].p2ps: | ||||
p.send_and_ping(msg) | p.send_and_ping(msg) | ||||
outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))] | outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))] | ||||
def getdata_found(peer_index): | def getdata_found(peer_index): | ||||
p = self.nodes[0].p2ps[peer_index] | p = self.nodes[0].p2ps[peer_index] | ||||
with mininode_lock: | with p2p_lock: | ||||
return p.last_message.get( | return p.last_message.get( | ||||
"getdata") and p.last_message["getdata"].inv[-1].hash == txid | "getdata") and p.last_message["getdata"].inv[-1].hash == txid | ||||
node_0_mocktime = int(time.time()) | node_0_mocktime = int(time.time()) | ||||
while outstanding_peer_index: | while outstanding_peer_index: | ||||
node_0_mocktime += MAX_GETDATA_INBOUND_WAIT | node_0_mocktime += MAX_GETDATA_INBOUND_WAIT | ||||
self.nodes[0].setmocktime(node_0_mocktime) | self.nodes[0].setmocktime(node_0_mocktime) | ||||
wait_until(lambda: any(getdata_found(i) | wait_until(lambda: any(getdata_found(i) | ||||
▲ Show 20 Lines • Show All 47 Lines • ▼ Show 20 Lines | class TxDownloadTest(BitcoinTestFramework): | ||||
def test_in_flight_max(self): | def test_in_flight_max(self): | ||||
self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format( | self.log.info("Test that we don't load peers with more than {} transaction requests immediately".format( | ||||
MAX_GETDATA_IN_FLIGHT)) | MAX_GETDATA_IN_FLIGHT)) | ||||
txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)] | txids = [i for i in range(MAX_GETDATA_IN_FLIGHT + 2)] | ||||
p = self.nodes[0].p2ps[0] | p = self.nodes[0].p2ps[0] | ||||
with mininode_lock: | with p2p_lock: | ||||
p.tx_getdata_count = 0 | p.tx_getdata_count = 0 | ||||
mock_time = int(time.time() + 1) | mock_time = int(time.time() + 1) | ||||
self.nodes[0].setmocktime(mock_time) | self.nodes[0].setmocktime(mock_time) | ||||
for i in range(MAX_GETDATA_IN_FLIGHT): | for i in range(MAX_GETDATA_IN_FLIGHT): | ||||
p.send_message(msg_inv([CInv(t=MSG_TX, h=txids[i])])) | p.send_message(msg_inv([CInv(t=MSG_TX, h=txids[i])])) | ||||
p.sync_with_ping() | p.sync_with_ping() | ||||
mock_time += INBOUND_PEER_TX_DELAY | mock_time += INBOUND_PEER_TX_DELAY | ||||
Show All 9 Lines | def test_in_flight_max(self): | ||||
OVERLOADED_PEER_DELAY - | OVERLOADED_PEER_DELAY - | ||||
1)) | 1)) | ||||
self.nodes[0].setmocktime( | self.nodes[0].setmocktime( | ||||
mock_time + | mock_time + | ||||
INBOUND_PEER_TX_DELAY + | INBOUND_PEER_TX_DELAY + | ||||
OVERLOADED_PEER_DELAY - | OVERLOADED_PEER_DELAY - | ||||
1) | 1) | ||||
p.sync_with_ping() | p.sync_with_ping() | ||||
with mininode_lock: | with p2p_lock: | ||||
assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT) | assert_equal(p.tx_getdata_count, MAX_GETDATA_IN_FLIGHT) | ||||
self.log.info( | self.log.info( | ||||
"If we wait {} seconds after announcement, we should eventually get more requests".format( | "If we wait {} seconds after announcement, we should eventually get more requests".format( | ||||
INBOUND_PEER_TX_DELAY + | INBOUND_PEER_TX_DELAY + | ||||
OVERLOADED_PEER_DELAY)) | OVERLOADED_PEER_DELAY)) | ||||
self.nodes[0].setmocktime( | self.nodes[0].setmocktime( | ||||
mock_time + | mock_time + | ||||
INBOUND_PEER_TX_DELAY + | INBOUND_PEER_TX_DELAY + | ||||
OVERLOADED_PEER_DELAY) | OVERLOADED_PEER_DELAY) | ||||
p.wait_until(lambda: p.tx_getdata_count == len(txids)) | p.wait_until(lambda: p.tx_getdata_count == len(txids)) | ||||
def test_expiry_fallback(self):
    """Check that when a GETDATA request to one peer expires, the tx is
    re-requested from the other peer that announced it.

    Two peers announce the same txid; exactly one is asked for it first.
    After advancing mocktime past GETDATA_TX_INTERVAL the request expires
    and the node must fall back to the remaining announcer.
    """
    self.log.info(
        'Check that expiry will select another peer for download')
    TXID = 0xffaa
    peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
    for p in [peer1, peer2]:
        p.send_message(msg_inv([CInv(t=MSG_TX, h=TXID)]))
    # One of the peers is asked for the tx
    peer2.wait_until(
        lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
    with p2p_lock:
        # Identify which peer got the initial request; the other one is
        # the fallback and must not have been asked yet.
        peer_expiry, peer_fallback = (
            peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
        assert_equal(peer_fallback.tx_getdata_count, 0)
    # Wait for request to peer_expiry to expire
    self.nodes[0].setmocktime(int(time.time()) + GETDATA_TX_INTERVAL + 1)
    peer_fallback.wait_until(
        lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
    with p2p_lock:
        assert_equal(peer_fallback.tx_getdata_count, 1)
    # reset mocktime
    self.restart_node(0)
def test_disconnect_fallback(self):
    """Check that when the peer chosen for download disconnects, the tx
    is requested from the other peer that announced it.

    Two peers announce the same txid; after the first requestee
    disconnects, the node must ask the remaining announcer.
    """
    self.log.info(
        'Check that disconnect will select another peer for download')
    TXID = 0xffbb
    peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
    for p in [peer1, peer2]:
        p.send_message(msg_inv([CInv(t=MSG_TX, h=TXID)]))
    # One of the peers is asked for the tx
    peer2.wait_until(
        lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
    with p2p_lock:
        # Identify which peer got the initial request; the other one is
        # the fallback and must not have been asked yet.
        peer_disconnect, peer_fallback = (
            peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
        assert_equal(peer_fallback.tx_getdata_count, 0)
    peer_disconnect.peer_disconnect()
    peer_disconnect.wait_for_disconnect()
    peer_fallback.wait_until(
        lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
    with p2p_lock:
        assert_equal(peer_fallback.tx_getdata_count, 1)
def test_notfound_fallback(self):
    """Check that a NOTFOUND reply makes the node immediately request
    the tx from the other peer that announced it (no expiry wait).
    """
    self.log.info(
        'Check that notfounds will select another peer for download immediately')
    TXID = 0xffdd
    peer1 = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer2 = self.nodes[0].add_p2p_connection(TestP2PConn())
    for p in [peer1, peer2]:
        p.send_message(msg_inv([CInv(t=MSG_TX, h=TXID)]))
    # One of the peers is asked for the tx
    peer2.wait_until(
        lambda: sum(p.tx_getdata_count for p in [peer1, peer2]) == 1)
    with p2p_lock:
        # Identify which peer got the initial request; the other one is
        # the fallback and must not have been asked yet.
        peer_notfound, peer_fallback = (
            peer1, peer2) if peer1.tx_getdata_count == 1 else (peer2, peer1)
        assert_equal(peer_fallback.tx_getdata_count, 0)
    # Send notfound, so that fallback peer is selected
    peer_notfound.send_and_ping(msg_notfound(vec=[CInv(MSG_TX, TXID)]))
    peer_fallback.wait_until(
        lambda: peer_fallback.tx_getdata_count >= 1, timeout=1)
    with p2p_lock:
        assert_equal(peer_fallback.tx_getdata_count, 1)
def test_preferred_inv(self):
    """Check that a tx announced by a noban-whitelisted (preferred) peer
    is requested immediately, without the inbound-peer delay.
    """
    self.log.info(
        'Check that invs from preferred peers are downloaded immediately')
    # Restart with the connecting address whitelisted so the P2P
    # connection below is treated as a preferred peer.
    self.restart_node(0, extra_args=['-whitelist=noban@127.0.0.1'])
    peer = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer.send_message(msg_inv([CInv(t=MSG_TX, h=0xff00ff00)]))
    # The short timeout only passes because there is no announcement delay
    # for preferred peers.
    peer.wait_until(lambda: peer.tx_getdata_count >= 1, timeout=1)
    with p2p_lock:
        assert_equal(peer.tx_getdata_count, 1)
def test_large_inv_batch(self):
    """Check how oversized INV batches are handled.

    With relay permission all MAX_PEER_TX_ANNOUNCEMENTS + 1 announcements
    are requested; without it, announcements past the cap are dropped and
    only MAX_PEER_TX_ANNOUNCEMENTS GETDATAs are ever sent.
    """
    self.log.info(
        'Test how large inv batches are handled with relay permission')
    self.restart_node(0, extra_args=['-whitelist=relay@127.0.0.1'])
    peer = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer.send_message(msg_inv([CInv(t=MSG_TX, h=txid)
                               for txid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
    peer.wait_until(
        lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS + 1)

    self.log.info(
        'Test how large inv batches are handled without relay permission')
    self.restart_node(0)
    peer = self.nodes[0].add_p2p_connection(TestP2PConn())
    peer.send_message(msg_inv([CInv(t=MSG_TX, h=txid)
                               for txid in range(MAX_PEER_TX_ANNOUNCEMENTS + 1)]))
    peer.wait_until(
        lambda: peer.tx_getdata_count == MAX_PEER_TX_ANNOUNCEMENTS)
    # Ping round-trip ensures no additional getdata is in flight before
    # asserting the final count.
    peer.sync_with_ping()
    with p2p_lock:
        assert_equal(peer.tx_getdata_count, MAX_PEER_TX_ANNOUNCEMENTS)
def test_spurious_notfound(self):
    """Check that a NOTFOUND for a tx that was never requested is
    ignored (the node must not misbehave or disconnect the peer)."""
    self.log.info('Check that spurious notfound is ignored')
    self.nodes[0].p2ps[0].send_message(msg_notfound(vec=[CInv(MSG_TX, 1)]))
def run_test(self): | def run_test(self): | ||||
# Run tests without mocktime that only need one peer-connection first, | # Run tests without mocktime that only need one peer-connection first, | ||||
Show All 27 Lines |