diff --git a/test/functional/abc_p2p_avalanche.py b/test/functional/abc_p2p_avalanche.py
--- a/test/functional/abc_p2p_avalanche.py
+++ b/test/functional/abc_p2p_avalanche.py
@@ -64,17 +64,14 @@
         self.remote_extra_entropy = message.nExtraEntropy
 
     def on_avaresponse(self, message):
-        with mininode_lock:
-            self.avaresponses.append(message.response)
+        self.avaresponses.append(message.response)
 
     def on_avapoll(self, message):
-        with mininode_lock:
-            self.avapolls.append(message.poll)
+        self.avapolls.append(message.poll)
 
     def on_avahello(self, message):
-        with mininode_lock:
-            assert(self.avahello is None)
-            self.avahello = message
+        assert(self.avahello is None)
+        self.avahello = message
 
     def send_avaresponse(self, round, votes, privkey):
         response = AvalancheResponse(round, 0, votes)
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -354,10 +354,9 @@
             header_and_shortids, block_hash, block)
 
         # Now fetch the compact block using a normal non-announce getdata
-        with mininode_lock:
-            test_node.clear_block_announcement()
-            inv = CInv(MSG_CMPCT_BLOCK, block_hash)
-            test_node.send_message(msg_getdata([inv]))
+        test_node.clear_block_announcement()
+        inv = CInv(MSG_CMPCT_BLOCK, block_hash)
+        test_node.send_message(msg_getdata([inv]))
 
         wait_until(test_node.received_block_announcement,
                    timeout=30, lock=mininode_lock)
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -584,7 +584,7 @@
 # P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
 # This lock should be acquired in the thread running the test logic to synchronize
 # access to any data shared with the P2PInterface or P2PConnection.
-mininode_lock = threading.RLock()
+mininode_lock = threading.Lock()
 
 
 class NetworkThread(threading.Thread):
@@ -787,7 +787,7 @@
         The mempool should mark unbroadcast=False for these transactions.
         """
         # Wait until invs have been received (and getdatas sent) for each txid.
-        self.wait_until(lambda: set(self.get_invs()) == set(
+        self.wait_until(lambda: set(self.tx_invs_received.keys()) == set(
             [int(tx, 16) for tx in txns]), timeout)
         # Flush messages and wait for the getdatas to be processed
         self.sync_with_ping()
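
Note (not part of the diff): a minimal standalone sketch of why the nested `with mininode_lock:` blocks have to go together with the RLock-to-Lock change. The comment kept in mininode.py says P2PConnection acquires mininode_lock whenever it delivers a message to a P2PInterface, so the on_* handlers already run with the lock held; re-acquiring a plain threading.Lock in the same thread would deadlock, and the old code only worked because RLock is reentrant. The snippet below is illustrative only and is not framework code.

import threading

# RLock is reentrant: the same thread may acquire it again while holding it,
# which is why the old nested acquisitions in the handlers were harmless.
rlock = threading.RLock()
with rlock:
    assert rlock.acquire(blocking=False)
    rlock.release()

# A plain Lock is not reentrant: a second acquire by the thread that already
# holds it would block forever. A non-blocking attempt simply fails, which is
# what a nested `with mininode_lock:` would now amount to in a handler.
lock = threading.Lock()
with lock:
    assert not lock.acquire(blocking=False)

The same reasoning applies to the p2p_compactblocks.py hunk (clear_block_announcement() takes the lock itself) and to the wait_for_broadcast change, where wait_until already evaluates its predicate under the lock, so the predicate now reads tx_invs_received directly instead of going through get_invs().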