Changeset View
Changeset View
Standalone View
Standalone View
src/txmempool.cpp
Show First 20 Lines • Show All 1,315 Lines • ▼ Show 20 Lines | while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { | ||||
// Drop the earliest entry, and remove its children from the | // Drop the earliest entry, and remove its children from the | ||||
// mempool. | // mempool. | ||||
auto it = queuedTx.get<insertion_order>().begin(); | auto it = queuedTx.get<insertion_order>().begin(); | ||||
g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); | g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); | ||||
removeEntry(it); | removeEntry(it); | ||||
} | } | ||||
} | } | ||||
void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) {
    // pool.cs is probably locked already, but re-take it anyway
    LOCK(pool.cs);
    // addForBlock's algorithm sorts a vector of transactions back into
    // topological order. We use it in a separate object to create a valid
    // ordering of all mempool transactions, which we then splice in front of
    // the current queuedTx. This results in a valid sequence of transactions to
    // be reprocessed in updateMempoolForReorg
    DisconnectedBlockTransactions topoSorter;
    std::vector<CTransactionRef> poolTxns;
    poolTxns.reserve(pool.mapTx.size());
    // Copy in entry_time order so that addForBlock (which walks its input in
    // reverse) likely receives the transactions already close to the final
    // queuedTx ordering.
    for (const CTxMemPoolEntry &entry : pool.mapTx.get<entry_time>()) {
        poolTxns.push_back(entry.GetSharedTx());
    }
    pool.clear();
    topoSorter.addForBlock(poolTxns);
    cachedInnerUsage += topoSorter.cachedInnerUsage;
    queuedTx.get<insertion_order>().splice(
        queuedTx.get<insertion_order>().begin(),
        topoSorter.queuedTx.get<insertion_order>());
    // Enforce the memory cap, since we cannot know whether additional blocks
    // are still going to be disconnected.
    while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) {
        // The earliest entry has, by construction, no children in the queue,
        // so it can be dropped without evicting descendants.
        auto evictIt = queuedTx.get<insertion_order>().begin();
        removeEntry(evictIt);
    }
}
void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, | void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, | ||||
bool fAddToMempool) { | bool fAddToMempool) { | ||||
AssertLockHeld(cs_main); | AssertLockHeld(cs_main); | ||||
std::vector<TxId> txidsUpdate; | std::vector<TxId> txidsUpdate; | ||||
// disconnectpool's insertion_order index sorts the entries from oldest to | // disconnectpool's insertion_order index sorts the entries from oldest to | ||||
// newest, but the oldest entry will be the last tx from the latest mined | // newest, but the oldest entry will be the last tx from the latest mined | ||||
// block that was disconnected. | // block that was disconnected. | ||||
Show All 37 Lines |
Not sure about this comment — it looks like the intended call site will only have cs_main locked.
(If cs_main is already held, I'm not sure locking pool.cs is strictly necessary, but it's a good idea anyway.)