diff --git a/src/validation.cpp b/src/validation.cpp
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -243,6 +243,27 @@
 /** chainwork for the last block that preciousblock has been applied to. */
 arith_uint256 nLastPreciousChainwork = 0;
 
+/**
+ * In order to efficiently track invalidity of headers, we keep the set of
+ * blocks which we tried to connect and found to be invalid here (i.e. which
+ * were set to BLOCK_FAILED_VALID since the last restart). We can then
+ * walk this set and check if a new header is a descendant of something in
+ * this set, preventing us from having to walk mapBlockIndex when we try
+ * to connect a bad block and fail.
+ *
+ * While this is more complicated than marking everything which descends
+ * from an invalid block as invalid at the time we discover it to be
+ * invalid, doing so would require walking all of mapBlockIndex to find all
+ * descendants. Since this case should be very rare, keeping track of the
+ * BLOCK_FAILED_VALID blocks in a set should work just as well.
+ *
+ * Because we already walk mapBlockIndex in height-order at startup, we go
+ * ahead and mark descendants of invalid blocks as FAILED_CHILD at that time,
+ * instead of putting things in this set.
+ */
+std::set<CBlockIndex *> g_failed_blocks;
+
 /** Dirty block index entries. */
 std::set<const CBlockIndex *> setDirtyBlockIndex;
 
@@ -1119,6 +1140,7 @@
                                     const CValidationState &state) {
     if (!state.CorruptionPossible()) {
         pindex->nStatus = pindex->nStatus.withFailed();
+        g_failed_blocks.insert(pindex);
         setDirtyBlockIndex.insert(pindex);
         InvalidChainFound(pindex);
     }
@@ -2978,21 +3000,18 @@
                               CBlockIndex *pindex, bool invalidate) {
     AssertLockHeld(cs_main);
 
-    // Mark the block as either invalid or parked.
-    pindex->nStatus = invalidate ? pindex->nStatus.withFailed()
-                                 : pindex->nStatus.withParked();
-    setDirtyBlockIndex.insert(pindex);
+    // We first disconnect backwards and then mark the blocks as invalid.
+    // This prevents a case where pruned nodes may fail to invalidateblock
+    // and be left unable to start as they have no tip candidates (as there
+    // are no blocks that meet the "have data and are not invalid per
+    // nStatus" criteria for inclusion in setBlockIndexCandidates).
+
+    bool pindex_was_in_chain = false;
+    CBlockIndex *invalid_walk_tip = chainActive.Tip();
 
     DisconnectedBlockTransactions disconnectpool;
     while (chainActive.Contains(pindex)) {
-        CBlockIndex *pindexWalk = chainActive.Tip();
-        if (pindexWalk != pindex) {
-            pindexWalk->nStatus = invalidate
-                                      ? pindexWalk->nStatus.withFailedParent()
-                                      : pindexWalk->nStatus.withParkedParent();
-            setDirtyBlockIndex.insert(pindexWalk);
-        }
-
+        pindex_was_in_chain = true;
         // ActivateBestChain considers blocks already in chainActive
         // unconditionally valid already, so force disconnect away from it.
         if (!DisconnectTip(config, state, &disconnectpool)) {
@@ -3003,6 +3022,24 @@
         }
     }
 
+    // Now mark the blocks we just disconnected as invalid descendants of
+    // pindex (note this may not be all of its descendants).
+    while (pindex_was_in_chain && invalid_walk_tip != pindex) {
+        invalid_walk_tip->nStatus =
+            invalidate ? invalid_walk_tip->nStatus.withFailedParent()
+                       : invalid_walk_tip->nStatus.withParkedParent();
+        setDirtyBlockIndex.insert(invalid_walk_tip);
+        invalid_walk_tip = invalid_walk_tip->pprev;
+    }
+
+    // Mark the block as either invalid or parked.
+    pindex->nStatus = invalidate ? pindex->nStatus.withFailed()
+                                 : pindex->nStatus.withParked();
+    setDirtyBlockIndex.insert(pindex);
+    if (invalidate) {
+        g_failed_blocks.insert(pindex);
+    }
+
     // DisconnectTip will add transactions to disconnectpool; try to add these
     // back to the mempool.
     disconnectpool.updateMempoolForReorg(config, true);
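The ordering in the hunk above (disconnect everything first, set status flags afterwards) can be illustrated with a toy model. This is not the real InvalidateBlock(): Block, Status, and the containers standing in for chainActive, setDirtyBlockIndex, and g_failed_blocks are all invented. It only demonstrates that no block is flagged until it has already been disconnected, so a node that had to stop part-way (e.g. a pruned node) would still have unflagged blocks available as tip candidates.

```cpp
// Toy sketch of "disconnect back past the block, then mark statuses".
#include <algorithm>
#include <cassert>
#include <set>
#include <vector>

enum class Status { Valid, Failed, FailedParent };

struct Block {
    int height;
    Block *pprev;
    Status status = Status::Valid;
};

int main() {
    // Active chain: genesis (0) through tip (4); we invalidate block 2.
    std::vector<Block> blocks(5);
    for (int i = 0; i < 5; ++i) {
        blocks[i] = {i, i ? &blocks[i - 1] : nullptr};
    }
    std::vector<Block *> active{&blocks[0], &blocks[1], &blocks[2],
                                &blocks[3], &blocks[4]};
    Block *pindex = &blocks[2];
    std::set<Block *> dirty;      // stands in for setDirtyBlockIndex
    std::set<Block *> failed_set; // stands in for g_failed_blocks

    // Phase 1: disconnect tips until pindex is no longer in the active chain.
    // Nothing has been flagged yet, so every block is still a viable tip
    // candidate if we had to stop here.
    bool was_in_chain = false;
    Block *invalid_walk_tip = active.back();
    while (std::find(active.begin(), active.end(), pindex) != active.end()) {
        was_in_chain = true;
        active.pop_back(); // stands in for DisconnectTip()
    }

    // Phase 2: walk back from the old tip and flag the disconnected blocks.
    while (was_in_chain && invalid_walk_tip != pindex) {
        invalid_walk_tip->status = Status::FailedParent;
        dirty.insert(invalid_walk_tip);
        invalid_walk_tip = invalid_walk_tip->pprev;
    }
    pindex->status = Status::Failed;
    dirty.insert(pindex);
    failed_set.insert(pindex);

    assert(active.back() == &blocks[1]); // new tip is the parent of pindex
    assert(blocks[3].status == Status::FailedParent);
    assert(blocks[4].status == Status::FailedParent);
    return 0;
}
```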
@@ -3066,6 +3103,9 @@
         pindex->GetAncestor(pindexBase->nHeight) == pindexBase) {
         pindex->nStatus = newStatus;
         setDirtyBlockIndex.insert(pindex);
+        if (newStatus.isValid()) {
+            g_failed_blocks.erase(pindex);
+        }
 
         if (pindex->IsValid(BlockValidity::TRANSACTIONS) && pindex->nChainTx &&
             setBlockIndexCandidates.value_comp()(chainActive.Tip(), pindex)) {
@@ -3094,6 +3134,9 @@
         if (pindex->nStatus != newStatus) {
             pindex->nStatus = newStatus;
             setDirtyBlockIndex.insert(pindex);
+            if (newStatus.isValid()) {
+                g_failed_blocks.erase(pindex);
+            }
         }
         pindex = pindex->pprev;
     }
@@ -3761,6 +3804,24 @@
             return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
                          __func__, hash.ToString(), FormatStateMessage(state));
         }
+
+        if (!pindexPrev->IsValid(BlockValidity::SCRIPTS)) {
+            for (const CBlockIndex *failedit : g_failed_blocks) {
+                if (pindexPrev->GetAncestor(failedit->nHeight) == failedit) {
+                    assert(failedit->nStatus.hasFailed());
+                    CBlockIndex *invalid_walk = pindexPrev;
+                    while (invalid_walk != failedit) {
+                        invalid_walk->nStatus =
+                            invalid_walk->nStatus.withFailedParent();
+                        setDirtyBlockIndex.insert(invalid_walk);
+                        invalid_walk = invalid_walk->pprev;
+                    }
+                    return state.DoS(100,
+                                     error("%s: prev block invalid", __func__),
+                                     REJECT_INVALID, "bad-prevblk");
+                }
+            }
+        }
     }
 
     if (pindex == nullptr) {
@@ -4380,6 +4441,11 @@
             }
         }
 
+        if (!pindex->nStatus.hasFailed() && pindex->pprev &&
+            pindex->pprev->nStatus.hasFailed()) {
+            pindex->nStatus = pindex->nStatus.withFailedParent();
+            setDirtyBlockIndex.insert(pindex);
+        }
         if (pindex->IsValid(BlockValidity::TRANSACTIONS) &&
             (pindex->nChainTx || pindex->pprev == nullptr)) {
             setBlockIndexCandidates.insert(pindex);
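The load-time propagation in the hunk above depends on the block index being walked in height order, so a parent's failure status is settled before any of its children are visited and a single linear pass suffices; that is why the doc comment for g_failed_blocks says startup does not need the set at all. Below is a rough sketch of that invariant with invented toy types rather than the real startup loading code; the toy check also propagates through an already-marked failed parent, mirroring the intent of the failed/failed-parent flags.

```cpp
// Toy sketch: propagate "failed parent" flags over a height-sorted index.
#include <algorithm>
#include <cassert>
#include <vector>

struct Block {
    int height;
    Block *pprev;
    bool failed = false;        // the block itself failed validation
    bool failed_parent = false; // some ancestor failed validation
};

void propagate_failures(std::vector<Block *> &index) {
    // Height order guarantees pprev is processed before its descendants,
    // so one linear pass is enough; no failed-block set is needed here.
    std::sort(index.begin(), index.end(),
              [](const Block *a, const Block *b) { return a->height < b->height; });
    for (Block *pindex : index) {
        if (!pindex->failed && pindex->pprev &&
            (pindex->pprev->failed || pindex->pprev->failed_parent)) {
            pindex->failed_parent = true;
        }
    }
}

int main() {
    // Chain 0 -> 1 -> 2 -> 3, with block 1 marked failed before "startup".
    std::vector<Block> blocks(4);
    for (int i = 0; i < 4; ++i) {
        blocks[i] = {i, i ? &blocks[i - 1] : nullptr};
    }
    blocks[1].failed = true;
    std::vector<Block *> index{&blocks[3], &blocks[0], &blocks[2], &blocks[1]};
    propagate_failures(index);
    assert(blocks[2].failed_parent && blocks[3].failed_parent);
    assert(!blocks[0].failed_parent);
    return 0;
}
```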
@@ -4893,6 +4959,7 @@
     nLastBlockFile = 0;
     nBlockSequenceId = 1;
     setDirtyBlockIndex.clear();
+    g_failed_blocks.clear();
     setDirtyFileInfo.clear();
 
     for (BlockMap::value_type &entry : mapBlockIndex) {
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -355,9 +355,7 @@
         headers_message = msg_headers()
         headers_message.headers.append(CBlockHeader(block_293))
         test_node.send_message(headers_message)
-        # FIXME: Uncomment this line once Core backport 015a525 is completed.
-        # Current behavior does not ban peers that give us headers on invalid chains.
-        # test_node.wait_for_disconnect()
+        test_node.wait_for_disconnect()
 
         # 9. Connect node1 to node0 and ensure it is able to sync
         connect_nodes(self.nodes[0], self.nodes[1])