Page page;
BTPageOpaque opaque;
- if (record->xl_info & XLR_BKP_BLOCK_1)
- return;
-
xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
}
}
+ /*
+ * If the block was restored from a full page image, nothing more to do.
+ * The RestoreBkpBlocks() call already pinned and took a cleanup lock on
+ * it. We call RestoreBkpBlocks() after skipping blocks so that we get
+ * sequential disk access.
+ */
+ if (record->xl_info & XLR_BKP_BLOCK_1)
+ return;
+
/*
* Like in btvacuumpage(), we need to take a cleanup lock on every leaf
* page. See nbtree/README for details.
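For illustration, the replay loop that this comment describes could look roughly like the sketch below. This is a sketch only; the xl_btree_vacuum field names and the buffer-manager helpers used here are assumptions based on the surrounding code, not part of this hunk.

	BlockNumber	blkno;

	for (blkno = xlrec->lastBlockVacuumed + 1; blkno < xlrec->block; blkno++)
	{
		/*
		 * We don't need the page contents; pinning and cleanup-locking the
		 * buffer is enough to know that no scan still has it pinned.
		 */
		Buffer		buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
													blkno, RBM_NORMAL);

		if (BufferIsValid(buffer))
		{
			LockBufferForCleanup(buffer);
			UnlockReleaseBuffer(buffer);
		}
	}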
true);
ResolveRecoveryConflictWithVirtualXIDs(backends,
- "drop tablespace",
+ "b-tree delete",
CONFLICT_MODE_ERROR_DEFERRABLE,
lsn);
}
}
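For context on the reason string: it identifies which kind of WAL record forced the conflict when a standby query has to be cancelled. A very rough sketch of the surrounding call site follows; the exact arguments of GetConflictingVirtualXIDs() are assumptions here, not something this hunk shows.

	VirtualTransactionId *backends;

	/* Find backends whose snapshots might still need the removed tuples. */
	backends = GetConflictingVirtualXIDs(xlrec->latestRemovedXid,
										 InvalidOid,
										 true);
	ResolveRecoveryConflictWithVirtualXIDs(backends,
										   "b-tree delete",
										   CONFLICT_MODE_ERROR_DEFERRABLE,
										   lsn);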
/*
- * Exclusive lock on a btree block is as good as a Cleanup lock,
- * so need to special case btree delete and vacuum.
+ * Vacuum needs to pin and take a cleanup lock on every leaf page;
+ * a regular exclusive lock is enough for all other purposes.
*/
- RestoreBkpBlocks(lsn, record, false);
+ RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));
switch (info)
{
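To see how the new third argument fits in, here is a sketch of the redo entry point after this change; the case list is abbreviated and illustrative.

	void
	btree_redo(XLogRecPtr lsn, XLogRecord *record)
	{
		uint8		info = record->xl_info & ~XLR_INFO_MASK;

		/*
		 * Only vacuum replay needs cleanup locks on the restored backup
		 * blocks; everything else gets by with ordinary exclusive locks.
		 */
		RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));

		switch (info)
		{
			case XLOG_BTREE_VACUUM:
				btree_xlog_vacuum(lsn, record);
				break;
			case XLOG_BTREE_DELETE:
				btree_xlog_delete(lsn, record);
				break;
			/* ... remaining record types ... */
			default:
				elog(PANIC, "btree_redo: unknown op code %u", info);
		}
	}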
* ignoring them as already applied, but that's not a huge drawback.
*
* If 'cleanup' is true, a cleanup lock is used when restoring blocks.
- * Otherwise, a normal exclusive lock is used. At the moment, that's just
- * pro forma, because there can't be any regular backends in the system
- * during recovery. The 'cleanup' argument applies to all backup blocks
+ * Otherwise, a normal exclusive lock is used. This is important because,
+ * during recovery, other backends may be querying the system and accessing
+ * these pages concurrently. The 'cleanup' argument applies to all backup blocks
* in the WAL record, that suffices for now.
*/
void
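The practical difference between the two lock levels is which buffer-lock call the restore loop makes for each backup block, roughly as sketched here (not the exact body of RestoreBkpBlocks()):

	if (cleanup)
		LockBufferForCleanup(buffer);	/* also waits for all other pins to drop */
	else
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);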
/* Handy constant for an invalid xlog recptr */
static const XLogRecPtr InvalidXLogRecPtr = {0, 0};
-#define XLogRecPtrIsValid(xp) (!(xp.xlogid ==0 && xp.xrecoff == 0))
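With the XLogRecPtrIsValid() macro removed, a caller that needs the same test can compare against the constant directly using the existing XLByteEQ() macro, e.g.:

	if (!XLByteEQ(recptr, InvalidXLogRecPtr))
	{
		/* recptr points at a real WAL position */
	}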
/*
* The overall layout of an XLOG record is: