From 17ffe2ed518b2d892807649b44f2f89f3ee4d4ad Mon Sep 17 00:00:00 2001
From: Simon Riggs
Date: Mon, 5 Oct 2009 14:28:34 +0100
Subject: [PATCH] Apply
 0007-Need-to-pin-and-take-cleanup-lock-on-every-b-tree-le.patch with some
 docs changes.

All code changes approved, especially the neat tweak to improve
performance in btree vacuum.
---
 src/backend/access/nbtree/nbtxlog.c | 20 +++++++++++++-------
 src/backend/access/transam/clog.c   |  1 -
 src/backend/access/transam/xlog.c   |  6 +++---
 src/include/access/xlog.h           |  1 -
 4 files changed, 16 insertions(+), 12 deletions(-)

diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index aea48d9d54..6b78781b94 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -470,9 +470,6 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 	Page		page;
 	BTPageOpaque opaque;
 
-	if (record->xl_info & XLR_BKP_BLOCK_1)
-		return;
-
 	xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
 
 	/*
@@ -502,6 +499,15 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
 		}
 	}
 
+	/*
+	 * If the block was restored from a full page image, nothing more to do.
+	 * The RestoreBkpBlocks() call already pinned and took cleanup lock on
+	 * it. We call RestoreBkpBlocks() after skipping blocks so that we get
+	 * sequential disk access.
+	 */
+	if (record->xl_info & XLR_BKP_BLOCK_1)
+		return;
+
 	/*
 	 * Like in btvacuumpage(), we need to take a cleanup lock on every leaf
 	 * page. See nbtree/README for details.
@@ -830,17 +836,17 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
 											   true);
 
 				ResolveRecoveryConflictWithVirtualXIDs(backends,
-													   "drop tablespace",
+													   "b-tree delete",
 													   CONFLICT_MODE_ERROR_DEFERRABLE,
 													   lsn);
 			}
 		}
 
 	/*
-	 * Exclusive lock on a btree block is as good as a Cleanup lock,
-	 * so need to special case btree delete and vacuum.
+	 * Vacuum needs to pin and take cleanup lock on every leaf page,
+	 * a regular exclusive lock is enough for all other purposes.
 	 */
-	RestoreBkpBlocks(lsn, record, false);
+	RestoreBkpBlocks(lsn, record, (info == XLOG_BTREE_VACUUM));
 
 	switch (info)
 	{
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 90de8b61b5..c6c27a79bc 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -35,7 +35,6 @@
 #include "access/clog.h"
 #include "access/slru.h"
 #include "access/transam.h"
-#include "access/xact.h"
 #include "access/xlogutils.h"
 #include "pg_trace.h"
 #include "postmaster/bgwriter.h"
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 8308e22ed0..b5a7f66701 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -3338,9 +3338,9 @@ CleanupBackupHistory(void)
  * ignoring them as already applied, but that's not a huge drawback.
  *
  * If 'cleanup' is true, a cleanup lock is used when restoring blocks.
- * Otherwise, a normal exclusive lock is used. At the moment, that's just
- * pro forma, because there can't be any regular backends in the system
- * during recovery. The 'cleanup' argument applies to all backup blocks
+ * Otherwise, a normal exclusive lock is used. This is important to
+ * safeguard these changes against users accessing the system during recovery.
+ * The 'cleanup' argument applies to all backup blocks
  * in the WAL record, that suffices for now.
  */
 void
 RestoreBkpBlocks(XLogRecPtr lsn, XLogRecord *record, bool cleanup)
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 7a9308204c..7eec091e61 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -20,7 +20,6 @@
 
 /* Handy constant for an invalid xlog recptr */
 static const XLogRecPtr InvalidXLogRecPtr = {0, 0};
-#define XLogRecPtrIsValid(xp) (!(xp.xlogid ==0 && xp.xrecoff == 0))
 
 /*
  * The overall layout of an XLOG record is:
-- 
2.39.5
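
A note on the locking pattern, for readers following this patch: the heart
of the change is that replaying a b-tree vacuum record must pin each leaf
page and take a cleanup lock on it, just as btvacuumpage() does in normal
running, because once hot standby lets queries run during recovery a plain
exclusive lock no longer guarantees that no scan still holds a pin on the
page. The sketch below illustrates that pattern; XLogReadBufferExtended(),
LockBufferForCleanup() and UnlockReleaseBuffer() are the real
buffer-manager calls, but the function name and the elided page edit are
illustrative only, not code from the patch.

#include "postgres.h"

#include "access/nbtree.h"
#include "access/xlogutils.h"
#include "storage/bufmgr.h"

/*
 * Illustrative sketch only, not part of the patch: pin a block and take
 * a cleanup lock on it before applying a vacuum-style change at replay.
 */
static void
replay_leaf_with_cleanup_lock(xl_btree_vacuum *xlrec, XLogRecPtr lsn)
{
	Buffer		buffer;
	Page		page;

	/* Pin the target block; RBM_NORMAL reads it in if it isn't cached. */
	buffer = XLogReadBufferExtended(xlrec->node, MAIN_FORKNUM,
									xlrec->block, RBM_NORMAL);
	if (!BufferIsValid(buffer))
		return;					/* block has been truncated away */

	/*
	 * A cleanup lock is an exclusive lock held while no other backend has
	 * the page pinned. This waits out any standby scan still using the
	 * page, which is what makes the replayed vacuum safe against
	 * concurrent index scans.
	 */
	LockBufferForCleanup(buffer);

	page = (Page) BufferGetPage(buffer);
	if (!XLByteLE(lsn, PageGetLSN(page)))
	{
		/* ... apply the record's changes to the page here ... */
		PageSetLSN(page, lsn);
		MarkBufferDirty(buffer);
	}
	UnlockReleaseBuffer(buffer);
}

The same reasoning explains the tweak praised in the commit message:
passing (info == XLOG_BTREE_VACUUM) to RestoreBkpBlocks() makes the
full-page-image path take the cleanup lock itself, so btree_xlog_vacuum()
can simply return early for blocks restored that way.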