Move the tuple freezing point in CLUSTER to a point further back in the past,
authorAlvaro Herrera <alvherre@alvh.no-ip.org>
Thu, 17 May 2007 15:28:29 +0000 (15:28 +0000)
committerAlvaro Herrera <alvherre@alvh.no-ip.org>
Thu, 17 May 2007 15:28:29 +0000 (15:28 +0000)
to avoid losing useful Xid information in not-so-old tuples.  This makes
CLUSTER behave the same as VACUUM as far as tuple-freezing behavior goes
(though CLUSTER does not yet advance the table's relfrozenxid).

While at it, move the actual freezing operation in rewriteheap.c to a more
appropriate place, and document it thoroughly.  This part of the patch is
from Tom Lane.

src/backend/access/heap/rewriteheap.c
src/backend/commands/cluster.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/include/access/rewriteheap.h
src/include/commands/vacuum.h

index de757eaeb131ff4fd5f53413af97208ac5fb0ea4..314f01ed75e63ba42090231a5994a980ab1374f8 100644 (file)
@@ -123,6 +123,8 @@ typedef struct RewriteStateData
        bool                    rs_use_wal;                     /* must we WAL-log inserts? */
        TransactionId   rs_oldest_xmin;         /* oldest xmin used by caller to
                                                                                 * determine tuple visibility */
+       TransactionId   rs_freeze_xid;          /* Xid that will be used as freeze
+                                                                                * cutoff point */
        MemoryContext   rs_cxt;                         /* for hash tables and entries and
                                                                                 * tuples in them */
        HTAB               *rs_unresolved_tups; /* unmatched A tuples */
@@ -171,6 +173,7 @@ static void raw_heap_insert(RewriteState state, HeapTuple tup);
  *
  * new_heap            new, locked heap relation to insert tuples to
  * oldest_xmin xid used by the caller to determine which tuples are dead
+ * freeze_xid  xid before which tuples will be frozen
  * use_wal             should the inserts to the new heap be WAL-logged?
  *
  * Returns an opaque RewriteState, allocated in current memory context,
@@ -178,7 +181,7 @@ static void raw_heap_insert(RewriteState state, HeapTuple tup);
  */
 RewriteState
 begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
-                                  bool use_wal)
+                                  TransactionId freeze_xid, bool use_wal)
 {
        RewriteState state;
        MemoryContext rw_cxt;
@@ -206,6 +209,7 @@ begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
        state->rs_buffer_valid = false;
        state->rs_use_wal = use_wal;
        state->rs_oldest_xmin = oldest_xmin;
+       state->rs_freeze_xid = freeze_xid;
        state->rs_cxt = rw_cxt;
 
        /* Initialize hash tables used to track update chains */
@@ -292,7 +296,9 @@ end_heap_rewrite(RewriteState state)
 /*
  * Add a tuple to the new heap.
  *
- * Visibility information is copied from the original tuple.
+ * Visibility information is copied from the original tuple, except that
+ * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
+ * it had better be temp storage not a pointer to the original tuple.
  *
  * state               opaque state as returned by begin_heap_rewrite
  * old_tuple   original tuple in the old heap
@@ -323,6 +329,17 @@ rewrite_heap_tuple(RewriteState state,
        new_tuple->t_data->t_infomask |=
                old_tuple->t_data->t_infomask & HEAP_XACT_MASK;
 
+       /*
+        * While we have our hands on the tuple, we may as well freeze any
+        * very-old xmin or xmax, so that future VACUUM effort can be saved.
+        *
+        * Note we abuse heap_freeze_tuple() a bit here, since it's expecting
+        * to be given a pointer to a tuple in a disk buffer.  It happens
+        * though that we can get the right things to happen by passing
+        * InvalidBuffer for the buffer.
+        */
+       heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid, InvalidBuffer);
+
        /*
         * Invalid ctid means that ctid should point to the tuple itself.
         * We'll override it later if the tuple is part of an update chain.
@@ -538,8 +555,6 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
        OffsetNumber    newoff;
        HeapTuple               heaptup;
 
-       heap_freeze_tuple(tup->t_data, state->rs_oldest_xmin, InvalidBuffer);
-
        /*
         * If the new tuple is too big for storage or contains already toasted
         * out-of-line attributes from some other relation, invoke the toaster.
index c53c88ed0c0bf878b2ca794a3a65a63a1746a30e..781ce7e81794abc7250238dae3a998aa4da7b3b4 100644 (file)
@@ -29,6 +29,7 @@
 #include "catalog/namespace.h"
 #include "catalog/toasting.h"
 #include "commands/cluster.h"
+#include "commands/vacuum.h"
 #include "miscadmin.h"
 #include "storage/procarray.h"
 #include "utils/acl.h"
@@ -657,6 +658,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
        HeapTuple       tuple;
        bool            use_wal;
        TransactionId OldestXmin;
+       TransactionId FreezeXid;
        RewriteState rwstate;
 
        /*
@@ -688,11 +690,16 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
        /* use_wal off requires rd_targblock be initially invalid */
        Assert(NewHeap->rd_targblock == InvalidBlockNumber);
 
-       /* Get the cutoff xmin we'll use to weed out dead tuples */
-       OldestXmin = GetOldestXmin(OldHeap->rd_rel->relisshared, true);
+       /*
+        * compute xids used to freeze and weed out dead tuples.  We use -1
+        * freeze_min_age to avoid having CLUSTER freeze tuples earlier than
+        * a plain VACUUM would.
+        */
+       vacuum_set_xid_limits(-1, OldHeap->rd_rel->relisshared,
+                                                 &OldestXmin, &FreezeXid);
 
        /* Initialize the rewrite operation */
-       rwstate = begin_heap_rewrite(NewHeap, OldestXmin, use_wal);
+       rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
 
        /*
         * Scan through the OldHeap in OldIndex order and copy each tuple into the
index 1e4dc286f865623d91eeab3a1aef79296167f276..6b0e3e4f9ee612d6f7031bb4fb3450e6a9618947 100644 (file)
@@ -566,7 +566,7 @@ get_rel_oids(List *relids, const RangeVar *vacrel, const char *stmttype)
  * vacuum_set_xid_limits() -- compute oldest-Xmin and freeze cutoff points
  */
 void
-vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
+vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
                                          TransactionId *oldestXmin,
                                          TransactionId *freezeLimit)
 {
@@ -588,12 +588,12 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
        Assert(TransactionIdIsNormal(*oldestXmin));
 
        /*
-        * Determine the minimum freeze age to use: as specified in the vacstmt,
+        * Determine the minimum freeze age to use: as specified by the caller,
         * or vacuum_freeze_min_age, but in any case not more than half
         * autovacuum_freeze_max_age, so that autovacuums to prevent XID
         * wraparound won't occur too frequently.
         */
-       freezemin = vacstmt->freeze_min_age;
+       freezemin = freeze_min_age;
        if (freezemin < 0)
                freezemin = vacuum_freeze_min_age;
        freezemin = Min(freezemin, autovacuum_freeze_max_age / 2);
@@ -1154,7 +1154,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
                                i;
        VRelStats  *vacrelstats;
 
-       vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
+       vacuum_set_xid_limits(vacstmt->freeze_min_age, onerel->rd_rel->relisshared,
                                                  &OldestXmin, &FreezeLimit);
 
        /*
index d9a5ab7eac7fcf09e407043889f5e27e910f9376..b9a7052f05c9c28e0addddb83bdb3fc5a2215fc7 100644 (file)
@@ -158,7 +158,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
        else
                elevel = DEBUG2;
 
-       vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
+       vacuum_set_xid_limits(vacstmt->freeze_min_age, onerel->rd_rel->relisshared,
                                                  &OldestXmin, &FreezeLimit);
 
        vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
index 054bdd466335b5eb76731e051cd7bd59023f0fa8..094425c75d91edfe7a00e1e5687be89def913417 100644 (file)
 typedef struct RewriteStateData *RewriteState;
 
 extern RewriteState begin_heap_rewrite(Relation NewHeap,
-                                                                          TransactionId OldestXmin, bool use_wal);
+                                  TransactionId OldestXmin, TransactionId FreezeXid,
+                                  bool use_wal);
 extern void end_heap_rewrite(RewriteState state);
 extern void rewrite_heap_tuple(RewriteState state, HeapTuple oldTuple,
-                                                          HeapTuple newTuple);
+                                  HeapTuple newTuple);
 extern void rewrite_heap_dead_tuple(RewriteState state, HeapTuple oldTuple);
 
 #endif /* REWRITE_HEAP_H */
index 123f0693fd3d7cce59d04e309d4c17f5f3145e8a..1373e9a4b8299b7e303edfab759077ad33c08b5d 100644 (file)
@@ -119,7 +119,7 @@ extern void vac_update_relstats(Oid relid,
                                        double num_tuples,
                                        bool hasindex,
                                        TransactionId frozenxid);
-extern void vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
+extern void vacuum_set_xid_limits(int freeze_min_age, bool sharedRel,
                                          TransactionId *oldestXmin,
                                          TransactionId *freezeLimit);
 extern void vac_update_datfrozenxid(void);