Tweak hash index AM to use the new ReadOrZeroBuffer bufmgr API when fetching
author: Tom Lane <tgl@sss.pgh.pa.us>
Thu, 3 May 2007 16:45:58 +0000 (16:45 +0000)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Thu, 3 May 2007 16:45:58 +0000 (16:45 +0000)
pages it intends to zero immediately.  Just to show there is some use for that
function besides WAL recovery :-).
Along the way, fold _hash_checkpage and _hash_pageinit calls into _hash_getbuf
and friends, instead of expecting callers to do that separately.

contrib/pgstattuple/pgstattuple.c
src/backend/access/hash/hash.c
src/backend/access/hash/hashinsert.c
src/backend/access/hash/hashovfl.c
src/backend/access/hash/hashpage.c
src/backend/access/hash/hashsearch.c
src/backend/access/hash/hashutil.c
src/include/access/hash.h

index cd85bcded607a75b41aa1624523747966cb60868..2ead470b42ea6c9d0819e00885e6e2ad8b6e382a 100644 (file)
@@ -360,7 +360,7 @@ pgstat_hash_page(pgstattuple_type * stat, Relation rel, BlockNumber blkno)
        Page            page;
 
        _hash_getlock(rel, blkno, HASH_SHARE);
-       buf = _hash_getbuf(rel, blkno, HASH_READ);
+       buf = _hash_getbuf(rel, blkno, HASH_READ, 0);
        page = BufferGetPage(buf);
 
        if (PageGetSpecialSize(page) == MAXALIGN(sizeof(HashPageOpaqueData)))
index 9e9604399db7ce97b1aafe8cab31dd33682328c9..a0f838aa6dbb3e218bda61d2992e8596720bdbae 100644 (file)
@@ -506,8 +506,7 @@ hashbulkdelete(PG_FUNCTION_ARGS)
         * array cannot change under us; and it beats rereading the metapage for
         * each bucket.
         */
-       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-       _hash_checkpage(rel, metabuf, LH_META_PAGE);
+       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
        metap = (HashMetaPage) BufferGetPage(metabuf);
        orig_maxbucket = metap->hashm_maxbucket;
        orig_ntuples = metap->hashm_ntuples;
@@ -548,8 +547,8 @@ loop_top:
 
                        vacuum_delay_point();
 
-                       buf = _hash_getbuf(rel, blkno, HASH_WRITE);
-                       _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+                       buf = _hash_getbuf(rel, blkno, HASH_WRITE,
+                                                          LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                        page = BufferGetPage(buf);
                        opaque = (HashPageOpaque) PageGetSpecialPointer(page);
                        Assert(opaque->hasho_bucket == cur_bucket);
@@ -607,8 +606,7 @@ loop_top:
        }
 
        /* Write-lock metapage and check for split since we started */
-       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
-       _hash_checkpage(rel, metabuf, LH_META_PAGE);
+       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
        metap = (HashMetaPage) BufferGetPage(metabuf);
 
        if (cur_maxbucket != metap->hashm_maxbucket)
index 20998ff9fbce34b35345bc6c81552d8cff06dd90..10d91c2844a629d88f2f0eab2a6ace2514fde782 100644 (file)
@@ -66,8 +66,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
        _hash_getlock(rel, 0, HASH_SHARE);
 
        /* Read the metapage */
-       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-       _hash_checkpage(rel, metabuf, LH_META_PAGE);
+       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
        metap = (HashMetaPage) BufferGetPage(metabuf);
 
        /*
@@ -104,8 +103,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
        _hash_droplock(rel, 0, HASH_SHARE);
 
        /* Fetch the primary bucket page for the bucket */
-       buf = _hash_getbuf(rel, blkno, HASH_WRITE);
-       _hash_checkpage(rel, buf, LH_BUCKET_PAGE);
+       buf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BUCKET_PAGE);
        page = BufferGetPage(buf);
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
        Assert(pageopaque->hasho_bucket == bucket);
@@ -125,7 +123,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
                         * find out next pass through the loop test above.
                         */
                        _hash_relbuf(rel, buf);
-                       buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+                       buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
                        page = BufferGetPage(buf);
                }
                else
@@ -145,8 +143,8 @@ _hash_doinsert(Relation rel, IndexTuple itup)
                        /* should fit now, given test above */
                        Assert(PageGetFreeSpace(page) >= itemsz);
                }
-               _hash_checkpage(rel, buf, LH_OVERFLOW_PAGE);
                pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+               Assert(pageopaque->hasho_flag == LH_OVERFLOW_PAGE);
                Assert(pageopaque->hasho_bucket == bucket);
        }
 
index 7279f0db69465a02aa2c3df8de6b9b224c68cfa8..60bef7b1a53b993a59db23692db8648e52d4277b 100644 (file)
@@ -107,7 +107,6 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
 
        /* allocate and lock an empty overflow page */
        ovflbuf = _hash_getovflpage(rel, metabuf);
-       ovflpage = BufferGetPage(ovflbuf);
 
        /*
         * Write-lock the tail page.  It is okay to hold two buffer locks here
@@ -115,12 +114,14 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
         */
        _hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
 
+       /* probably redundant... */
+       _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+
        /* loop to find current tail page, in case someone else inserted too */
        for (;;)
        {
                BlockNumber nextblkno;
 
-               _hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                page = BufferGetPage(buf);
                pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
                nextblkno = pageopaque->hasho_nextblkno;
@@ -131,11 +132,11 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
                /* we assume we do not need to write the unmodified page */
                _hash_relbuf(rel, buf);
 
-               buf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+               buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
        }
 
        /* now that we have correct backlink, initialize new overflow page */
-       _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
+       ovflpage = BufferGetPage(ovflbuf);
        ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
        ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf);
        ovflopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -156,7 +157,8 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf)
  *     _hash_getovflpage()
  *
  *     Find an available overflow page and return it.  The returned buffer
- *     is pinned and write-locked, but its contents are not initialized.
+ *     is pinned and write-locked, and has had _hash_pageinit() applied,
+ *     but it is caller's responsibility to fill the special space.
  *
  * The caller must hold a pin, but no lock, on the metapage buffer.
  * That buffer is left in the same state at exit.
@@ -220,8 +222,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
                /* Release exclusive lock on metapage while reading bitmap page */
                _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
-               mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE);
-               _hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+               mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
                mappage = BufferGetPage(mapbuf);
                freep = HashPageGetBitmap(mappage);
 
@@ -277,7 +278,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
         * with metapage write lock held; would be better to use a lock that
         * doesn't block incoming searches.
         */
-       newbuf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+       newbuf = _hash_getnewbuf(rel, blkno);
 
        metap->hashm_spares[splitnum]++;
 
@@ -327,8 +328,8 @@ found:
                _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
        }
 
-       /* Fetch and return the recycled page */
-       return _hash_getbuf(rel, blkno, HASH_WRITE);
+       /* Fetch, init, and return the recycled page */
+       return _hash_getinitbuf(rel, blkno);
 }
 
 /*
@@ -412,30 +413,29 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
         */
        if (BlockNumberIsValid(prevblkno))
        {
-               Buffer          prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE);
+               Buffer          prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE,
+                                                                                  LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                Page            prevpage = BufferGetPage(prevbuf);
                HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
 
-               _hash_checkpage(rel, prevbuf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                Assert(prevopaque->hasho_bucket == bucket);
                prevopaque->hasho_nextblkno = nextblkno;
                _hash_wrtbuf(rel, prevbuf);
        }
        if (BlockNumberIsValid(nextblkno))
        {
-               Buffer          nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE);
+               Buffer          nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE,
+                                                                                  LH_OVERFLOW_PAGE);
                Page            nextpage = BufferGetPage(nextbuf);
                HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);
 
-               _hash_checkpage(rel, nextbuf, LH_OVERFLOW_PAGE);
                Assert(nextopaque->hasho_bucket == bucket);
                nextopaque->hasho_prevblkno = prevblkno;
                _hash_wrtbuf(rel, nextbuf);
        }
 
        /* Read the metapage so we can determine which bitmap page to use */
-       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-       _hash_checkpage(rel, metabuf, LH_META_PAGE);
+       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
        metap = (HashMetaPage) BufferGetPage(metabuf);
 
        /* Identify which bit to set */
@@ -452,8 +452,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf)
        _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
 
        /* Clear the bitmap bit to indicate that this overflow page is free */
-       mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE);
-       _hash_checkpage(rel, mapbuf, LH_BITMAP_PAGE);
+       mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
        mappage = BufferGetPage(mapbuf);
        freep = HashPageGetBitmap(mappage);
        Assert(ISSET(freep, bitmapbit));
@@ -507,11 +506,10 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
         * page while holding the metapage lock, but this path is taken so seldom
         * that it's not worth worrying about.
         */
-       buf = _hash_getnewbuf(rel, blkno, HASH_WRITE);
+       buf = _hash_getnewbuf(rel, blkno);
        pg = BufferGetPage(buf);
 
-       /* initialize the page */
-       _hash_pageinit(pg, BufferGetPageSize(buf));
+       /* initialize the page's special space */
        op = (HashPageOpaque) PageGetSpecialPointer(pg);
        op->hasho_prevblkno = InvalidBlockNumber;
        op->hasho_nextblkno = InvalidBlockNumber;
@@ -583,8 +581,7 @@ _hash_squeezebucket(Relation rel,
         * start squeezing into the base bucket page.
         */
        wblkno = bucket_blkno;
-       wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-       _hash_checkpage(rel, wbuf, LH_BUCKET_PAGE);
+       wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_BUCKET_PAGE);
        wpage = BufferGetPage(wbuf);
        wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
 
@@ -607,8 +604,7 @@ _hash_squeezebucket(Relation rel,
                rblkno = ropaque->hasho_nextblkno;
                if (ropaque != wopaque)
                        _hash_relbuf(rel, rbuf);
-               rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-               _hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+               rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
                rpage = BufferGetPage(rbuf);
                ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
                Assert(ropaque->hasho_bucket == bucket);
@@ -648,8 +644,7 @@ _hash_squeezebucket(Relation rel,
                                        return;
                                }
 
-                               wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE);
-                               _hash_checkpage(rel, wbuf, LH_OVERFLOW_PAGE);
+                               wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
                                wpage = BufferGetPage(wbuf);
                                wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage);
                                Assert(wopaque->hasho_bucket == bucket);
@@ -701,8 +696,7 @@ _hash_squeezebucket(Relation rel,
                        /* free this overflow page, then get the previous one */
                        _hash_freeovflpage(rel, rbuf);
 
-                       rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE);
-                       _hash_checkpage(rel, rbuf, LH_OVERFLOW_PAGE);
+                       rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
                        rpage = BufferGetPage(rbuf);
                        ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage);
                        Assert(ropaque->hasho_bucket == bucket);
index 88dad0790f414254ed27379d1ec399bc894ed57a..65a0abfd4e4e790f9d7b44b1ad89fd30d76694fb 100644 (file)
@@ -100,21 +100,21 @@ _hash_droplock(Relation rel, BlockNumber whichlock, int access)
  *     _hash_getbuf() -- Get a buffer by block number for read or write.
  *
  *             'access' must be HASH_READ, HASH_WRITE, or HASH_NOLOCK.
+ *             'flags' is a bitwise OR of the allowed page types.
+ *
+ *             This must be used only to fetch pages that are expected to be valid
+ *             already.  _hash_checkpage() is applied using the given flags.
  *
  *             When this routine returns, the appropriate lock is set on the
  *             requested buffer and its reference count has been incremented
  *             (ie, the buffer is "locked and pinned").
  *
- *             P_NEW is disallowed because this routine should only be used
+ *             P_NEW is disallowed because this routine can only be used
  *             to access pages that are known to be before the filesystem EOF.
  *             Extending the index should be done with _hash_getnewbuf.
- *
- *             All call sites should call either _hash_checkpage or _hash_pageinit
- *             on the returned page, depending on whether the block is expected
- *             to be valid or not.
  */
 Buffer
-_hash_getbuf(Relation rel, BlockNumber blkno, int access)
+_hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags)
 {
        Buffer          buf;
 
@@ -127,13 +127,52 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
                LockBuffer(buf, access);
 
        /* ref count and lock type are correct */
+
+       _hash_checkpage(rel, buf, flags);
+
+       return buf;
+}
+
+/*
+ *     _hash_getinitbuf() -- Get and initialize a buffer by block number.
+ *
+ *             This must be used only to fetch pages that are known to be before
+ *             the index's filesystem EOF, but are to be filled from scratch.
+ *             _hash_pageinit() is applied automatically.  Otherwise it has
+ *             effects similar to _hash_getbuf() with access = HASH_WRITE.
+ *
+ *             When this routine returns, a write lock is set on the
+ *             requested buffer and its reference count has been incremented
+ *             (ie, the buffer is "locked and pinned").
+ *
+ *             P_NEW is disallowed because this routine can only be used
+ *             to access pages that are known to be before the filesystem EOF.
+ *             Extending the index should be done with _hash_getnewbuf.
+ */
+Buffer
+_hash_getinitbuf(Relation rel, BlockNumber blkno)
+{
+       Buffer          buf;
+
+       if (blkno == P_NEW)
+               elog(ERROR, "hash AM does not use P_NEW");
+
+       buf = ReadOrZeroBuffer(rel, blkno);
+
+       LockBuffer(buf, HASH_WRITE);
+
+       /* ref count and lock type are correct */
+
+       /* initialize the page */
+       _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
+
        return buf;
 }
 
 /*
  *     _hash_getnewbuf() -- Get a new page at the end of the index.
  *
- *             This has the same API as _hash_getbuf, except that we are adding
+ *             This has the same API as _hash_getinitbuf, except that we are adding
  *             a page to the index, and hence expect the page to be past the
  *             logical EOF.  (However, we have to support the case where it isn't,
  *             since a prior try might have crashed after extending the filesystem
@@ -141,12 +180,9 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access)
  *
  *             It is caller's responsibility to ensure that only one process can
  *             extend the index at a time.
- *
- *             All call sites should call _hash_pageinit on the returned page.
- *             Also, it's difficult to imagine why access would not be HASH_WRITE.
  */
 Buffer
-_hash_getnewbuf(Relation rel, BlockNumber blkno, int access)
+_hash_getnewbuf(Relation rel, BlockNumber blkno)
 {
        BlockNumber     nblocks = RelationGetNumberOfBlocks(rel);
        Buffer          buf;
@@ -166,12 +202,15 @@ _hash_getnewbuf(Relation rel, BlockNumber blkno, int access)
                                 BufferGetBlockNumber(buf), blkno);
        }
        else
-               buf = ReadBuffer(rel, blkno);
+               buf = ReadOrZeroBuffer(rel, blkno);
 
-       if (access != HASH_NOLOCK)
-               LockBuffer(buf, access);
+       LockBuffer(buf, HASH_WRITE);
 
        /* ref count and lock type are correct */
+
+       /* initialize the page */
+       _hash_pageinit(BufferGetPage(buf), BufferGetPageSize(buf));
+
        return buf;
 }
 
@@ -292,9 +331,8 @@ _hash_metapinit(Relation rel)
         * smgrextend() calls to occur.  This ensures that the smgr level
         * has the right idea of the physical index length.
         */
-       metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, HASH_WRITE);
+       metabuf = _hash_getnewbuf(rel, HASH_METAPAGE);
        pg = BufferGetPage(metabuf);
-       _hash_pageinit(pg, BufferGetPageSize(metabuf));
 
        pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
        pageopaque->hasho_prevblkno = InvalidBlockNumber;
@@ -350,9 +388,8 @@ _hash_metapinit(Relation rel)
         */
        for (i = 0; i <= 1; i++)
        {
-               buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), HASH_WRITE);
+               buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i));
                pg = BufferGetPage(buf);
-               _hash_pageinit(pg, BufferGetPageSize(buf));
                pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
                pageopaque->hasho_prevblkno = InvalidBlockNumber;
                pageopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -679,17 +716,15 @@ _hash_splitbucket(Relation rel,
         * either bucket.
         */
        oblkno = start_oblkno;
-       obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
-       _hash_checkpage(rel, obuf, LH_BUCKET_PAGE);
+       obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_BUCKET_PAGE);
        opage = BufferGetPage(obuf);
        oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
 
        nblkno = start_nblkno;
-       nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE);
+       nbuf = _hash_getnewbuf(rel, nblkno);
        npage = BufferGetPage(nbuf);
 
        /* initialize the new bucket's primary page */
-       _hash_pageinit(npage, BufferGetPageSize(nbuf));
        nopaque = (HashPageOpaque) PageGetSpecialPointer(npage);
        nopaque->hasho_prevblkno = InvalidBlockNumber;
        nopaque->hasho_nextblkno = InvalidBlockNumber;
@@ -725,8 +760,7 @@ _hash_splitbucket(Relation rel,
                         */
                        _hash_wrtbuf(rel, obuf);
 
-                       obuf = _hash_getbuf(rel, oblkno, HASH_WRITE);
-                       _hash_checkpage(rel, obuf, LH_OVERFLOW_PAGE);
+                       obuf = _hash_getbuf(rel, oblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
                        opage = BufferGetPage(obuf);
                        oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
                        ooffnum = FirstOffsetNumber;
@@ -763,7 +797,6 @@ _hash_splitbucket(Relation rel,
                                _hash_chgbufaccess(rel, nbuf, HASH_WRITE, HASH_NOLOCK);
                                /* chain to a new overflow page */
                                nbuf = _hash_addovflpage(rel, metabuf, nbuf);
-                               _hash_checkpage(rel, nbuf, LH_OVERFLOW_PAGE);
                                npage = BufferGetPage(nbuf);
                                /* we don't need nopaque within the loop */
                        }
index 5bd464bf98487d4b47ddf3de70ce49cbd4976e92..8b9e1fcec66ab4d3aacd408634cd690432c21a8b 100644 (file)
@@ -73,8 +73,7 @@ _hash_readnext(Relation rel,
        *bufp = InvalidBuffer;
        if (BlockNumberIsValid(blkno))
        {
-               *bufp = _hash_getbuf(rel, blkno, HASH_READ);
-               _hash_checkpage(rel, *bufp, LH_OVERFLOW_PAGE);
+               *bufp = _hash_getbuf(rel, blkno, HASH_READ, LH_OVERFLOW_PAGE);
                *pagep = BufferGetPage(*bufp);
                *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
        }
@@ -94,8 +93,8 @@ _hash_readprev(Relation rel,
        *bufp = InvalidBuffer;
        if (BlockNumberIsValid(blkno))
        {
-               *bufp = _hash_getbuf(rel, blkno, HASH_READ);
-               _hash_checkpage(rel, *bufp, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
+               *bufp = _hash_getbuf(rel, blkno, HASH_READ,
+                                                        LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
                *pagep = BufferGetPage(*bufp);
                *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep);
        }
@@ -183,8 +182,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
        _hash_getlock(rel, 0, HASH_SHARE);
 
        /* Read the metapage */
-       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ);
-       _hash_checkpage(rel, metabuf, LH_META_PAGE);
+       metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
        metap = (HashMetaPage) BufferGetPage(metabuf);
 
        /*
@@ -213,8 +211,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
        so->hashso_bucket_blkno = blkno;
 
        /* Fetch the primary bucket page for the bucket */
-       buf = _hash_getbuf(rel, blkno, HASH_READ);
-       _hash_checkpage(rel, buf, LH_BUCKET_PAGE);
+       buf = _hash_getbuf(rel, blkno, HASH_READ, LH_BUCKET_PAGE);
        page = BufferGetPage(buf);
        opaque = (HashPageOpaque) PageGetSpecialPointer(page);
        Assert(opaque->hasho_bucket == bucket);
index 5f232ce3e787845e3b10915531bc9b6ebbe53d01..4d28ed777082e328e8ffd7ccc53755de5e7cd502 100644 (file)
@@ -137,6 +137,9 @@ _hash_log2(uint32 num)
 
 /*
  * _hash_checkpage -- sanity checks on the format of all hash pages
+ *
+ * If flags is not zero, it is a bitwise OR of the acceptable values of
+ * hasho_flag.
  */
 void
 _hash_checkpage(Relation rel, Buffer buf, int flags)
index f222c5b6b15114f721962c09b7f9f981d86abed0..dad4a58c0c36ae70b8652933d316fa7109c40bec 100644 (file)
@@ -283,8 +283,10 @@ extern void _hash_squeezebucket(Relation rel,
 extern void _hash_getlock(Relation rel, BlockNumber whichlock, int access);
 extern bool _hash_try_getlock(Relation rel, BlockNumber whichlock, int access);
 extern void _hash_droplock(Relation rel, BlockNumber whichlock, int access);
-extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access);
-extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno, int access);
+extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno,
+                                                  int access, int flags);
+extern Buffer _hash_getinitbuf(Relation rel, BlockNumber blkno);
+extern Buffer _hash_getnewbuf(Relation rel, BlockNumber blkno);
 extern void _hash_relbuf(Relation rel, Buffer buf);
 extern void _hash_dropbuf(Relation rel, Buffer buf);
 extern void _hash_wrtbuf(Relation rel, Buffer buf);