OffsetNumber upperbound);
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
-                                                                                                       IndexTuple itup);
+                                                IndexTuple itup);
 static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
                                         Page page, OffsetNumber offset);
 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
                /* Fingerprint leaf page tuples (those that point to the heap) */
                if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
                {
-                       IndexTuple              norm;
+                       IndexTuple      norm;
 
                        norm = bt_normalize_tuple(state, itup);
                        bloom_add_element(state->filter, (unsigned char *) norm,
                 */
                else if (offset == max)
                {
-                       BTScanInsert    rightkey;
+                       BTScanInsert rightkey;
 
                        /* Get item in next/right page */
                        rightkey = bt_right_page_check_scankey(state);
                                                  bool *isnull, bool tupleIsAlive, void *checkstate)
 {
        BtreeCheckState *state = (BtreeCheckState *) checkstate;
-       IndexTuple      itup, norm;
+       IndexTuple      itup,
+                               norm;
 
        Assert(state->heapallindexed);
 
 
        for (i = 0; i < tupleDescriptor->natts; i++)
        {
-               Form_pg_attribute       att;
+               Form_pg_attribute att;
 
                att = TupleDescAttr(tupleDescriptor, i);
 
 
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (ARRNELEMS(ll) != dim)
                ereport(ERROR,
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        dur = ARRPTR(ur);
 
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
        result = (NDBOX *) palloc0(size);
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube))
        {
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube) && (x1 == x2))
        {
 
                        /* Skip dropped attributes (probably shouldn't see any here). */
                        if (attr->attisdropped)
                                continue;
-                       /* Skip generated columns (COPY won't accept them in the column
-                        * list) */
+
+                       /*
+                        * Skip generated columns (COPY won't accept them in the column
+                        * list)
+                        */
                        if (attr->attgenerated)
                                continue;
                        *columns = lappend(*columns, makeString(pstrdup(attname)));
 
                 */
                for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
                {
-                       int             r_end = dr[i];
-                       int             r_start = r_end;
-                       while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
+                       int                     r_end = dr[i];
+                       int                     r_start = r_end;
+
+                       while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
                                --r_start, --i, --lenr;
-                       dr[2*j] = r_start;
-                       dr[2*j+1] = r_end;
+                       dr[2 * j] = r_start;
+                       dr[2 * j + 1] = r_end;
                }
                /* just copy the rest, if any, as trivial ranges */
                for (; i >= 0; i--, j--)
-                       dr[2*j] = dr[2*j + 1] = dr[i];
+                       dr[2 * j] = dr[2 * j + 1] = dr[i];
 
                if (++j)
                {
                        /*
                         * shunt everything down to start at the right place
                         */
-                       memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
+                       memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
                }
+
                /*
                 * make "len" be number of array elements, not ranges
                 */
-               len = 2*(len - j);
+               len = 2 * (len - j);
                cand = 1;
                while (len > MAXNUMRANGE * 2)
                {
                        min = PG_INT64_MAX;
                        for (i = 2; i < len; i += 2)
-                               if (min > ((int64)dr[i] - (int64)dr[i - 1]))
+                               if (min > ((int64) dr[i] - (int64) dr[i - 1]))
                                {
-                                       min = ((int64)dr[i] - (int64)dr[i - 1]);
+                                       min = ((int64) dr[i] - (int64) dr[i - 1]);
                                        cand = i;
                                }
                        memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
                        len -= 2;
                }
+
                /*
                 * check sparseness of result
                 */
 
        for (i = 0; i < len; i += 2)
        {
                if (!i || a[i] != a[i - 1]) /* do not count repeated range */
-                       size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
+                       size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
        }
 
-       if (size > (int64)INT_MAX || size < (int64)INT_MIN)
+       if (size > (int64) INT_MAX || size < (int64) INT_MIN)
                return -1;                              /* overflow */
        return (int) size;
 }
 
                queryId = pgss_hash_string(query, query_len);
 
                /*
-                * If we are unlucky enough to get a hash of zero(invalid), use queryID
-                * as 2 instead, queryID 1 is already in use for normal statements.
+                * If we are unlucky enough to get a hash of zero(invalid), use
+                * queryID as 2 instead, queryID 1 is already in use for normal
+                * statements.
                 */
                if (queryId == UINT64CONST(0))
                        queryId = UINT64CONST(2);
 
 }
 
 mp_result
-mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
 {
        mpz_t           vtmp;
        mp_digit        vbuf[MP_VALUE_DIGITS(value)];
 }
 
 mp_result
-mp_int_to_int(mp_int z, mp_small * out)
+mp_int_to_int(mp_int z, mp_small *out)
 {
        assert(z != NULL);
 
 }
 
 mp_result
-mp_int_to_uint(mp_int z, mp_usmall * out)
+mp_int_to_uint(mp_int z, mp_usmall *out)
 {
        assert(z != NULL);
 
 
 /** Sets `q` and `*r` to the quotent and remainder of `a / value`. Division by
        powers of 2 is detected and handled efficiently. The remainder is pinned to
        `0 <= *r < b`. Either of `q` or `r` may be NULL. */
-mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
+mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
 
 /** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
        special case for division by powers of two that is more efficient than
        The remainder is pinned to `0 <= r < value`. */
 static inline
 mp_result
-mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
 {
        return mp_int_div_value(a, value, 0, r);
 }
 
 /** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_int(mp_int z, mp_small * out);
+mp_result      mp_int_to_int(mp_int z, mp_small *out);
 
 /** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_uint(mp_int z, mp_usmall * out);
+mp_result      mp_int_to_uint(mp_int z, mp_usmall *out);
 
 /** Converts `z` to a zero-terminated string of characters in the specified
        `radix`, writing at most `limit` characters to `str` including the
 
 
        InitDirtySnapshot(SnapshotDirty);
 
-       nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
+       nblocks = hscan->rs_nblocks;    /* # blocks to be scanned */
 
        /* scan the relation */
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
        rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
-                                                                 slot, planSlot);
+                                                                  slot, planSlot);
        /* Revert that change */
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate;
        bool            doNothing = false;
 
        /*
-        * If the foreign table we are about to insert routed rows into is also
-        * an UPDATE subplan result rel that will be updated later, proceeding
-        * with the INSERT will result in the later UPDATE incorrectly modifying
-        * those routed rows, so prevent the INSERT --- it would be nice if we
-        * could handle this case; but for now, throw an error for safety.
+        * If the foreign table we are about to insert routed rows into is also an
+        * UPDATE subplan result rel that will be updated later, proceeding with
+        * the INSERT will result in the later UPDATE incorrectly modifying those
+        * routed rows, so prevent the INSERT --- it would be nice if we could
+        * handle this case; but for now, throw an error for safety.
         */
        if (plan && plan->operation == CMD_UPDATE &&
                (resultRelInfo->ri_usesFdwDirectModify ||
        if (!grouping_is_sortable(root->parse->groupClause) ||
                !pathkeys_contained_in(pathkeys, root->group_pathkeys))
        {
-               Path            sort_path;              /* dummy for result of cost_sort */
+               Path            sort_path;      /* dummy for result of cost_sort */
 
                cost_sort(&sort_path,
                                  root,
                 * add 1/4th of that default.
                 */
                double          sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
- - 1.0) * 0.25;
+                                                                                        - 1.0) * 0.25;
 
                *p_startup_cost *= sort_multiplier;
                *p_run_cost *= sort_multiplier;
                                                                                        fmstate->retrieved_attrs,
                                                                                        NULL,
                                                                                        fmstate->temp_cxt);
+
                /*
                 * The returning slot will not necessarily be suitable to store
                 * heaptuples directly, so allow for conversion.
                /*
                 * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
                 * so the input_rel should be a base, join, or ordered relation; and
-                * if it's an ordered relation, its input relation should be a base
-                * or join relation.
+                * if it's an ordered relation, its input relation should be a base or
+                * join relation.
                 */
                Assert(input_rel->reloptkind == RELOPT_BASEREL ||
                           input_rel->reloptkind == RELOPT_JOINREL ||
 
                }
 
                /*
-                * Now walk the missing attributes. If there is a missing value
-                * make space for it. Otherwise, it's going to be NULL.
+                * Now walk the missing attributes. If there is a missing value make
+                * space for it. Otherwise, it's going to be NULL.
                 */
                for (attnum = firstmissingnum;
                         attnum < natts;
 
                 * There is at least one empty page.  So we have to rescan the tree
                 * deleting empty pages.
                 */
-               Buffer                          buffer;
+               Buffer          buffer;
                DataPageDeleteStack root,
-                                                  *ptr,
-                                                  *tmp;
+                                  *ptr,
+                                  *tmp;
 
                buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
                                                                        RBM_NORMAL, gvs->strategy);
 
                /*
-                * Lock posting tree root for cleanup to ensure there are no concurrent
-                * inserts.
+                * Lock posting tree root for cleanup to ensure there are no
+                * concurrent inserts.
                 */
                LockBufferForCleanup(buffer);
 
 
                while (segno < a_segno)
                {
                        /*
-                        * Once modification is started and page tail is copied, we've
-                        * to copy unmodified segments.
+                        * Once modification is started and page tail is copied, we've to
+                        * copy unmodified segments.
                         */
                        segsize = SizeOfGinPostingList(oldseg);
                        if (tailCopy)
                }
 
                /*
-                * We're about to start modification of the page.  So, copy tail of the
-                * page if it's not done already.
+                * We're about to start modification of the page.  So, copy tail of
+                * the page if it's not done already.
                 */
                if (!tailCopy && segptr != segmentend)
                {
-                       int tailSize = segmentend - segptr;
+                       int                     tailSize = segmentend - segptr;
 
                        tailCopy = (Pointer) palloc(tailSize);
                        memcpy(tailCopy, segptr, tailSize);
        segptr = (Pointer) oldseg;
        if (segptr != segmentend && tailCopy)
        {
-               int restSize = segmentend - segptr;
+               int                     restSize = segmentend - segptr;
 
                Assert(writePtr + restSize <= PageGetSpecialPointer(page));
                memcpy(writePtr, segptr, restSize);
 
                        gistcheckpage(r, buffer);
 
                        /*
-                        * Otherwise, recycle it if deleted, and too old to have any processes
-                        * interested in it.
+                        * Otherwise, recycle it if deleted, and too old to have any
+                        * processes interested in it.
                         */
                        if (gistPageRecyclable(page))
                        {
                                /*
-                                * If we are generating WAL for Hot Standby then create a
-                                * WAL record that will allow us to conflict with queries
-                                * running on standby, in case they have snapshots older
-                                * than the page's deleteXid.
+                                * If we are generating WAL for Hot Standby then create a WAL
+                                * record that will allow us to conflict with queries running
+                                * on standby, in case they have snapshots older than the
+                                * page's deleteXid.
                                 */
                                if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
                                        gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
 
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
 
 heap_get_latest_tid(TableScanDesc sscan,
                                        ItemPointer tid)
 {
-       Relation relation = sscan->rs_rd;
-       Snapshot snapshot = sscan->rs_snapshot;
+       Relation        relation = sscan->rs_rd;
+       Snapshot        snapshot = sscan->rs_snapshot;
        ItemPointerData ctid;
        TransactionId priorXmax;
 
 
                                                HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
                                        {
                                                tmfd->xmax = priorXmax;
+
                                                /*
                                                 * Cmin is the problematic value, so store that. See
                                                 * above.
        Snapshot        snapshot;
        bool            need_unregister_snapshot = false;
        TransactionId OldestXmin;
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
 
        /* Publish number of blocks to scan */
        if (progress)
        {
-               BlockNumber             nblocks;
+               BlockNumber nblocks;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
                /* Report scan progress, if asked to. */
                if (progress)
                {
-                       BlockNumber     blocks_done = heapam_scan_get_blocks_done(hscan);
+                       BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
 
                        if (blocks_done != previous_blkno)
                        {
        /* Report scan progress one last time. */
        if (progress)
        {
-               BlockNumber             blks_done;
+               BlockNumber blks_done;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
        bool            in_index[MaxHeapTuplesPerPage];
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
 
        /* state variables for the merge */
        ItemPointer indexcursor = NULL;
 heapam_scan_get_blocks_done(HeapScanDesc hscan)
 {
        ParallelBlockTableScanDesc bpscan = NULL;
-       BlockNumber             startblock;
-       BlockNumber             blocks_done;
+       BlockNumber startblock;
+       BlockNumber blocks_done;
 
        if (hscan->rs_base.rs_parallel != NULL)
        {
                blocks_done = hscan->rs_cblock - startblock;
        else
        {
-               BlockNumber     nblocks;
+               BlockNumber nblocks;
 
                nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
                blocks_done = nblocks - startblock +
 
        }
        else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
        {
-               int options = HEAP_INSERT_SKIP_FSM;
+               int                     options = HEAP_INSERT_SKIP_FSM;
 
                if (!state->rs_use_wal)
                        options |= HEAP_INSERT_SKIP_WAL;
 
 toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
 {
        struct varlena *result;
-       int32 rawsize;
+       int32           rawsize;
 
        Assert(VARATT_IS_COMPRESSED(attr));
 
        result = (struct varlena *) palloc(slicelength + VARHDRSZ);
 
        rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
-                                               VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
-                                               VARDATA(result),
-                                               slicelength, false);
+                                                         VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+                                                         VARDATA(result),
+                                                         slicelength, false);
        if (rawsize < 0)
                elog(ERROR, "compressed data is corrupted");
 
 
 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                                 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
-                                                LVRelStats *vacrelstats);
+                                                 LVRelStats *vacrelstats);
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                                                 LVRelStats *vacrelstats);
                                         * cheaper to get rid of it in the next pruning pass than
                                         * to treat it like an indexed tuple. Finally, if index
                                         * cleanup is disabled, the second heap pass will not
-                                        * execute, and the tuple will not get removed, so we
-                                        * must treat it like any other dead tuple that we choose
-                                        * to keep.
+                                        * execute, and the tuple will not get removed, so we must
+                                        * treat it like any other dead tuple that we choose to
+                                        * keep.
                                         *
                                         * If this were to happen for a tuple that actually needed
                                         * to be deleted, we'd be in trouble, because it'd
                                        all_visible = false;
                                        break;
                                case HEAPTUPLE_LIVE:
+
                                        /*
                                         * Count it as live.  Not only is this natural, but it's
                                         * also what acquire_sample_rows() does.
                        else
                        {
                                /*
-                                * Here, we have indexes but index cleanup is disabled. Instead of
-                                * vacuuming the dead tuples on the heap, we just forget them.
+                                * Here, we have indexes but index cleanup is disabled.
+                                * Instead of vacuuming the dead tuples on the heap, we just
+                                * forget them.
                                 *
                                 * Note that vacrelstats->dead_tuples could have tuples which
                                 * became dead after HOT-pruning but are not marked dead yet.
-                                * We do not process them because it's a very rare condition, and
-                                * the next vacuum will process them anyway.
+                                * We do not process them because it's a very rare condition,
+                                * and the next vacuum will process them anyway.
                                 */
                                Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
                        }
 
                /*
                 * Re-find and write lock the parent of buf.
                 *
-                * It's possible that the location of buf's downlink has changed
-                * since our initial _bt_search() descent.  _bt_getstackbuf() will
-                * detect and recover from this, updating the stack, which ensures
-                * that the new downlink will be inserted at the correct offset.
-                * Even buf's parent may have changed.
+                * It's possible that the location of buf's downlink has changed since
+                * our initial _bt_search() descent.  _bt_getstackbuf() will detect
+                * and recover from this, updating the stack, which ensures that the
+                * new downlink will be inserted at the correct offset. Even buf's
+                * parent may have changed.
                 */
                stack->bts_btentry = bknum;
                pbuf = _bt_getstackbuf(rel, stack);
 
                new_stack->bts_parent = stack_in;
 
                /*
-                * Page level 1 is lowest non-leaf page level prior to leaves.  So,
-                * if we're on the level 1 and asked to lock leaf page in write mode,
+                * Page level 1 is lowest non-leaf page level prior to leaves.  So, if
+                * we're on the level 1 and asked to lock leaf page in write mode,
                 * then lock next page in write mode, because it must be a leaf.
                 */
                if (opaque->btpo.level == 1 && access == BT_WRITE)
 
        /* Initialize remaining insertion scan key fields */
        inskey.heapkeyspace = _bt_heapkeyspace(rel);
-       inskey.anynullkeys = false;             /* unusued */
+       inskey.anynullkeys = false; /* unused */
        inskey.nextkey = nextkey;
        inskey.pivotsearch = false;
        inskey.scantid = NULL;
 
                         * much smaller.
                         *
                         * Since the truncated tuple is often smaller than the original
-                        * tuple, it cannot just be copied in place (besides, we want
-                        * to actually save space on the leaf page).  We delete the
-                        * original high key, and add our own truncated high key at the
-                        * same offset.
+                        * tuple, it cannot just be copied in place (besides, we want to
+                        * actually save space on the leaf page).  We delete the original
+                        * high key, and add our own truncated high key at the same
+                        * offset.
                         *
                         * Note that the page layout won't be changed very much.  oitup is
                         * already located at the physical beginning of tuple space, so we
 
        key = palloc(offsetof(BTScanInsertData, scankeys) +
                                 sizeof(ScanKeyData) * indnkeyatts);
        key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
-       key->anynullkeys = false;               /* initial assumption */
+       key->anynullkeys = false;       /* initial assumption */
        key->nextkey = false;
        key->pivotsearch = false;
        key->keysz = Min(indnkeyatts, tupnatts);
 
 pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
                                                                 const pairingheap_node *b, void *arg)
 {
-       const           SpGistSearchItem *sa = (const SpGistSearchItem *) a;
-       const           SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+       const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+       const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
        SpGistScanOpaque so = (SpGistScanOpaque) arg;
        int                     i;
 
 }
 
 static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        if (!so->state.attLeafType.attbyval &&
                DatumGetPointer(item->value) != NULL)
  * Called in queue context
  */
 static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        pairingheap_add(so->scanQueue, &item->phNode);
 }
  *             the scan is not ordered AND the item satisfies the scankeys
  */
 static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
                        SpGistLeafTuple leafTuple, bool isnull,
                        bool *reportedSome, storeRes_func storeRes)
 {
 static void
 spgInitInnerConsistentIn(spgInnerConsistentIn *in,
                                                 SpGistScanOpaque so,
-                                                SpGistSearchItem * item,
+                                                SpGistSearchItem *item,
                                                 SpGistInnerTuple innerTuple)
 {
        in->scankeys = so->keyData;
 
 static SpGistSearchItem *
 spgMakeInnerItem(SpGistScanOpaque so,
-                                SpGistSearchItem * parentItem,
+                                SpGistSearchItem *parentItem,
                                 SpGistNodeTuple tuple,
                                 spgInnerConsistentOut *out, int i, bool isnull,
                                 double *distances)
 }
 
 static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
                         SpGistInnerTuple innerTuple, bool isnull)
 {
        MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
 
 static OffsetNumber
 spgTestLeafTuple(SpGistScanOpaque so,
-                                SpGistSearchItem * item,
+                                SpGistSearchItem *item,
                                 Page page, OffsetNumber offset,
                                 bool isnull, bool isroot,
                                 bool *reportedSome,
 
                        res = (level >= queryLen) ||
                                DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
                                                                                                         PG_GET_COLLATION(),
-                                                                                                out->leafValue,
-                                                                                                PointerGetDatum(query)));
+                                                                                                        out->leafValue,
+                                                                                                        PointerGetDatum(query)));
 
                        if (!res)                       /* no need to consider remaining conditions */
                                break;
 
                         * happened since VACUUM started.
                         *
                         * Note: we could make a tighter test by seeing if the xid is
-                        * "running" according to the active snapshot; but snapmgr.c doesn't
-                        * currently export a suitable API, and it's not entirely clear
-                        * that a tighter test is worth the cycles anyway.
+                        * "running" according to the active snapshot; but snapmgr.c
+                        * doesn't currently export a suitable API, and it's not entirely
+                        * clear that a tighter test is worth the cycles anyway.
                         */
                        if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
                                spgAddPendingTID(bds, &dt->pointer);
 
 table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
 {
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
        Oid                     relid = RelationGetRelid(relation);
        Snapshot        snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
 {
        Snapshot        snapshot;
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
        Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
 
 void
 table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
 {
-       Relation rel = scan->rs_rd;
+       Relation        rel = scan->rs_rd;
        const TableAmRoutine *tableam = rel->rd_tableam;
 
        /*
 
 
        /*
         * Ensure parent(s) have XIDs, so that a child always has an XID later
-        * than its parent.  Mustn't recurse here, or we might get a stack overflow
-        * if we're at the bottom of a huge stack of subtransactions none of which
-        * have XIDs yet.
+        * than its parent.  Mustn't recurse here, or we might get a stack
+        * overflow if we're at the bottom of a huge stack of subtransactions none
+        * of which have XIDs yet.
         */
        if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
        {
  * just skipping the reset in StartTransaction() won't work.)
  */
 static int     save_XactIsoLevel;
-static bool    save_XactReadOnly;
-static bool    save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
 
 void
 SaveTransactionCharacteristics(void)
                nxids = add_size(nxids, s->nChildXids);
        }
        Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
-               <= maxsize);
+                  <= maxsize);
 
        /* Copy them to our scratch space. */
        workspace = palloc(nxids * sizeof(TransactionId));
 
                                        ereport(FATAL,
                                                        (errmsg("could not find redo location referenced by checkpoint record"),
                                                         errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
-                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
-                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
-                                                        DataDir, DataDir, DataDir)));
+                                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+                                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+                                                                        DataDir, DataDir, DataDir)));
                        }
                }
                else
 
 
                                        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
                                        {
-                                               Oid             oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
+                                               Oid                     oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
 
                                                objects = lappend_oid(objects, oid);
                                        }
 
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
-               Oid             oid  = ((Form_pg_class) GETSTRUCT(tuple))->oid;
+               Oid                     oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
 
                relations = lappend_oid(relations, oid);
        }
        }
        else
        {
-               Oid             defAclOid;
+               Oid                     defAclOid;
 
                /* Prepare to insert or update pg_default_acl entry */
                MemSet(values, 0, sizeof(values));
                if (isNew)
                        InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
                else
-                       InvokeObjectPostAlterHook(DefaultAclRelationId,  defAclOid, 0);
+                       InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
        }
 
        if (HeapTupleIsValid(tuple))
 
 Datum
 pg_nextoid(PG_FUNCTION_ARGS)
 {
-       Oid             reloid = PG_GETARG_OID(0);
-       Name    attname = PG_GETARG_NAME(1);
-       Oid             idxoid = PG_GETARG_OID(2);
-       Relation rel;
-       Relation idx;
-       HeapTuple atttuple;
+       Oid                     reloid = PG_GETARG_OID(0);
+       Name            attname = PG_GETARG_NAME(1);
+       Oid                     idxoid = PG_GETARG_OID(2);
+       Relation        rel;
+       Relation        idx;
+       HeapTuple       atttuple;
        Form_pg_attribute attform;
-       AttrNumber attno;
-       Oid             newoid;
+       AttrNumber      attno;
+       Oid                     newoid;
 
        /*
         * As this function is not intended to be used during normal running, and
 
                /*
                 * If the expression is just a NULL constant, we do not bother to make
                 * an explicit pg_attrdef entry, since the default behavior is
-                * equivalent.  This applies to column defaults, but not for generation
-                * expressions.
+                * equivalent.  This applies to column defaults, but not for
+                * generation expressions.
                 *
                 * Note a nonobvious property of this test: if the column is of a
                 * domain type, what we'll get is not a bare null Const but a
 
                                                                  Anum_pg_class_reloptions, &isnull);
 
        /*
-        * Extract the list of column names to be used for the index
-        * creation.
+        * Extract the list of column names to be used for the index creation.
         */
        for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
        {
                                                          optionDatum,
                                                          INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
                                                          0,
-                                                         true, /* allow table to be a system catalog? */
-                                                         false, /* is_internal? */
+                                                         true, /* allow table to be a system catalog? */
+                                                         false,        /* is_internal? */
                                                          NULL);
 
        /* Close the relations used and clean up */
                                                                          values, nulls, replaces);
                        CatalogTupleUpdate(description, &tuple->t_self, tuple);
 
-                       break;                                  /* Assume there can be only one match */
+                       break;                          /* Assume there can be only one match */
                }
 
                systable_endscan(sd);
         */
        if (get_rel_relispartition(oldIndexId))
        {
-               List   *ancestors = get_partition_ancestors(oldIndexId);
-               Oid             parentIndexRelid = linitial_oid(ancestors);
+               List       *ancestors = get_partition_ancestors(oldIndexId);
+               Oid                     parentIndexRelid = linitial_oid(ancestors);
 
                DeleteInheritsTuple(oldIndexId, parentIndexRelid);
                StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
                                newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
-                               /* The data will be sent by the next pgstat_report_stat() call. */
+
+                               /*
+                                * The data will be sent by the next pgstat_report_stat()
+                                * call.
+                                */
                        }
                }
        }
        Relation        userIndexRelation;
 
        /*
-        * No more predicate locks will be acquired on this index, and we're
-        * about to stop doing inserts into the index which could show
-        * conflicts with existing predicate locks, so now is the time to move
-        * them to the heap relation.
+        * No more predicate locks will be acquired on this index, and we're about
+        * to stop doing inserts into the index which could show conflicts with
+        * existing predicate locks, so now is the time to move them to the heap
+        * relation.
         */
        userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
        userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
        TransferPredicateLocksToHeapRelation(userIndexRelation);
 
        /*
-        * Now we are sure that nobody uses the index for queries; they just
-        * might have it open for updating it.  So now we can unset indisready
-        * and indislive, then wait till nobody could be using it at all
-        * anymore.
+        * Now we are sure that nobody uses the index for queries; they just might
+        * have it open for updating it.  So now we can unset indisready and
+        * indislive, then wait till nobody could be using it at all anymore.
         */
        index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
 
        /*
-        * Invalidate the relcache for the table, so that after this commit
-        * all sessions will refresh the table's index list.  Forgetting just
-        * the index's relcache entry is not enough.
+        * Invalidate the relcache for the table, so that after this commit all
+        * sessions will refresh the table's index list.  Forgetting just the
+        * index's relcache entry is not enough.
         */
        CacheInvalidateRelcache(userHeapRelation);
 
         */
        if (OidIsValid(parentConstraintId))
        {
-               ObjectAddress   referenced;
+               ObjectAddress referenced;
 
                ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
                recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_BUILD,
                        PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
                        0, 0, 0, 0
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
                        0, 0, 0, 0
                };
+
                pgstat_progress_update_multi_param(5, index, val);
        }
 
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
                        0, 0
                };
 
                                StringInfoData opfam;
 
                                amprocDesc = table_open(AccessMethodProcedureRelationId,
-                                                                          AccessShareLock);
+                                                                               AccessShareLock);
 
                                ScanKeyInit(&skey[0],
                                                        Anum_pg_amproc_oid,
 
 
        myself = ProcedureCreate(aggName,
                                                         aggNamespace,
-                                                        replace, /* maybe replacement */
+                                                        replace,       /* maybe replacement */
                                                         false, /* doesn't return a set */
                                                         finaltype, /* returnType */
                                                         GetUserId(),   /* proowner */
 
                /*
                 * If we're replacing an existing entry, we need to validate that
-                * we're not changing anything that would break callers.
-                * Specifically we must not change aggkind or aggnumdirectargs,
-                * which affect how an aggregate call is treated in parse
-                * analysis.
+                * we're not changing anything that would break callers. Specifically
+                * we must not change aggkind or aggnumdirectargs, which affect how an
+                * aggregate call is treated in parse analysis.
                 */
                if (aggKind != oldagg->aggkind)
                        ereport(ERROR,
 
                                         prokind == PROKIND_PROCEDURE
                                         ? errmsg("cannot change whether a procedure has output parameters")
                                         : errmsg("cannot change return type of existing function"),
-                                        /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+                       /*
+                        * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+                        * AGGREGATE
+                        */
                                         errhint("Use %s %s first.",
                                                         dropcmd,
                                                         format_procedure(oldproc->oid))));
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot change return type of existing function"),
                                                 errdetail("Row type defined by OUT parameters is different."),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change name of input parameter \"%s\"",
                                                                        old_arg_names[j]),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot remove parameter defaults from existing function"),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change data type of existing parameter default value"),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
        else
        {
                /* Creating a new procedure */
-               Oid             newOid;
+               Oid                     newOid;
 
                /* First, get default permissions and set up proacl */
                proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,
 
        result = NIL;
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Oid             oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
+               Oid                     oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
 
                result = lappend_oid(result, oid);
        }
 
                        break;
                default:
                        elog(ERROR, "invalid relpersistence: %c", relpersistence);
-                       return NULL;                            /* placate compiler */
+                       return NULL;            /* placate compiler */
        }
 
        srel = smgropen(rnode, backend);
 
                                 errhint("Must be superuser to create an access method.")));
 
        /* Check if name is used */
-       amoid = GetSysCacheOid1(AMNAME,  Anum_pg_am_oid,
+       amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
                                                        CStringGetDatum(stmt->amname));
        if (OidIsValid(amoid))
        {
 
 
 static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
 static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
-                          bool verbose, bool *pSwapToastByContent,
-                          TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
+                               bool verbose, bool *pSwapToastByContent,
+                               TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
 static List *get_tables_to_cluster(MemoryContext cluster_context);
 
 
 
        /* Copy the heap data into the new table in the desired order */
        copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
-                                  &swap_toast_by_content, &frozenXid, &cutoffMulti);
+                                       &swap_toast_by_content, &frozenXid, &cutoffMulti);
 
        /*
         * Swap the physical files of the target and transient tables, then
 
                                (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                                 errmsg("function \"%s\" must be fired for INSERT or UPDATE",
                                                funcname)));
-               ItemPointerSetInvalid(&checktid);               /* keep compiler quiet */
+               ItemPointerSetInvalid(&checktid);       /* keep compiler quiet */
        }
 
        slot = table_slot_create(trigdata->tg_relation, NULL);
        tmptid = checktid;
        {
                IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
-               bool call_again = false;
+               bool            call_again = false;
 
                if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
                                                                         &call_again, NULL))
 
 
        /* We assume that there can be at most one matching tuple */
        if (HeapTupleIsValid(dbtuple))
-               oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+               oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
        else
                oid = InvalidOid;
 
 
 static void
 ExplainPrintSettings(ExplainState *es)
 {
-       int             num;
+       int                     num;
        struct config_generic **gucs;
 
        /* bail out if information about settings not requested */
 
        if (es->format != EXPLAIN_FORMAT_TEXT)
        {
-               int             i;
+               int                     i;
 
                ExplainOpenGroup("Settings", "Settings", true, es);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        setting = GetConfigOptionByName(conf->name, NULL, true);
        }
        else
        {
-               int             i;
-               StringInfoData  str;
+               int                     i;
+               StringInfoData str;
 
                initStringInfo(&str);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        if (i > 0)
        ExplainNode(ps, NIL, NULL, NULL, es);
 
        /*
-        * If requested, include information about GUC parameters with values
-        * that don't match the built-in defaults.
+        * If requested, include information about GUC parameters with values that
+        * don't match the built-in defaults.
         */
        ExplainPrintSettings(es);
 }
                                if (es->costs && es->verbose &&
                                        outerPlanState(planstate)->worker_jit_instrument)
                                {
-                                       PlanState *child = outerPlanState(planstate);
+                                       PlanState  *child = outerPlanState(planstate);
                                        int                     n;
                                        SharedJitInstrumentation *w = child->worker_jit_instrument;
 
 
 
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("@extschema@"),
-                                                                               CStringGetTextDatum(qSchemaName));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("@extschema@"),
+                                                                                       CStringGetTextDatum(qSchemaName));
                }
 
                /*
                {
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("MODULE_PATHNAME"),
-                                                                               CStringGetTextDatum(control->module_pathname));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("MODULE_PATHNAME"),
+                                                                                       CStringGetTextDatum(control->module_pathname));
                }
 
                /* And now back to C string */
 
  */
 struct ReindexIndexCallbackState
 {
-       bool        concurrent;                 /* flag from statement */
-       Oid         locked_table_oid;   /* tracks previously locked table */
+       bool            concurrent;             /* flag from statement */
+       Oid                     locked_table_oid;       /* tracks previously locked table */
 };
 
 /*
                {
                        if (progress)
                        {
-                               PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
+                               PGPROC     *holder = BackendIdGetProc(old_snapshots[i].backendId);
 
                                pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
                                                                                         holder->pid);
         */
        if (partitioned && stmt->relation && !stmt->relation->inh)
        {
-               PartitionDesc   pd = RelationGetPartitionDesc(rel);
+               PartitionDesc pd = RelationGetPartitionDesc(rel);
 
                if (pd->nparts != 0)
                        flags |= INDEX_CREATE_INVALID;
        /* Get a session-level lock on each table. */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
 
                /*
                 * The index is now valid in the sense that it contains all currently
-                * interesting tuples.  But since it might not contain tuples deleted just
-                * before the reference snap was taken, we have to wait out any
+                * interesting tuples.  But since it might not contain tuples deleted
+                * just before the reference snap was taken, we have to wait out any
                 * transactions that might have older snapshots.
                 */
                pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
         */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
 
                elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
 
        /*
-        * When none of the defined statistics types contain datum values
-        * from the table's columns then there's no need to reset the stats.
-        * Functional dependencies and ndistinct stats should still hold true.
+        * When none of the defined statistics types contain datum values from the
+        * table's columns then there's no need to reset the stats. Functional
+        * dependencies and ndistinct stats should still hold true.
         */
        if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
        {
 
                                   const char *colName, LOCKMODE lockmode);
 static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
 static bool ConstraintImpliedByRelConstraint(Relation scanrel,
-                                                                        List *partConstraint, List *existedConstraints);
+                                                                List *partConstraint, List *existedConstraints);
 static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
                                        Node *newDefault, LOCKMODE lockmode);
 static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
        }
 
        /*
-        * Now add any newly specified CHECK constraints to the new relation.
-        * Same as for defaults above, but these need to come after partitioning
-        * is set up.
+        * Now add any newly specified CHECK constraints to the new relation. Same
+        * as for defaults above, but these need to come after partitioning is set
+        * up.
         */
        if (stmt->constraints)
                AddRelationNewConstraints(rel, NIL, stmt->constraints,
         */
        if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
        {
-               HeapTuple               locTuple;
-               Form_pg_index   indexform;
-               bool                    indisvalid;
+               HeapTuple       locTuple;
+               Form_pg_index indexform;
+               bool            indisvalid;
 
                locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
                if (!HeapTupleIsValid(locTuple))
                        {
                                Relation        toastrel = relation_open(toast_relid,
                                                                                                         AccessExclusiveLock);
+
                                RelationSetNewRelfilenode(toastrel,
                                                                                  toastrel->rd_rel->relpersistence);
                                table_close(toastrel, NoLock);
                        /* nothing to do here, oid columns don't exist anymore */
                        break;
                case AT_SetTableSpace:  /* SET TABLESPACE */
+
                        /*
                         * Only do this for partitioned tables and indexes, for which this
                         * is just a catalog change.  Other relation types which have
                {
                        /*
                         * If required, test the current data within the table against new
-                        * constraints generated by ALTER TABLE commands, but don't rebuild
-                        * data.
+                        * constraints generated by ALTER TABLE commands, but don't
+                        * rebuild data.
                         */
                        if (tab->constraints != NIL || tab->verify_new_notnull ||
                                tab->partition_constraint != NULL)
        {
                /*
                 * If we are rebuilding the tuples OR if we added any new but not
-                * verified NOT NULL constraints, check all not-null constraints.
-                * This is a bit of overkill but it minimizes risk of bugs, and
+                * verified NOT NULL constraints, check all not-null constraints. This
+                * is a bit of overkill but it minimizes risk of bugs, and
                 * heap_attisnull is a pretty cheap test anyway.
                 */
                for (i = 0; i < newTupDesc->natts; i++)
                        {
                                /*
                                 * If there's no rewrite, old and new table are guaranteed to
-                                * have the same AM, so we can just use the old slot to
-                                * verify new constraints etc.
+                                * have the same AM, so we can just use the old slot to verify
+                                * new constraints etc.
                                 */
                                insertslot = oldslot;
                        }
                /*
                 * Ordinarily phase 3 must ensure that no NULLs exist in columns that
                 * are set NOT NULL; however, if we can find a constraint which proves
-                * this then we can skip that.  We needn't bother looking if
-                * we've already found that we must verify some other NOT NULL
-                * constraint.
+                * this then we can skip that.  We needn't bother looking if we've
+                * already found that we must verify some other NOT NULL constraint.
                 */
                if (!tab->verify_new_notnull &&
                        !NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
         */
        if (tab->rewrite)
        {
-               Relation    newrel;
+               Relation        newrel;
 
                newrel = table_open(RelationGetRelid(rel), NoLock);
                RelationClearMissing(newrel);
                                        {
                                                /*
                                                 * Changing the type of a column that is used by a
-                                                * generated column is not allowed by SQL standard.
-                                                * It might be doable with some thinking and effort.
+                                                * generated column is not allowed by SQL standard. It
+                                                * might be doable with some thinking and effort.
                                                 */
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_SYNTAX_ERROR),
 
        /*
         * Here we go --- change the recorded column type and collation.  (Note
-        * heapTup is a copy of the syscache entry, so okay to scribble on.)
-        * First fix up the missing value if any.
+        * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+        * fix up the missing value if any.
         */
        if (attTup->atthasmissing)
        {
-               Datum       missingval;
-               bool        missingNull;
+               Datum           missingval;
+               bool            missingNull;
 
                /* if rewrite is true the missing value should already be cleared */
                Assert(tab->rewrite == 0);
 
                /* if it's a null array there is nothing to do */
 
-               if (! missingNull)
+               if (!missingNull)
                {
                        /*
                         * Get the datum out of the array and repack it in a new array
                         * changed, only the array metadata.
                         */
 
-                       int one = 1;
-                       bool isNull;
-                       Datum       valuesAtt[Natts_pg_attribute];
-                       bool        nullsAtt[Natts_pg_attribute];
-                       bool        replacesAtt[Natts_pg_attribute];
-                       HeapTuple   newTup;
+                       int                     one = 1;
+                       bool            isNull;
+                       Datum           valuesAtt[Natts_pg_attribute];
+                       bool            nullsAtt[Natts_pg_attribute];
+                       bool            replacesAtt[Natts_pg_attribute];
+                       HeapTuple       newTup;
 
                        MemSet(valuesAtt, 0, sizeof(valuesAtt));
                        MemSet(nullsAtt, false, sizeof(nullsAtt));
                                                                                   attTup->attalign,
                                                                                   &isNull);
                        missingval = PointerGetDatum(
-                               construct_array(&missingval,
-                                                               1,
-                                                               targettype,
-                                                               tform->typlen,
-                                                               tform->typbyval,
-                                                               tform->typalign));
+                                                                                construct_array(&missingval,
+                                                                                                                1,
+                                                                                                                targettype,
+                                                                                                                tform->typlen,
+                                                                                                                tform->typbyval,
+                                                                                                                tform->typalign));
 
                        valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
                        replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
        Oid                     reloid = RelationGetRelid(rel);
 
        /*
-        * Shouldn't be called on relations having storage; these are processed
-        * in phase 3.
+        * Shouldn't be called on relations having storage; these are processed in
+        * phase 3.
         */
        Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
 
        /* Can't allow a non-shared relation in pg_global */
        if (newTableSpace == GLOBALTABLESPACE_OID)
                ereport(ERROR,
-               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                       errmsg("only shared relations can be placed in pg_global tablespace")));
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("only shared relations can be placed in pg_global tablespace")));
 
        /*
         * No work if no change in tablespace.
                                i = -1;
                                while ((i = bms_next_member(expr_attrs, i)) >= 0)
                                {
-                                       AttrNumber  attno = i + FirstLowInvalidHeapAttributeNumber;
+                                       AttrNumber      attno = i + FirstLowInvalidHeapAttributeNumber;
 
                                        if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
                                                ereport(ERROR,
 bool
 ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
 {
-       List            *existConstraint = list_copy(provenConstraint);
+       List       *existConstraint = list_copy(provenConstraint);
        TupleConstr *constr = RelationGetDescr(scanrel)->constr;
        int                     num_check,
                                i;
         * not-false and try to prove the same for testConstraint.
         *
         * Note that predicate_implied_by assumes its first argument is known
-        * immutable.  That should always be true for both NOT NULL and
-        * partition constraints, so we don't test it here.
+        * immutable.  That should always be true for both NOT NULL and partition
+        * constraints, so we don't test it here.
         */
        return predicate_implied_by(testConstraint, existConstraint, true);
 }
 
 
        /*
         * Allow explicit specification of database's default tablespace in
-        * default_tablespace without triggering permissions checks.  Don't
-        * allow specifying that when creating a partitioned table, however,
-        * since the result is confusing.
+        * default_tablespace without triggering permissions checks.  Don't allow
+        * specifying that when creating a partitioned table, however, since the
+        * result is confusing.
         */
        if (result == MyDatabaseTableSpace)
        {
 
                case AFTER_TRIGGER_FDW_REUSE:
 
                        /*
-                        * Store tuple in the slot so that tg_trigtuple does not
-                        * reference tuplestore memory.  (It is formally possible for the
-                        * trigger function to queue trigger events that add to the same
+                        * Store tuple in the slot so that tg_trigtuple does not reference
+                        * tuplestore memory.  (It is formally possible for the trigger
+                        * function to queue trigger events that add to the same
                         * tuplestore, which can push other tuples out of memory.)  The
                         * distinction is academic, because we start with a minimal tuple
                         * that is stored as a heap tuple, constructed in different memory
 
 ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
 {
        VacuumParams params;
-       bool verbose = false;
-       bool skip_locked = false;
-       bool analyze = false;
-       bool freeze = false;
-       bool full = false;
-       bool disable_page_skipping = false;
-       ListCell        *lc;
+       bool            verbose = false;
+       bool            skip_locked = false;
+       bool            analyze = false;
+       bool            freeze = false;
+       bool            full = false;
+       bool            disable_page_skipping = false;
+       ListCell   *lc;
 
        /* Set default value */
        params.index_cleanup = VACOPT_TERNARY_DEFAULT;
        /* Parse options list */
        foreach(lc, vacstmt->options)
        {
-               DefElem *opt = (DefElem *) lfirst(lc);
+               DefElem    *opt = (DefElem *) lfirst(lc);
 
                /* Parse common options for VACUUM and ANALYZE */
                if (strcmp(opt->defname, "verbose") == 0)
        /*
         * Determine the log level.
         *
-        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
-        * in the permission checks; otherwise, only log if the caller so requested.
+        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+        * statements in the permission checks; otherwise, only log if the caller
+        * so requested.
         */
        if (!IsAutoVacuumWorkerProcess())
                elevel = WARNING;
                }
 
                /*
-                * Some table AMs might not need per-relation xid / multixid
-                * horizons. It therefore seems reasonable to allow relfrozenxid and
-                * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+                * Some table AMs might not need per-relation xid / multixid horizons.
+                * It therefore seems reasonable to allow relfrozenxid and relminmxid
+                * to not be set (i.e. set to their respective Invalid*Id)
                 * independently. Thus validate and compute horizon for each only if
                 * set.
                 *
 
 static void
 ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
 {
-       PlanState *parent = state->parent;
+       PlanState  *parent = state->parent;
        TupleDesc       desc = NULL;
        const TupleTableSlotOps *tts_ops = NULL;
-       bool isfixed = false;
+       bool            isfixed = false;
 
        if (op->d.fetch.known_desc != NULL)
        {
  */
 ExprState *
 ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
-                                          const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+                                          const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
                                           int numCols,
                                           const AttrNumber *keyColIdx,
                                           const Oid *eqfunctions,
 
 ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
                           TupleTableSlot *slot)
 {
-       Datum d;
+       Datum           d;
 
        /* slot_getsysattr has sufficient defenses against bad attnums */
        d = slot_getsysattr(slot,
 
 
                if (relation)
                        *slot = table_slot_create(relation,
-                                                                                &epqstate->estate->es_tupleTable);
+                                                                         &epqstate->estate->es_tupleTable);
                else
                        *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
                                                                           epqstate->origslot->tts_tupleDescriptor,
 
         * instrumentation in per-query context.
         */
        ibytes = offsetof(SharedJitInstrumentation, jit_instr)
-                        + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+               + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
        planstate->worker_jit_instrument =
                MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
 
        /* Accumulate JIT instrumentation, if any. */
        if (pei->jit_instrumentation)
                ExecParallelRetrieveJitInstrumentation(pei->planstate,
-                                                                                       pei->jit_instrumentation);
+                                                                                          pei->jit_instrumentation);
 
        /* Free any serialized parameters. */
        if (DsaPointerIsValid(pei->param_exec))
 
        TupleTableSlot *tupslot;
        AttrNumber *tupmap;
        int                     indexes[FLEXIBLE_ARRAY_MEMBER];
-} PartitionDispatchData;
+}                      PartitionDispatchData;
 
 /* struct to hold result relations coming from UPDATE subplans */
 typedef struct SubplanResultRelHashElem
 {
-       Oid             relid;          /* hash key -- must be first */
+       Oid                     relid;                  /* hash key -- must be first */
        ResultRelInfo *rri;
 } SubplanResultRelHashElem;
 
                                if (proute->subplan_resultrel_htab)
                                {
                                        Oid                     partoid = partdesc->oids[partidx];
-                                       SubplanResultRelHashElem   *elem;
+                                       SubplanResultRelHashElem *elem;
 
                                        elem = hash_search(proute->subplan_resultrel_htab,
                                                                           &partoid, HASH_FIND, NULL);
                ResultRelInfo *rri = &mtstate->resultRelInfo[i];
                bool            found;
                Oid                     partoid = RelationGetRelid(rri->ri_RelationDesc);
-               SubplanResultRelHashElem   *elem;
+               SubplanResultRelHashElem *elem;
 
                elem = (SubplanResultRelHashElem *)
                        hash_search(htab, &partoid, HASH_ENTER, &found);
                                 * It's safe to reuse these from the partition root, as we
                                 * only process one tuple at a time (therefore we won't
                                 * overwrite needed data in slots), and the results of
-                                * projections are independent of the underlying
-                                * storage. Projections and where clauses themselves don't
-                                * store state / are independent of the underlying storage.
+                                * projections are independent of the underlying storage.
+                                * Projections and where clauses themselves don't store state
+                                * / are independent of the underlying storage.
                                 */
                                leaf_part_rri->ri_onConflict->oc_ProjSlot =
                                        rootResultRelInfo->ri_onConflict->oc_ProjSlot;
 {
        MemoryContext oldcxt;
        PartitionRoutingInfo *partrouteinfo;
-       int             rri_index;
+       int                     rri_index;
 
        oldcxt = MemoryContextSwitchTo(proute->memcxt);
 
                        }
                        else
                        {
-                               int             pd_idx = 0;
-                               int             pp_idx;
+                               int                     pd_idx = 0;
+                               int                     pp_idx;
 
                                /*
                                 * Some new partitions have appeared since plan time, and
                                 * those are reflected in our PartitionDesc but were not
                                 * present in the one used to construct subplan_map and
                                 * subpart_map.  So we must construct new and longer arrays
-                                * where the partitions that were originally present map to the
-                                * same place, and any added indexes map to -1, as if the
+                                * where the partitions that were originally present map to
+                                * the same place, and any added indexes map to -1, as if the
                                 * new partitions had been pruned.
                                 */
                                pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
 
 static bool
 tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
 {
-       int         attrnum;
+       int                     attrnum;
 
        Assert(slot1->tts_tupleDescriptor->natts ==
                   slot2->tts_tupleDescriptor->natts);
 
                if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
                                                                                        att->attcollation,
-                                                                               slot1->tts_values[attrnum],
-                                                                               slot2->tts_values[attrnum])))
+                                                                                       slot1->tts_values[attrnum],
+                                                                                       slot2->tts_values[attrnum])))
                        return false;
        }
 
                resultRelInfo->ri_TrigDesc->trig_insert_before_row)
        {
                if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
        {
                if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                                                  tid, NULL, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
                if (resultRelInfo->ri_PartitionCheck)
                        ExecPartitionCheck(resultRelInfo, slot, estate, true);
 
-               simple_table_update(rel, tid, slot,estate->es_snapshot,
+               simple_table_update(rel, tid, slot, estate->es_snapshot,
                                                        &update_indexes);
 
                if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
                                                 const char *relname)
 {
        /*
-        * We currently only support writing to regular tables.  However, give
-        * a more specific error for partitioned and foreign tables.
+        * We currently only support writing to regular tables.  However, give a
+        * more specific error for partitioned and foreign tables.
         */
        if (relkind == RELKIND_PARTITIONED_TABLE)
                ereport(ERROR,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a partitioned table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
        else if (relkind == RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a foreign table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 
        if (relkind != RELKIND_RELATION)
                ereport(ERROR,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is not a table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 }
 
 
                        /* Check if it meets the access-method conditions */
                        if (!(*recheckMtd) (node, slot))
-                               return ExecClearTuple(slot);    /* would not be returned by scan */
+                               return ExecClearTuple(slot);    /* would not be returned by
+                                                                                                * scan */
 
                        return slot;
                }
 
 
 static TupleDesc ExecTypeFromTLInternal(List *targetList,
                                           bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
                                           int natts);
 static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
-                                                                                          HeapTuple tuple,
-                                                                                          Buffer buffer,
-                                                                                          bool transfer_pin);
+                                                       HeapTuple tuple,
+                                                       Buffer buffer,
+                                                       bool transfer_pin);
 static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);
 
 
 {
        elog(ERROR, "virtual tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 /*
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
                if (att->attlen == -1 &&
                        VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
                {
-                       Size data_length;
+                       Size            data_length;
 
                        /*
                         * We want to flatten the expanded value so that the materialized
                }
                else
                {
-                       Size data_length = 0;
+                       Size            data_length = 0;
 
                        data = (char *) att_align_nominal(data, att->attalign);
                        data_length = att_addlength_datum(data_length, att->attlen, val);
 static void
 tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
 {
-       HeapTuple tuple;
+       HeapTuple       tuple;
        MemoryContext oldcontext;
 
        oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
 {
        elog(ERROR, "minimal tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 static void
 MakeTupleTableSlot(TupleDesc tupleDesc,
                                   const TupleTableSlotOps *tts_ops)
 {
-       Size            basesz, allocsz;
+       Size            basesz,
+                               allocsz;
        TupleTableSlot *slot;
+
        basesz = tts_ops->base_slot_size;
 
        /*
 slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
 {
        /* Check for caller errors */
-       Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+       Assert(slot->tts_nvalid < attnum);      /* slot_getsomeattr checked */
        Assert(attnum > 0);
 
        if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
        slot->tts_ops->getsomeattrs(slot, attnum);
 
        /*
-        * If the underlying tuple doesn't have enough attributes, tuple descriptor
-        * must have the missing attributes.
+        * If the underlying tuple doesn't have enough attributes, tuple
+        * descriptor must have the missing attributes.
         */
        if (unlikely(slot->tts_nvalid < attnum))
        {
 
                                if (junkFilter)
                                {
                                        TupleTableSlot *slot =
-                                               MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+                                       MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
 
                                        *junkFilter = ExecInitJunkFilter(tlist, slot);
                                }
                if (junkFilter)
                {
                        TupleTableSlot *slot =
-                               MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+                       MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
 
                        *junkFilter = ExecInitJunkFilterConversion(tlist,
                                                                                                           CreateTupleDescCopy(tupdesc),
 
                          oldAbbrevVal == newAbbrevVal &&
                          DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
                                                                                         pertrans->aggCollation,
-                                                                                oldVal, *newVal)))))
+                                                                                        oldVal, *newVal)))))
                {
                        /* equal to prior, so forget this one */
                        if (!pertrans->inputtypeByVal && !*isNull)
 
        Assert(HeapTupleIsValid(tup));
 
        /* Build the TupleTableSlot for the given tuple */
-       ExecStoreHeapTuple(tup,                 /* tuple to store */
+       ExecStoreHeapTuple(tup,         /* tuple to store */
                                           gm_state->gm_slots[reader],  /* slot in which to store
                                                                                                         * the tuple */
-                                          true);               /* pfree tuple when done with it */
+                                          true);       /* pfree tuple when done with it */
 
        return true;
 }
 
                rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args),
                                                                                                  (PlanState *) hjstate));
                rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args),
-                                                                                                  innerPlanState(hjstate)));
+                                                                                                       innerPlanState(hjstate)));
                hoperators = lappend_oid(hoperators, hclause->opno);
                hcollations = lappend_oid(hcollations, hclause->inputcollid);
        }
 
 
                /*
                 * Fill the scan tuple slot with data from the index.  This might be
-                * provided in either HeapTuple or IndexTuple format.  Conceivably
-                * an index AM might fill both fields, in which case we prefer the
-                * heap format, since it's probably a bit cheaper to fill a slot from.
+                * provided in either HeapTuple or IndexTuple format.  Conceivably an
+                * index AM might fill both fields, in which case we prefer the heap
+                * format, since it's probably a bit cheaper to fill a slot from.
                 */
                if (scandesc->xs_hitup)
                {
 
                                                                scandesc->xs_orderbynulls,
                                                                node) <= 0)
                        {
-                               HeapTuple tuple;
+                               HeapTuple       tuple;
 
                                tuple = reorderqueue_pop(node);
 
 
        /* node returns unmodified slots from the outer plan */
        lrstate->ps.resultopsset = true;
        lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
-                                                                                                       &lrstate->ps.resultopsfixed);
+                                                                                                &lrstate->ps.resultopsfixed);
 
        /*
         * LockRows nodes do no projections, so initialize projection info for
 
                                                                goto ldelete;
 
                                                case TM_SelfModified:
+
                                                        /*
                                                         * This can be reached when following an update
                                                         * chain from a tuple updated by another session,
        {
                if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                                                  tupleid, oldtuple, slot))
-                       return NULL;        /* "do nothing" */
+                       return NULL;            /* "do nothing" */
        }
 
        /* INSTEAD OF ROW UPDATE Triggers */
        {
                if (!ExecIRUpdateTriggers(estate, resultRelInfo,
                                                                  oldtuple, slot))
-                       return NULL;        /* "do nothing" */
+                       return NULL;            /* "do nothing" */
        }
        else if (resultRelInfo->ri_FdwRoutine)
        {
                                                        return NULL;
 
                                                case TM_SelfModified:
+
                                                        /*
                                                         * This can be reached when following an update
                                                         * chain from a tuple updated by another session,
 
        scan = node->ss.ss_currentScanDesc;
 
        if (scan != NULL)
-               table_rescan(scan,      /* scan desc */
-                                        NULL); /* new scan keys */
+               table_rescan(scan,              /* scan desc */
+                                        NULL);         /* new scan keys */
 
        ExecScanReScan((ScanState *) node);
 }
 
                /* Apply the type-specific equality function */
                if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i],
                                                                                        collations[i],
-                                                                               attr1, attr2)))
+                                                                                       attr1, attr2)))
                {
                        result = true;          /* they are unequal */
                        break;
 
        ExecInitScanTupleSlot(estate, &subquerystate->ss,
                                                  ExecGetResultType(subquerystate->subplan),
                                                  ExecGetResultSlotOps(subquerystate->subplan, NULL));
+
        /*
         * The slot used as the scantuple isn't the slot above (outside of EPQ),
         * but the one from the node below.
 
         * later used inside the PAM conversation to pass the password to the
         * authentication module.
         */
-       pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
-                                                                                                        * not allocated */
+       pam_passw_conv.appdata_ptr = unconstify(char *, password);      /* from password above,
+                                                                                                                                * not allocated */
 
        /* Optionally, one can set the service name in pg_hba.conf */
        if (port->hba->pamservice && port->hba->pamservice[0] != '\0')
 
 static bool ssl_is_server_start;
 
 static int ssl_protocol_version_to_openssl(int v, const char *guc_name,
-                                                                                  int loglevel);
+                                                               int loglevel);
 #ifndef SSL_CTX_set_min_proto_version
-static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
-static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
+static int     SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
+static int     SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
 #endif
 
 
 
        if (ssl_min_protocol_version)
        {
-               int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
-                                                                                                         "ssl_min_protocol_version",
-                                                                                                         isServerStart ? FATAL : LOG);
+               int                     ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
+                                                                                                                         "ssl_min_protocol_version",
+                                                                                                                         isServerStart ? FATAL : LOG);
+
                if (ssl_ver == -1)
                        goto error;
                SSL_CTX_set_min_proto_version(context, ssl_ver);
 
        if (ssl_max_protocol_version)
        {
-               int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
-                                                                                                         "ssl_max_protocol_version",
-                                                                                                         isServerStart ? FATAL : LOG);
+               int                     ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
+                                                                                                                         "ssl_max_protocol_version",
+                                                                                                                         isServerStart ? FATAL : LOG);
+
                if (ssl_ver == -1)
                        goto error;
                SSL_CTX_set_max_proto_version(context, ssl_ver);
                serial = X509_get_serialNumber(port->peer);
                b = ASN1_INTEGER_to_BN(serial, NULL);
                decimal = BN_bn2dec(b);
+
                BN_free(b);
                strlcpy(ptr, decimal, len);
                OPENSSL_free(decimal);
 
        }
 
        /*
-        * Now add bits of the last word, but only those before the item.
-        * We can do that by applying a mask and then using popcount again.
-        * To get 0-based index, we want to count only preceding bits, not
-        * the item itself, so we subtract 1.
+        * Now add bits of the last word, but only those before the item. We can
+        * do that by applying a mask and then using popcount again. To get
+        * 0-based index, we want to count only preceding bits, not the item
+        * itself, so we subtract 1.
         */
        mask = ((bitmapword) 1 << bitnum) - 1;
        result += bmw_popcount(a->words[wordnum] & mask);
 
        int                     listidx;
 
        /*
-        * If there's exactly one clause (and it was not estimated yet), just
-        * go directly to clause_selectivity(). None of what we might do below
-        * is relevant.
+        * If there's exactly one clause (and it was not estimated yet), just go
+        * directly to clause_selectivity(). None of what we might do below is
+        * relevant.
         */
        if ((list_length(clauses) == 1) &&
                bms_num_members(estimatedclauses) == 0)
 
        if (!root->partColsUpdated)
                root->partColsUpdated =
                        has_partition_attrs(parentrel, parentrte->updatedCols, NULL);
+
        /*
         * There shouldn't be any generated columns in the partition key.
         */
 
  */
 void
 adjust_limit_rows_costs(double *rows,  /* in/out parameter */
-                                               Cost *startup_cost,     /* in/out parameter */
+                                               Cost *startup_cost, /* in/out parameter */
                                                Cost *total_cost,       /* in/out parameter */
                                                int64 offset_est,
                                                int64 count_est)
 
                                                                                                          InvalidOid, &found_whole_row);
 
                        /*
-                        * Prevent this for the same reason as for constraints below.
-                        * Note that defaults cannot contain any vars, so it's OK that the
+                        * Prevent this for the same reason as for constraints below. Note
+                        * that defaults cannot contain any vars, so it's OK that the
                         * error message refers to generated columns.
                         */
                        if (found_whole_row)
                 * any necessary validation.
                 */
                result_spec->lowerdatums =
-                                       transformPartitionRangeBounds(pstate, spec->lowerdatums,
-                                                                                                 parent);
+                       transformPartitionRangeBounds(pstate, spec->lowerdatums,
+                                                                                 parent);
                result_spec->upperdatums =
-                                       transformPartitionRangeBounds(pstate, spec->upperdatums,
-                                                                                                 parent);
+                       transformPartitionRangeBounds(pstate, spec->upperdatums,
+                                                                                 parent);
        }
        else
                elog(ERROR, "unexpected partition strategy: %d", (int) strategy);
        i = j = 0;
        foreach(lc, blist)
        {
-               Node *expr = lfirst(lc);
+               Node       *expr = lfirst(lc);
                PartitionRangeDatum *prd = NULL;
 
                /*
-                * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed
-                * in as ColumnRefs.
+                * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in
+                * as ColumnRefs.
                 */
                if (IsA(expr, ColumnRef))
                {
-                       ColumnRef *cref = (ColumnRef *) expr;
-                       char *cname = NULL;
+                       ColumnRef  *cref = (ColumnRef *) expr;
+                       char       *cname = NULL;
 
                        /*
                         * There should be a single field named either "minvalue" or
                        if (cname == NULL)
                        {
                                /*
-                                * ColumnRef is not in the desired single-field-name form.
-                                * For consistency between all partition strategies, let the
+                                * ColumnRef is not in the desired single-field-name form. For
+                                * consistency between all partition strategies, let the
                                 * expression transformation report any errors rather than
                                 * doing it ourselves.
                                 */
        }
 
        /*
-        * Once we see MINVALUE or MAXVALUE for one column, the remaining
-        * columns must be the same.
+        * Once we see MINVALUE or MAXVALUE for one column, the remaining columns
+        * must be the same.
         */
        validateInfiniteBounds(pstate, result);
 
 
        /*
         * Check that the input expression's collation is compatible with one
-        * specified for the parent's partition key (partcollation).  Don't
-        * throw an error if it's the default collation which we'll replace with
-        * the parent's collation anyway.
+        * specified for the parent's partition key (partcollation).  Don't throw
+        * an error if it's the default collation which we'll replace with the
+        * parent's collation anyway.
         */
        if (IsA(value, CollateExpr))
        {
-               Oid             exprCollOid = exprCollation(value);
+               Oid                     exprCollOid = exprCollation(value);
 
                if (OidIsValid(exprCollOid) &&
                        exprCollOid != DEFAULT_COLLATION_OID &&
 
 {
        MemoryContext pdir_mcxt;
        HTAB       *pdir_hash;
-} PartitionDirectoryData;
+}                      PartitionDirectoryData;
 
 typedef struct PartitionDirectoryEntry
 {
 
        /*
         * Get partition oids from pg_inherits.  This uses a single snapshot to
-        * fetch the list of children, so while more children may be getting
-        * added concurrently, whatever this function returns will be accurate
-        * as of some well-defined point in time.
+        * fetch the list of children, so while more children may be getting added
+        * concurrently, whatever this function returns will be accurate as of
+        * some well-defined point in time.
         */
        inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock);
        nparts = list_length(inhoids);
                 *
                 * Note that this algorithm assumes that PartitionBoundSpec we manage
                 * to fetch is the right one -- so this is only good enough for
-                * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION
-                * or some hypothetical operation that changes the partition bounds.
+                * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or
+                * some hypothetical operation that changes the partition bounds.
                 */
                if (boundspec == NULL)
                {
                        Relation        pg_class;
-                       SysScanDesc     scan;
-                       ScanKeyData     key[1];
+                       SysScanDesc scan;
+                       ScanKeyData key[1];
                        Datum           datum;
                        bool            isnull;
 
 void
 DestroyPartitionDirectory(PartitionDirectory pdir)
 {
-       HASH_SEQ_STATUS status;
+       HASH_SEQ_STATUS status;
        PartitionDirectoryEntry *pde;
 
        hash_seq_init(&status, pdir->pdir_hash);
 
                                                char **sender_host, int *sender_port);
 static char *libpqrcv_identify_system(WalReceiverConn *conn,
                                                 TimeLineID *primary_tli);
-static int libpqrcv_server_version(WalReceiverConn *conn);
+static int     libpqrcv_server_version(WalReceiverConn *conn);
 static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
                                                                 TimeLineID tli, char **filename,
                                                                 char **content, int *len);
 
 Oid *
 ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids)
 {
-       Oid        *relids;
-       Size    alloc_len;
+       Oid                *relids;
+       Size            alloc_len;
 
        alloc_len = sizeof(Oid) * nrelids;
 
                else
                {
                        /*
-                        * Maybe we already saw this tuple before in this transaction,
-                        * but if so it must have the same cmin.
+                        * Maybe we already saw this tuple before in this transaction, but
+                        * if so it must have the same cmin.
                         */
                        Assert(ent->cmin == change->data.tuplecid.cmin);
 
                        }
                case REORDER_BUFFER_CHANGE_TRUNCATE:
                        {
-                               Size    size;
-                               char   *data;
+                               Size            size;
+                               char       *data;
 
                                /* account for the OIDs of truncated relations */
                                size = sizeof(Oid) * change->data.truncate.nrelids;
                        /* the base struct contains all the data, easy peasy */
                case REORDER_BUFFER_CHANGE_TRUNCATE:
                        {
-                               Oid        *relids;
+                               Oid                *relids;
 
                                relids = ReorderBufferGetRelids(rb,
                                                                                                change->data.truncate.nrelids);
 
                SpinLockRelease(&src->mutex);
 
                /*
-                * Check if the source slot still exists and is valid. We regard it
-                * as invalid if the type of replication slot or name has been
-                * changed, or the restart_lsn either is invalid or has gone backward.
-                * (The restart_lsn could go backwards if the source slot is dropped
-                * and copied from an older slot during installation.)
+                * Check if the source slot still exists and is valid. We regard it as
+                * invalid if the type of replication slot or name has been changed,
+                * or the restart_lsn either is invalid or has gone backward. (The
+                * restart_lsn could go backwards if the source slot is dropped and
+                * copied from an older slot during installation.)
                 *
                 * Since erroring out will release and drop the destination slot we
                 * don't need to release it here.
 
                                           WAIT_EVENT_SYNC_REP);
 
                /*
-                * If the postmaster dies, we'll probably never get an
-                * acknowledgment, because all the wal sender processes will exit. So
-                * just bail out.
+                * If the postmaster dies, we'll probably never get an acknowledgment,
+                * because all the wal sender processes will exit. So just bail out.
                 */
                if (rc & WL_POSTMASTER_DEATH)
                {
 
         * anyway.
         *
         * Note we use _exit(2) not _exit(0).  This is to force the postmaster
-        * into a system reset cycle if someone sends a manual SIGQUIT to a
-        * random backend.  This is necessary precisely because we don't clean up
-        * our shared memory state.  (The "dead man switch" mechanism in
-        * pmsignal.c should ensure the postmaster sees this as a crash, too, but
-        * no harm in being doubly sure.)
+        * into a system reset cycle if someone sends a manual SIGQUIT to a random
+        * backend.  This is necessary precisely because we don't clean up our
+        * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
+        * should ensure the postmaster sees this as a crash, too, but no harm in
+        * being doubly sure.)
         */
        _exit(2);
 }
 
        int                     write_head;
        int                     read_heads[NUM_SYNC_REP_WAIT_MODE];
        WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-}                      LagTracker;
+} LagTracker;
 
 static LagTracker *lag_tracker;
 
                sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
 
                wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
-                                        WL_SOCKET_READABLE | WL_TIMEOUT;
+                       WL_SOCKET_READABLE | WL_TIMEOUT;
 
                if (pq_is_send_pending())
                        wakeEvents |= WL_SOCKET_WRITEABLE;
                        int                     wakeEvents;
 
                        wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT |
-                                                WL_SOCKET_READABLE;
+                               WL_SOCKET_READABLE;
 
                        /*
                         * Use fresh timestamp, not last_processed, to reduce the chance
 
         * build an array of SortItem(s) sorted using the multi-sort support
         *
         * XXX This relies on all stats entries pointing to the same tuple
-        * descriptor.  For now that assumption holds, but it might change in
-        * the future for example if we support statistics on multiple tables.
+        * descriptor.  For now that assumption holds, but it might change in the
+        * future for example if we support statistics on multiple tables.
         */
        items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc,
                                                           mss, k, attnums_dep);
        {
                /*
                 * Check if the group ended, which may be either because we processed
-                * all the items (i==nitems), or because the i-th item is not equal
-                * to the preceding one.
+                * all the items (i==nitems), or because the i-th item is not equal to
+                * the preceding one.
                 */
                if (i == nitems ||
                        multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0)
 
                                          int nvacatts, VacAttrStats **vacatts);
 static void statext_store(Relation pg_stext, Oid relid,
                          MVNDistinct *ndistinct, MVDependencies *dependencies,
-                         MCVList * mcvlist, VacAttrStats **stats);
+                         MCVList *mcvlist, VacAttrStats **stats);
 
 
 /*
 static void
 statext_store(Relation pg_stext, Oid statOid,
                          MVNDistinct *ndistinct, MVDependencies *dependencies,
-                         MCVList * mcv, VacAttrStats **stats)
+                         MCVList *mcv, VacAttrStats **stats)
 {
        HeapTuple       stup,
                                oldtup;
        {
                /*
                 * Make sure the bitmap contains only user-defined attributes. As
-                * bitmaps can't contain negative values, this can be violated in
-                * two ways. Firstly, the bitmap might contain 0 as a member, and
-                * secondly the integer value might be larger than MaxAttrNumber.
+                * bitmaps can't contain negative values, this can be violated in two
+                * ways. Firstly, the bitmap might contain 0 as a member, and secondly
+                * the integer value might be larger than MaxAttrNumber.
                 */
                Assert(AttrNumberIsForUserDefinedAttr(j));
                Assert(j <= MaxAttrNumber);
        idx = 0;
        for (i = 0; i < numrows; i++)
        {
-               bool    toowide = false;
+               bool            toowide = false;
 
                items[idx].values = &values[idx * numattrs];
                items[idx].isnull = &isnull[idx * numattrs];
                /* load the values/null flags from sample rows */
                for (j = 0; j < numattrs; j++)
                {
-                       Datum   value;
-                       bool    isnull;
+                       Datum           value;
+                       bool            isnull;
 
                        value = heap_getattr(rows[i], attnums[j], tdesc, &isnull);
 
        int                     listidx;
        StatisticExtInfo *stat;
        List       *stat_clauses;
-       Selectivity     simple_sel,
+       Selectivity simple_sel,
                                mcv_sel,
                                mcv_basesel,
                                mcv_totalsel,
         * Pre-process the clauses list to extract the attnums seen in each item.
         * We need to determine if there's any clauses which will be useful for
         * selectivity estimations with extended stats. Along the way we'll record
-        * all of the attnums for each clause in a list which we'll reference later
-        * so we don't need to repeat the same work again. We'll also keep track of
-        * all attnums seen.
+        * all of the attnums for each clause in a list which we'll reference
+        * later so we don't need to repeat the same work again. We'll also keep
+        * track of all attnums seen.
         *
         * We also skip clauses that we already estimated using different types of
         * statistics (we treat them as incompatible).
        }
 
        /*
-        * First compute "simple" selectivity, i.e. without the extended statistics,
-        * and essentially assuming independence of the columns/clauses. We'll then
-        * use the various selectivities computed from MCV list to improve it.
+        * First compute "simple" selectivity, i.e. without the extended
+        * statistics, and essentially assuming independence of the
+        * columns/clauses. We'll then use the various selectivities computed from
+        * MCV list to improve it.
         */
        simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
                                                                                           jointype, sjinfo, NULL);
                                                           JoinType jointype, SpecialJoinInfo *sjinfo,
                                                           RelOptInfo *rel, Bitmapset **estimatedclauses)
 {
-       Selectivity     sel;
+       Selectivity sel;
 
        /* First, try estimating clauses using a multivariate MCV list. */
        sel = statext_mcv_clauselist_selectivity(root, clauses, varRelid, jointype,
                                                                                         sjinfo, rel, estimatedclauses);
 
        /*
-        * Then, apply functional dependencies on the remaining clauses by
-        * calling dependencies_clauselist_selectivity.  Pass 'estimatedclauses'
-        * so the function can properly skip clauses already estimated above.
+        * Then, apply functional dependencies on the remaining clauses by calling
+        * dependencies_clauselist_selectivity.  Pass 'estimatedclauses' so the
+        * function can properly skip clauses already estimated above.
         *
         * The reasoning for applying dependencies last is that the more complex
         * stats can track more complex correlations between the attributes, and
 
         *
         * Using the same algorithm might exclude items that are close to the
         * "average" frequency of the sample. But that does not say whether the
-        * observed frequency is close to the base frequency or not. We also
-        * need to consider unexpectedly uncommon items (again, compared to the
-        * base frequency), and the single-column algorithm does not have to.
+        * observed frequency is close to the base frequency or not. We also need
+        * to consider unexpectedly uncommon items (again, compared to the base
+        * frequency), and the single-column algorithm does not have to.
         *
         * We simply decide how many items to keep by computing minimum count
-        * using get_mincount_for_mcv_list() and then keep all items that seem
-        * to be more common than that.
+        * using get_mincount_for_mcv_list() and then keep all items that seem to
+        * be more common than that.
         */
        mincount = get_mincount_for_mcv_list(numrows, totalrows);
 
        /*
-        * Walk the groups until we find the first group with a count below
-        * the mincount threshold (the index of that group is the number of
-        * groups we want to keep).
+        * Walk the groups until we find the first group with a count below the
+        * mincount threshold (the index of that group is the number of groups we
+        * want to keep).
         */
        for (i = 0; i < nitems; i++)
        {
         */
        if (nitems > 0)
        {
-               int     j;
+               int                     j;
 
                /*
                 * Allocate the MCV list structure, set the global parameters.
  * (or a longer type) instead of using an array of bool items.
  */
 bytea *
-statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
+statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
 {
        int                     i;
        int                     dim;
                        info[dim].nbytes = 0;
                        for (i = 0; i < info[dim].nvalues; i++)
                        {
-                               Size    len;
+                               Size            len;
 
                                values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
 
                        info[dim].nbytes = 0;
                        for (i = 0; i < info[dim].nvalues; i++)
                        {
-                               Size    len;
+                               Size            len;
 
                                /* c-strings include terminator, so +1 byte */
                                values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
         * for each attribute, deduplicated values and items).
         *
         * The header fields are copied one by one, so that we don't need any
-        * explicit alignment (we copy them while deserializing). All fields
-        * after this need to be properly aligned, for direct access.
+        * explicit alignment (we copy them while deserializing). All fields after
+        * this need to be properly aligned, for direct access.
         */
        total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32))
-                       + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
+                                                       + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
 
        /* dimension info */
        total_length += MAXALIGN(ndims * sizeof(DimensionInfo));
                total_length += MAXALIGN(info[i].nbytes);
 
        /*
-        * And finally the items (no additional alignment needed, we start
-        * at proper alignment and the itemsize formula uses MAXALIGN)
+        * And finally the items (no additional alignment needed, we start at
+        * proper alignment and the itemsize formula uses MAXALIGN)
         */
        total_length += mcvlist->nitems * itemsize;
 
        /*
-        * Allocate space for the whole serialized MCV list (we'll skip bytes,
-        * so we set them to zero to make the result more compressible).
+        * Allocate space for the whole serialized MCV list (we'll skip bytes, so
+        * we set them to zero to make the result more compressible).
         */
        raw = palloc0(total_length);
        SET_VARSIZE(raw, total_length);
                HeapTuple       tuple;
                Datum           result;
 
-               StringInfoData  itemValues;
-               StringInfoData  itemNulls;
+               StringInfoData itemValues;
+               StringInfoData itemNulls;
 
                int                     i;
 
                 */
                values = (char **) palloc0(5 * sizeof(char *));
 
-               values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
-               values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
-               values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
+               values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
+               values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
+               values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
 
                outfuncs = (Oid *) palloc0(sizeof(Oid) * mcvlist->ndimensions);
                fmgrinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * mcvlist->ndimensions);
  */
 static bool *
 mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
-                                         Bitmapset *keys, MCVList * mcvlist, bool is_or)
+                                        Bitmapset *keys, MCVList *mcvlist, bool is_or)
 {
        int                     i;
        ListCell   *l;
 
                case RELKIND_MATVIEW:
                        {
                                /*
-                                * Not every table AM uses BLCKSZ wide fixed size
-                                * blocks. Therefore tableam returns the size in bytes - but
-                                * for the purpose of this routine, we want the number of
-                                * blocks. Therefore divide, rounding up.
+                                * Not every table AM uses BLCKSZ wide fixed size blocks.
+                                * Therefore tableam returns the size in bytes - but for the
+                                * purpose of this routine, we want the number of blocks.
+                                * Therefore divide, rounding up.
                                 */
-                               uint64 szbytes;
+                               uint64          szbytes;
 
                                szbytes = table_relation_size(relation, forkNum);
 
 
                         * see LruDelete.
                         */
                        elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
-                               "could not close file \"%s\": %m", vfdP->fileName);
+                                "could not close file \"%s\": %m", vfdP->fileName);
                }
 
                --nfile;
 
        if (rc < 0)
                ereport(ERROR,
                                (errcode_for_socket_access(),
-                                /* translator: %s is a syscall name, such as "poll()" */
+               /* translator: %s is a syscall name, such as "poll()" */
                                 errmsg("%s failed: %m",
                                                "epoll_ctl()")));
 }
                        waiting = false;
                        ereport(ERROR,
                                        (errcode_for_socket_access(),
-                                        /* translator: %s is a syscall name, such as "poll()" */
+                       /* translator: %s is a syscall name, such as "poll()" */
                                         errmsg("%s failed: %m",
                                                        "epoll_wait()")));
                }
                        waiting = false;
                        ereport(ERROR,
                                        (errcode_for_socket_access(),
-                                        /* translator: %s is a syscall name, such as "poll()" */
+                       /* translator: %s is a syscall name, such as "poll()" */
                                         errmsg("%s failed: %m",
                                                        "poll()")));
                }
 
 PostmasterDeathSignalInit(void)
 {
 #ifdef USE_POSTMASTER_DEATH_SIGNAL
-       int             signum = POSTMASTER_DEATH_SIGNAL;
+       int                     signum = POSTMASTER_DEATH_SIGNAL;
 
        /* Register our signal handler. */
        pqsignal(signum, postmaster_death_handler);
 
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 (errmsg("must be superuser to rotate log files with adminpack 1.0"),
-                                 /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                  errhint("Consider using %s, which is part of core, instead.",
                                                  "pg_logfile_rotate()"))));
 
 
                         */
                        if (progress)
                        {
-                               PGPROC *holder = BackendIdGetProc(lockholders->backendId);
+                               PGPROC     *holder = BackendIdGetProc(lockholders->backendId);
 
                                pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
                                                                                         holder->pid);
                        PROGRESS_WAITFOR_DONE,
                        PROGRESS_WAITFOR_CURRENT_PID
                };
-               const int64     values[] = {
+               const int64 values[] = {
                        0, 0, 0
                };
+
                pgstat_progress_update_multi_param(3, index, values);
        }
 
 
  */
 static HTAB *SMgrRelationHash = NULL;
 
-static dlist_head      unowned_relns;
+static dlist_head unowned_relns;
 
 /* local function prototypes */
 static void smgrshutdown(int code, Datum arg);
 void
 AtEOXact_SMgr(void)
 {
-       dlist_mutable_iter      iter;
+       dlist_mutable_iter iter;
 
        /*
         * Zap all unowned SMgrRelations.  We rely on smgrclose() to remove each
         */
        dlist_foreach_modify(iter, &unowned_relns)
        {
-               SMgrRelation    rel = dlist_container(SMgrRelationData, node,
-                                                                                         iter.cur);
+               SMgrRelation rel = dlist_container(SMgrRelationData, node,
+                                                                                  iter.cur);
 
                Assert(rel->smgr_owner == NULL);
 
 
        for (;;)
        {
                /*
-                * Notify the checkpointer about it.  If we fail to queue a message
-                * in retryOnError mode, we have to sleep and try again ... ugly, but
+                * Notify the checkpointer about it.  If we fail to queue a message in
+                * retryOnError mode, we have to sleep and try again ... ugly, but
                 * hopefully won't happen often.
                 *
                 * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
 
 CreateDestReceiver(CommandDest dest)
 {
        /*
-        * It's ok to cast the constness away as any modification of the none receiver
-        * would be a bug (which gets easier to catch this way).
+        * It's ok to cast the constness away as any modification of the none
+        * receiver would be a bug (which gets easier to catch this way).
         */
 
        switch (dest)
 
        int                     len,
                                value;
        bool            fx_mode = false;
+
        /* number of extra skipped characters (more than given in format string) */
        int                     extra_skip = 0;
 
                                /*
                                 * In non FX (fixed format) mode one format string space or
                                 * separator match to one space or separator in input string.
-                                * Or match nothing if there is no space or separator in
-                                * the current position of input string.
+                                * Or match nothing if there is no space or separator in the
+                                * current position of input string.
                                 */
                                extra_skip--;
                                if (isspace((unsigned char) *s) || is_separator_char(s))
                                                                n->key->name)));
                                break;
                        case DCH_TZH:
+
                                /*
                                 * Value of TZH might be negative.  And the issue is that we
                                 * might swallow minus sign as the separator.  So, if we have
-                                * skipped more characters than specified in the format string,
-                                * then we consider prepending last skipped minus to TZH.
+                                * skipped more characters than specified in the format
+                                * string, then we consider prepending last skipped minus to
+                                * TZH.
                                 */
                                if (*s == '+' || *s == '-' || *s == ' ')
                                {
 
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 (errmsg("must be superuser to read files with adminpack 1.0"),
-                                 /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                  errhint("Consider using %s, which is part of core, instead.",
                                                  "pg_file_read()"))));
 
 
 static inline void line_construct(LINE *result, Point *pt, float8 m);
 static inline float8 line_sl(LINE *line);
 static inline float8 line_invsl(LINE *line);
-static bool    line_interpt_line(Point *result, LINE *l1, LINE *l2);
+static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
 static bool line_contain_point(LINE *line, Point *point);
 static float8 line_closept_point(Point *result, LINE *line, Point *pt);
 
 static inline void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
 static inline float8 lseg_sl(LSEG *lseg);
 static inline float8 lseg_invsl(LSEG *lseg);
-static bool    lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
-static bool    lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
+static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
+static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
 static int     lseg_crossing(float8 x, float8 y, float8 px, float8 py);
-static bool    lseg_contain_point(LSEG *lseg, Point *point);
+static bool lseg_contain_point(LSEG *lseg, Point *point);
 static float8 lseg_closept_point(Point *result, LSEG *lseg, Point *pt);
 static float8 lseg_closept_line(Point *result, LSEG *lseg, LINE *line);
 static float8 lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg);
 box_contain_box(BOX *contains_box, BOX *contained_box)
 {
        return FPge(contains_box->high.x, contained_box->high.x) &&
-                  FPle(contains_box->low.x, contained_box->low.x) &&
-                  FPge(contains_box->high.y, contained_box->high.y) &&
-                  FPle(contains_box->low.y, contained_box->low.y);
+               FPle(contains_box->low.x, contained_box->low.x) &&
+               FPge(contains_box->high.y, contained_box->high.y) &&
+               FPle(contains_box->low.y, contained_box->low.y);
 }
 
 
        Assert(path->npts > 0);
 
        /*
-        * The distance from a point to a path is the smallest distance
-        * from the point to any of its constituent segments.
+        * The distance from a point to a path is the smallest distance from the
+        * point to any of its constituent segments.
         */
        for (i = 0; i < path->npts; i++)
        {
        LINE            tmp;
 
        /*
-        * First, we promote the line segment to a line, because we know how
-        * to find the intersection point of two lines.  If they don't have
-        * an intersection point, we are done.
+        * First, we promote the line segment to a line, because we know how to
+        * find the intersection point of two lines.  If they don't have an
+        * intersection point, we are done.
         */
        line_construct(&tmp, &lseg->p[0], lseg_sl(lseg));
        if (!line_interpt_line(&interpt, &tmp, line))
        LINE            tmp;
 
        /*
-        * We drop a perpendicular to find the intersection point.  Ordinarily
-        * we should always find it, but that can fail in the presence of NaN
+        * We drop a perpendicular to find the intersection point.  Ordinarily we
+        * should always find it, but that can fail in the presence of NaN
         * coordinates, and perhaps even from simple roundoff issues.
         */
        line_construct(&tmp, point, line_invsl(line));
                return 0.0;
 
        /*
-        * Then, we find the closest points from the endpoints of the second
-        * line segment, and keep the closest one.
+        * Then, we find the closest points from the endpoints of the second line
+        * segment, and keep the closest one.
         */
        dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]);
        d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]);
 box_contain_point(BOX *box, Point *point)
 {
        return box->high.x >= point->x && box->low.x <= point->x &&
-                  box->high.y >= point->y && box->low.y <= point-> y;
+               box->high.y >= point->y && box->low.y <= point->y;
 }
 
 Datum
 box_contain_lseg(BOX *box, LSEG *lseg)
 {
        return box_contain_point(box, &lseg->p[0]) &&
-                  box_contain_point(box, &lseg->p[1]);
+               box_contain_point(box, &lseg->p[1]);
 }
 
 Datum
 
         */
        if (*str == '-')
        {
-               dummy_lex.input = unconstify(char *, str) + 1;
+               dummy_lex.input = unconstify(char *, str) +1;
                dummy_lex.input_length = len - 1;
        }
        else
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("argument list must have even number of elements"),
-                                /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                 errhint("The arguments of %s must consist of alternating keys and values.",
                                                 "json_build_object()")));
 
 
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("argument list must have even number of elements"),
-                                /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                 errhint("The arguments of %s must consist of alternating keys and values.",
                                                 "jsonb_build_object()")));
 
 
 {
        if (collation && !lc_ctype_is_c(collation) && collation != DEFAULT_COLLATION_OID)
        {
-               pg_locale_t             locale = pg_newlocale_from_collation(collation);
+               pg_locale_t locale = pg_newlocale_from_collation(collation);
 
                if (locale && !locale->deterministic)
                        ereport(ERROR,
 
         * optimized equality or prefix tests use bytewise comparisons, which is
         * not consistent with nondeterministic collations.  The actual
         * pattern-matching implementation functions will later error out that
-        * pattern-matching is not supported with nondeterministic collations.
-        * (We could also error out here, but by doing it later we get more
-        * precise error messages.)  (It should be possible to support at least
+        * pattern-matching is not supported with nondeterministic collations. (We
+        * could also error out here, but by doing it later we get more precise
+        * error messages.)  (It should be possible to support at least
         * Pattern_Prefix_Exact, but no point as along as the actual
         * pattern-matching implementations don't support it.)
         *
 
                         errmsg("invalid input syntax for type %s: \"%s\"",
                                        "smallint", s)));
 
-       return 0;                       /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 }
 
 /*
                         errmsg("invalid input syntax for type %s: \"%s\"",
                                        "integer", s)));
 
-       return 0;                       /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 }
 
 /*
 
                        /* enlarge output space if needed */
                        while (array_idx + matchctx->npatterns * 2 + 1 > array_len)
                        {
-                               array_len += array_len + 1;             /* 2^n-1 => 2^(n+1)-1 */
-                               if (array_len > MaxAllocSize/sizeof(int))
+                               array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */
+                               if (array_len > MaxAllocSize / sizeof(int))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                                         errmsg("too many regular expression matches")));
 
                                for (i = 1; i <= matchctx->npatterns; i++)
                                {
-                                       int             so = pmatch[i].rm_so;
-                                       int             eo = pmatch[i].rm_eo;
+                                       int                     so = pmatch[i].rm_so;
+                                       int                     eo = pmatch[i].rm_eo;
+
                                        matchctx->match_locs[array_idx++] = so;
                                        matchctx->match_locs[array_idx++] = eo;
                                        if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
                        }
                        else
                        {
-                               int             so = pmatch[0].rm_so;
-                               int             eo = pmatch[0].rm_eo;
+                               int                     so = pmatch[0].rm_so;
+                               int                     eo = pmatch[0].rm_eo;
+
                                matchctx->match_locs[array_idx++] = so;
                                matchctx->match_locs[array_idx++] = eo;
                                if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
                 * interest.
                 *
                 * Worst case: assume we need the maximum size (maxlen*eml), but take
-                * advantage of the fact that the original string length in bytes is an
-                * upper bound on the byte length of any fetched substring (and we know
-                * that len+1 is safe to allocate because the varlena header is longer
-                * than 1 byte).
+                * advantage of the fact that the original string length in bytes is
+                * an upper bound on the byte length of any fetched substring (and we
+                * know that len+1 is safe to allocate because the varlena header is
+                * longer than 1 byte).
                 */
                if (maxsiz > orig_len)
                        conv_bufsiz = orig_len + 1;
                }
                else if (buf)
                {
-                       int             len = pg_wchar2mb_with_len(matchctx->wide_str + so,
-                                                                                          buf,
-                                                                                          eo - so);
+                       int                     len = pg_wchar2mb_with_len(matchctx->wide_str + so,
+                                                                                                  buf,
+                                                                                                  eo - so);
+
                        Assert(len < bufsiz);
                        elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len));
                        nulls[i] = false;
 
        if (buf)
        {
-               int             bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
-               int             len;
+               int                     bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
+               int                     len;
 
                endpos = splitctx->match_locs[splitctx->next_match * 2];
                if (endpos < startpos)
                        elog(ERROR, "invalid match starting position");
                len = pg_wchar2mb_with_len(splitctx->wide_str + startpos,
                                                                   buf,
-                                                                  endpos-startpos);
+                                                                  endpos - startpos);
                Assert(len < bufsiz);
                return PointerGetDatum(cstring_to_text_with_len(buf, len));
        }
 
        oldslot = trigdata->tg_trigslot;
 
        /*
-        * If another PK row now exists providing the old key values, we
-        * should not do anything.  However, this check should only be
-        * made in the NO ACTION case; in RESTRICT cases we don't wish to
-        * allow another row to be substituted.
+        * If another PK row now exists providing the old key values, we should
+        * not do anything.  However, this check should only be made in the NO
+        * ACTION case; in RESTRICT cases we don't wish to allow another row to be
+        * substituted.
         */
        if (is_no_action &&
                ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo))
                elog(ERROR, "SPI_connect failed");
 
        /*
-        * Fetch or prepare a saved plan for the restrict lookup (it's the
-        * same query for delete and update cases)
+        * Fetch or prepare a saved plan for the restrict lookup (it's the same
+        * query for delete and update cases)
         */
        ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF);
 
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_SELECT);
 
        if (SPI_finish() != SPI_OK_FINISH)
        }
 
        /*
-        * We have a plan now. Build up the arguments from the key values
-        * in the deleted PK tuple and delete the referencing rows
+        * We have a plan now. Build up the arguments from the key values in the
+        * deleted PK tuple and delete the referencing rows
         */
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_DELETE);
 
        if (SPI_finish() != SPI_OK_FINISH)
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, newslot,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_UPDATE);
 
        if (SPI_finish() != SPI_OK_FINISH)
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_UPDATE);
 
        if (SPI_finish() != SPI_OK_FINISH)
        else
        {
                /*
-                * If we just deleted or updated the PK row whose key was equal to
-                * the FK columns' default values, and a referencing row exists in
-                * the FK table, we would have updated that row to the same values
-                * it already had --- and RI_FKey_fk_upd_check_required would
-                * hence believe no check is necessary.  So we need to do another
-                * lookup now and in case a reference still exists, abort the
-                * operation.  That is already implemented in the NO ACTION
-                * trigger, so just run it.  (This recheck is only needed in the
-                * SET DEFAULT case, since CASCADE would remove such rows in case
-                * of a DELETE operation or would change the FK key values in case
-                * of an UPDATE, while SET NULL is certain to result in rows that
-                * satisfy the FK constraint.)
+                * If we just deleted or updated the PK row whose key was equal to the
+                * FK columns' default values, and a referencing row exists in the FK
+                * table, we would have updated that row to the same values it already
+                * had --- and RI_FKey_fk_upd_check_required would hence believe no
+                * check is necessary.  So we need to do another lookup now and in
+                * case a reference still exists, abort the operation.  That is
+                * already implemented in the NO ACTION trigger, so just run it. (This
+                * recheck is only needed in the SET DEFAULT case, since CASCADE would
+                * remove such rows in case of a DELETE operation or would change the
+                * FK key values in case of an UPDATE, while SET NULL is certain to
+                * result in rows that satisfy the FK constraint.)
                 */
                return ri_restrict(trigdata, true);
        }
        riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true);
 
        /*
-        * If any old key value is NULL, the row could not have been
-        * referenced by an FK row, so no check is needed.
+        * If any old key value is NULL, the row could not have been referenced by
+        * an FK row, so no check is needed.
         */
        if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL)
                return false;
         */
        if (ri_nullcheck == RI_KEYS_ALL_NULL)
                return false;
+
        /*
-        * If some new key values are NULL, the behavior depends on the match type.
+        * If some new key values are NULL, the behavior depends on the match
+        * type.
         */
        else if (ri_nullcheck == RI_KEYS_SOME_NULL)
        {
                switch (riinfo->confmatchtype)
                {
                        case FKCONSTR_MATCH_SIMPLE:
+
                                /*
                                 * If any new key value is NULL, the row must satisfy the
                                 * constraint, so no check is needed.
                                return false;
 
                        case FKCONSTR_MATCH_PARTIAL:
+
                                /*
                                 * Don't know, must run full check.
                                 */
                                break;
 
                        case FKCONSTR_MATCH_FULL:
+
                                /*
                                 * If some new key values are NULL, the row fails the
                                 * constraint.  We must not throw error here, because the row
         */
 
        /*
-        * If the original row was inserted by our own transaction, we
-        * must fire the trigger whether or not the keys are equal.  This
-        * is because our UPDATE will invalidate the INSERT so that the
-        * INSERT RI trigger will not do anything; so we had better do the
-        * UPDATE check.  (We could skip this if we knew the INSERT
-        * trigger already fired, but there is no easy way to know that.)
+        * If the original row was inserted by our own transaction, we must fire
+        * the trigger whether or not the keys are equal.  This is because our
+        * UPDATE will invalidate the INSERT so that the INSERT RI trigger will
+        * not do anything; so we had better do the UPDATE check.  (We could skip
+        * this if we knew the INSERT trigger already fired, but there is no easy
+        * way to know that.)
         */
        xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull);
        Assert(!isnull);
 
         */
        if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
        {
-               bool    gotone = false;
+               bool            gotone = false;
 
                appendStringInfoString(&buf, " (");
 
 
                pg_newlocale_from_collation(collid)->deterministic)
        {
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.
                 */
                if (len1 != len2)
                        result = false;
                pg_newlocale_from_collation(collid)->deterministic)
        {
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.
                 */
                if (len1 != len2)
                        result = true;
        Oid                     collid = PG_GET_COLLATION();
        char       *keydata;
        int                     keylen;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, keydata, keylen);
 
        Oid                     collid = PG_GET_COLLATION();
        char       *keydata;
        int                     keylen;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
 
 {
        int                     len1 = VARSIZE_ANY_EXHDR(t1);
        int                     len2 = VARSIZE_ANY_EXHDR(t2);
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
 
        check_collation_set(collid);
 
                                        len2;
 
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.  In fact, we
-                * don't even have to do a bitwise comparison if we can show the lengths
-                * of the strings are unequal; which might save us from having to detoast
-                * one or both values.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.  In
+                * fact, we don't even have to do a bitwise comparison if we can show
+                * the lengths of the strings are unequal; which might save us from
+                * having to detoast one or both values.
                 */
                len1 = toast_raw_datum_size(arg1);
                len2 = toast_raw_datum_size(arg2);
        Datum           arg1 = PG_GETARG_DATUM(0);
        Datum           arg2 = PG_GETARG_DATUM(1);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        bool            result;
        Size            len1,
                                len2;
 Datum
 text_left(PG_FUNCTION_ARGS)
 {
-       int             n = PG_GETARG_INT32(1);
+       int                     n = PG_GETARG_INT32(1);
 
        if (n < 0)
        {
 
                         * there should be no PartitionDirectory with a pointer to the old
                         * entry.
                         *
-                        * Note that newrel and relation have already been swapped, so
-                        * the "old" partition descriptor is actually the one hanging off
-                        * of newrel.
+                        * Note that newrel and relation have already been swapped, so the
+                        * "old" partition descriptor is actually the one hanging off of
+                        * newrel.
                         */
                        MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt);
                        newrel->rd_partdesc = NULL;
 
 void
 SerializeRelationMap(Size maxSize, char *startAddress)
 {
-       SerializedActiveRelMaps    *relmaps;
+       SerializedActiveRelMaps *relmaps;
 
        Assert(maxSize >= EstimateRelationMapSpace());
 
 void
 RestoreRelationMap(char *startAddress)
 {
-       SerializedActiveRelMaps    *relmaps;
+       SerializedActiveRelMaps *relmaps;
 
        if (active_shared_updates.num_mappings != 0 ||
                active_local_updates.num_mappings != 0 ||
 
        result = heap_getattr(tuple, oidcol,
                                                  SysCache[cacheId]->cc_tupdesc,
                                                  &isNull);
-       Assert(!isNull); /* columns used as oids should never be NULL */
+       Assert(!isNull);                        /* columns used as oids should never be NULL */
        ReleaseSysCache(tuple);
        return result;
 }
 
 record_C_func(HeapTuple procedureTuple,
                          PGFunction user_fn, const Pg_finfo_record *inforec)
 {
-       Oid                     fn_oid =  ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
+       Oid                     fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
        CFuncHashTabEntry *entry;
        bool            found;
 
 
        /* report the result */
        PG_RETURN_UINT64(((uint64) b << 32) | c);
 }
+
 /*
  * string_hash: hash function for keys that are NUL-terminated strings.
  *
 
 
 int                    MyProcPid;
 pg_time_t      MyStartTime;
-TimestampTz    MyStartTimestamp;
+TimestampTz MyStartTimestamp;
 struct Port *MyProcPort;
 int32          MyCancelKey;
 int                    MyPMChildSlot;
 
        AssertState(!OidIsValid(AuthenticatedUserId));
 
        /*
-        * Make sure syscache entries are flushed for recent catalog changes.
-        * This allows us to find roles that were created on-the-fly during
+        * Make sure syscache entries are flushed for recent catalog changes. This
+        * allows us to find roles that were created on-the-fly during
         * authentication.
         */
        AcceptInvalidationMessages();
 
 pg_any_to_server(const char *s, int len, int encoding)
 {
        if (len <= 0)
-               return unconstify(char *, s);           /* empty string is always valid */
+               return unconstify(char *, s);   /* empty string is always valid */
 
        if (encoding == DatabaseEncoding->encoding ||
                encoding == PG_SQL_ASCII)
 pg_server_to_any(const char *s, int len, int encoding)
 {
        if (len <= 0)
-               return unconstify(char *, s);           /* empty string is always valid */
+               return unconstify(char *, s);   /* empty string is always valid */
 
        if (encoding == DatabaseEncoding->encoding ||
                encoding == PG_SQL_ASCII)
-               return unconstify(char *, s);           /* assume data is valid */
+               return unconstify(char *, s);   /* assume data is valid */
 
        if (DatabaseEncoding->encoding == PG_SQL_ASCII)
        {
 
 
 static struct config_enum_entry shared_memory_options[] = {
 #ifndef WIN32
-       { "sysv", SHMEM_TYPE_SYSV, false},
+       {"sysv", SHMEM_TYPE_SYSV, false},
 #endif
 #ifndef EXEC_BACKEND
-       { "mmap", SHMEM_TYPE_MMAP, false},
+       {"mmap", SHMEM_TYPE_MMAP, false},
 #endif
 #ifdef WIN32
-       { "windows", SHMEM_TYPE_WINDOWS, false},
+       {"windows", SHMEM_TYPE_WINDOWS, false},
 #endif
        {NULL, 0, false}
 };
                true,
                NULL, NULL, NULL
        },
+
        /*
         * WITH OIDS support, and consequently default_with_oids, was removed in
         * PostgreSQL 12, but we tolerate the parameter being set to false to
 struct config_generic **
 get_explain_guc_options(int *num)
 {
-       int             i;
+       int                     i;
        struct config_generic **result;
 
        *num = 0;
 
        /*
-        * Allocate enough space to fit all GUC_EXPLAIN options. We may not
-        * need all the space, but there are fairly few such options so we
-        * don't waste a lot of memory.
+        * Allocate enough space to fit all GUC_EXPLAIN options. We may not need
+        * all the space, but there are fairly few such options so we don't waste
+        * a lot of memory.
         */
        result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables);
 
        for (i = 0; i < num_guc_variables; i++)
        {
-               bool modified;
+               bool            modified;
                struct config_generic *conf = guc_variables[i];
 
                /* return only options visible to the user */
                switch (conf->vartype)
                {
                        case PGC_BOOL:
-                       {
-                               struct config_bool *lconf = (struct config_bool *) conf;
-                               modified = (lconf->boot_val != *(lconf->variable));
-                       }
-                       break;
+                               {
+                                       struct config_bool *lconf = (struct config_bool *) conf;
+
+                                       modified = (lconf->boot_val != *(lconf->variable));
+                               }
+                               break;
 
                        case PGC_INT:
                                {
                                        struct config_int *lconf = (struct config_int *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
                        case PGC_REAL:
                                {
                                        struct config_real *lconf = (struct config_real *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
                        case PGC_STRING:
                                {
                                        struct config_string *lconf = (struct config_string *) conf;
+
                                        modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0);
                                }
                                break;
                        case PGC_ENUM:
                                {
                                        struct config_enum *lconf = (struct config_enum *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
 
                        return false;
                }
        }
+
        /*
         * This shouldn't happen: get_best_segment() or make_new_segment()
         * promised that we can successfully allocate npages.
 check_for_freed_segments_locked(dsa_area *area)
 {
        size_t          freed_segment_counter;
-       int             i;
+       int                     i;
 
        Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
        freed_segment_counter = area->control->freed_segment_counter;
 
 "# allows any local user to connect as any PostgreSQL user, including\n" \
 "# the database superuser.  If you do not trust all your local users,\n" \
 "# use another authentication method.\n"
-static bool    authwarning = false;
+static bool authwarning = false;
 
 /*
  * Centralized knowledge of switches to pass to backend
                !(pwprompt || pwfilename))
        {
                pg_log_error("must specify a password for the superuser to enable %s authentication",
-                               (strcmp(authmethodlocal, "md5") == 0 ||
-                                strcmp(authmethodlocal, "password") == 0 ||
-                                strcmp(authmethodlocal, "scram-sha-256") == 0)
-                               ? authmethodlocal
-                               : authmethodhost);
+                                        (strcmp(authmethodlocal, "md5") == 0 ||
+                                         strcmp(authmethodlocal, "password") == 0 ||
+                                         strcmp(authmethodlocal, "scram-sha-256") == 0)
+                                        ? authmethodlocal
+                                        : authmethodhost);
                exit(1);
        }
 }
        char            pg_ctl_path[MAXPGPATH];
 
        /*
-        * Ensure that buffering behavior of stdout matches what it is
-        * in interactive usage (at least on most platforms).  This prevents
+        * Ensure that buffering behavior of stdout matches what it is in
+        * interactive usage (at least on most platforms).  This prevents
         * unexpected output ordering when, eg, output is redirected to a file.
         * POSIX says we must do this before any other usage of these files.
         */
 
                        if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) &&
                                strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
                        {
-                               char            WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
+                               char            WALFilePath[MAXPGPATH * 2]; /* the file path
+                                                                                                                * including archive */
 
                                /*
                                 * Use the original file name again now, including any
 
        {
                /* translator: second %s is an option name */
                pg_log_error("%s needs a slot to be specified using --slot",
-                               do_drop_slot ? "--drop-slot" : "--create-slot");
+                                        do_drop_slot ? "--drop-slot" : "--create-slot");
                fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
                                progname);
                exit(1);
        if (do_drop_slot)
        {
                if (verbose)
-                       pg_log_info("dropping replication slot \"%s\"", replication_slot);
+                       pg_log_info("dropping replication slot \"%s\"", replication_slot);
 
                if (!DropReplicationSlot(conn, replication_slot))
                        exit(1);
 
 
        if (verbose)
                pg_log_info("confirming write up to %X/%X, flush to %X/%X (slot %s)",
-                               (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
-                               (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
-                               replication_slot);
+                                       (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
+                                       (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
+                                       replication_slot);
 
        replybuf[len] = 'r';
        len += 1;
        {
                if (keepalive)
                        pg_log_info("endpos %X/%X reached by keepalive",
-                                       (uint32) (endpos >> 32), (uint32) endpos);
+                                               (uint32) (endpos >> 32), (uint32) endpos);
                else
                        pg_log_info("endpos %X/%X reached by record at %X/%X",
-                                       (uint32) (endpos >> 32), (uint32) (endpos),
-                                       (uint32) (lsn >> 32), (uint32) lsn);
+                                               (uint32) (endpos >> 32), (uint32) (endpos),
+                                               (uint32) (lsn >> 32), (uint32) lsn);
 
        }
 }
 
                const char *serverver = PQparameterStatus(conn, "server_version");
 
                pg_log_error("incompatible server version %s; client does not support streaming from server versions older than %s",
-                               serverver ? serverver : "'unknown'",
-                               "9.3");
+                                        serverver ? serverver : "'unknown'",
+                                        "9.3");
                return false;
        }
        else if (serverMajor > maxServerMajor)
                const char *serverver = PQparameterStatus(conn, "server_version");
 
                pg_log_error("incompatible server version %s; client does not support streaming from server versions newer than %s",
-                               serverver ? serverver : "'unknown'",
-                               PG_VERSION);
+                                        serverver ? serverver : "'unknown'",
+                                        PG_VERSION);
                return false;
        }
        return true;
                        if (stream->startpos > stoppos)
                        {
                                pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X",
-                                               stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
-                                               newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
+                                                        stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
+                                                        newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
                                goto error;
                        }
 
 
                                if (parent == NULL)
                                {
                                        pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
-                                                         inhinfo[i].inhparent,
-                                                         self->dobj.name,
-                                                         oid);
+                                                                inhinfo[i].inhparent,
+                                                                self->dobj.name,
+                                                                oid);
                                        exit_nicely(1);
                                }
                                self->parents[j++] = parent;
 
 
        if (deflateInit(zp, level) != Z_OK)
                fatal("could not initialize compression library: %s",
-                                         zp->msg);
+                         zp->msg);
 
        /* Just be paranoid - maybe End is called after Start, with no Write */
        zp->next_out = (void *) cs->zlibOut;
 
        if (inflateInit(zp) != Z_OK)
                fatal("could not initialize compression library: %s",
-                                         zp->msg);
+                         zp->msg);
 
        /* no minimal chunk size for zlib */
        while ((cnt = readF(AH, &buf, &buflen)))
                        const char *errmsg = gzerror(fp->compressedfp, &errnum);
 
                        fatal("could not read from input file: %s",
-                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
+                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
                }
        }
        else
 
        }
        else
                fatal("unrecognized command received from master: \"%s\"",
-                                         msg);
+                         msg);
 }
 
 /*
        }
        else
                fatal("invalid message received from worker: \"%s\"",
-                                         msg);
+                         msg);
 
        return status;
 }
        }
        else
                fatal("invalid message received from worker: \"%s\"",
-                                         msg);
+                         msg);
 
        /* Free the string returned from getMessageFromWorker */
        free(msg);
        if ((s = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not create socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                return -1;
        }
 
        if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not bind: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if (listen(s, 1) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not listen: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: getsockname() failed: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if ((tmp_sock = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not create second socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not connect socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(handles[1]);
                handles[1] = -1;
                closesocket(s);
        if ((tmp_sock = accept(s, (SOCKADDR *) &serv_addr, &len)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not accept connection: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(handles[1]);
                handles[1] = -1;
                closesocket(s);
 
                                        break;
                                default:
                                        fatal("unexpected section code %d",
-                                                                 (int) te->section);
+                                                 (int) te->section);
                                        break;
                        }
                }
                                                                {
                                                                        /* complain and emit unmodified command */
                                                                        pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
-                                                                                         dropStmtOrig);
+                                                                                                  dropStmtOrig);
                                                                        appendPQExpBufferStr(ftStmt, dropStmt);
                                                                }
                                                        }
                                        _selectOutputSchema(AH, te->namespace);
 
                                        pg_log_info("processing data for table \"%s.%s\"",
-                                                 te->namespace, te->tag);
+                                                               te->namespace, te->tag);
 
                                        /*
                                         * In parallel restore, if we created the table earlier in
 
        pg_log_info(ngettext("restored %d large object",
                                                 "restored %d large objects",
-                                                 AH->blobCount),
-                 AH->blobCount);
+                                                AH->blobCount),
+                               AH->blobCount);
 }
 
 
                        loOid = lo_create(AH->connection, oid);
                        if (loOid == 0 || loOid != oid)
                                fatal("could not create large object %u: %s",
-                                                         oid, PQerrorMessage(AH->connection));
+                                         oid, PQerrorMessage(AH->connection));
                }
                AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
                if (AH->loFd == -1)
                        fatal("could not open large object %u: %s",
-                                                 oid, PQerrorMessage(AH->connection));
+                                 oid, PQerrorMessage(AH->connection));
        }
        else
        {
                te = getTocEntryByDumpId(AH, id);
                if (!te)
                        fatal("could not find entry for ID %d",
-                                                 id);
+                                 id);
 
                /* Mark it wanted */
                ropt->idWanted[id - 1] = true;
                pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)",
                                                          "wrote %lu bytes of large object data (result = %lu)",
                                                          AH->lo_buf_used),
-                         (unsigned long) AH->lo_buf_used, (unsigned long) res);
+                                        (unsigned long) AH->lo_buf_used, (unsigned long) res);
                if (res != AH->lo_buf_used)
                        fatal("could not write to large object (result: %lu, expected: %lu)",
-                                                 (unsigned long) res, (unsigned long) AH->lo_buf_used);
+                                 (unsigned long) res, (unsigned long) AH->lo_buf_used);
        }
        else
        {
        if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
        {
                pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
-                                 AH->currentTE->dumpId,
-                                 AH->currentTE->catalogId.tableoid,
-                                 AH->currentTE->catalogId.oid,
-                                 AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
-                                 AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
-                                 AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+                                          AH->currentTE->dumpId,
+                                          AH->currentTE->catalogId.tableoid,
+                                          AH->currentTE->catalogId.oid,
+                                          AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+                                          AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+                                          AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
        }
        AH->lastErrorStage = AH->stage;
        AH->lastErrorTE = AH->currentTE;
 
                        if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
                                fatal("directory name too long: \"%s\"",
-                                                         AH->fSpec);
+                                         AH->fSpec);
                        if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
                        {
                                AH->format = archDirectory;
 #ifdef HAVE_LIBZ
                        if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
                                fatal("directory name too long: \"%s\"",
-                                                         AH->fSpec);
+                                         AH->fSpec);
                        if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
                        {
                                AH->format = archDirectory;
                        }
 #endif
                        fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
-                                                 AH->fSpec);
+                                 AH->fSpec);
                        fh = NULL;                      /* keep compiler quiet */
                }
                else
                        fatal("could not read input file: %m");
                else
                        fatal("input file is too short (read %lu, expected 5)",
-                                                 (unsigned long) cnt);
+                                 (unsigned long) cnt);
        }
 
        /* Save it, just in case we need it later */
        AH->currUser = NULL;            /* unknown */
        AH->currSchema = NULL;          /* ditto */
        AH->currTablespace = NULL;      /* ditto */
-       AH->currTableAm = NULL; /* ditto */
+       AH->currTableAm = NULL;         /* ditto */
 
        AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
 
                                   void *callback_data)
 {
        pg_log_info("finished item %d %s %s",
-                 te->dumpId, te->desc, te->tag);
+                               te->dumpId, te->desc, te->tag);
 
        if (status != 0)
                fatal("worker process failed: exit code %d",
-                                         status);
+                         status);
 }
 
 
                /* Sanity check */
                if (te->dumpId <= 0)
                        fatal("entry ID %d out of range -- perhaps a corrupt TOC",
-                                                 te->dumpId);
+                                 te->dumpId);
 
                te->hadDumper = ReadInt(AH);
 
                        AH->ReadExtraTocPtr(AH, te);
 
                pg_log_debug("read TOC entry %d (ID %d) for %s %s",
-                         i, te->dumpId, te->desc, te->tag);
+                                        i, te->dumpId, te->desc, te->tag);
 
                /* link completed entry into TOC circular list */
                te->prev = AH->toc->prev;
                encoding = pg_char_to_encoding(ptr1);
                if (encoding < 0)
                        fatal("unrecognized encoding \"%s\"",
-                                                 ptr1);
+                                 ptr1);
                AH->public.encoding = encoding;
        }
        else
                fatal("invalid ENCODING item: %s",
-                                         te->defn);
+                         te->defn);
 
        free(defn);
 }
                AH->public.std_strings = false;
        else
                fatal("invalid STDSTRINGS item: %s",
-                                         te->defn);
+                         te->defn);
 }
 
 static void
                if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
                        /* NOT warn_or_exit_horribly... use -O instead to skip this. */
                        fatal("could not set session user to \"%s\": %s",
-                                                 user, PQerrorMessage(AH->connection));
+                                 user, PQerrorMessage(AH->connection));
 
                PQclear(res);
        }
 _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
 {
        PQExpBuffer cmd;
-       const char *want, *have;
+       const char *want,
+                          *have;
 
        have = AH->currTableAm;
        want = tableam;
        }
 
        pg_log_warning("don't know how to set owner for object type \"%s\"",
-                         type);
+                                  type);
 }
 
 /*
                else
                {
                        pg_log_warning("don't know how to set owner for object type \"%s\"",
-                                         te->desc);
+                                                  te->desc);
                }
        }
 
 
                if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
                        fatal("unsupported version (%d.%d) in file header",
-                                                 vmaj, vmin);
+                                 vmaj, vmin);
 
                AH->intSize = AH->ReadBytePtr(AH);
                if (AH->intSize > 32)
                        fatal("sanity check on integer size (%lu) failed",
-                                                 (unsigned long) AH->intSize);
+                                 (unsigned long) AH->intSize);
 
                if (AH->intSize > sizeof(int))
                        pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
 
                if (AH->format != fmt)
                        fatal("expected format (%d) differs from format found in file (%d)",
-                                                 AH->format, fmt);
+                                 AH->format, fmt);
        }
 
        if (AH->version >= K_VERS_1_2)
                {
                        /* OK, restore the item and update its dependencies */
                        pg_log_info("processing item %d %s %s",
-                                 next_work_item->dumpId,
-                                 next_work_item->desc, next_work_item->tag);
+                                               next_work_item->dumpId,
+                                               next_work_item->desc, next_work_item->tag);
 
                        (void) restore_toc_entry(AH, next_work_item, false);
 
                        if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
                        {
                                pg_log_info("skipping item %d %s %s",
-                                         next_work_item->dumpId,
-                                         next_work_item->desc, next_work_item->tag);
+                                                       next_work_item->dumpId,
+                                                       next_work_item->desc, next_work_item->tag);
                                /* Update its dependencies as though we'd completed it */
                                reduce_dependencies(AH, next_work_item, &ready_list);
                                /* Loop around to see if anything else can be dispatched */
                        }
 
                        pg_log_info("launching item %d %s %s",
-                                 next_work_item->dumpId,
-                                 next_work_item->desc, next_work_item->tag);
+                                               next_work_item->dumpId,
+                                               next_work_item->desc, next_work_item->tag);
 
                        /* Dispatch to some worker */
                        DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
        for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
        {
                pg_log_info("processing missed item %d %s %s",
-                         te->dumpId, te->desc, te->tag);
+                                       te->dumpId, te->desc, te->tag);
                (void) restore_toc_entry(AH, te, false);
        }
 }
        ParallelReadyList *ready_list = (ParallelReadyList *) callback_data;
 
        pg_log_info("finished item %d %s %s",
-                 te->dumpId, te->desc, te->tag);
+                               te->dumpId, te->desc, te->tag);
 
        if (status == WORKER_CREATE_DONE)
                mark_create_done(AH, te);
                AH->public.n_errors++;
        else if (status != 0)
                fatal("worker process failed: exit code %d",
-                                         status);
+                         status);
 
        reduce_dependencies(AH, te, ready_list);
 }
                                te->dependencies[i] = tabledataid;
                                te->dataLength = Max(te->dataLength, tabledatate->dataLength);
                                pg_log_debug("transferring dependency %d -> %d to %d",
-                                         te->dumpId, olddep, tabledataid);
+                                                        te->dumpId, olddep, tabledataid);
                        }
                }
        }
 inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
 {
        pg_log_info("table \"%s\" could not be created, will not restore its data",
-                 te->tag);
+                               te->tag);
 
        if (AH->tableDataId[te->dumpId] != 0)
        {
 
 
                                default:                /* Always have a default */
                                        fatal("unrecognized data block type (%d) while searching archive",
-                                                                 blkType);
+                                                 blkType);
                                        break;
                        }
                        _readBlockHeader(AH, &blkType, &id);
        /* Are we sane? */
        if (id != te->dumpId)
                fatal("found unexpected block ID (%d) when reading data -- expected %d",
-                                         id, te->dumpId);
+                         id, te->dumpId);
 
        switch (blkType)
        {
 
                default:                                /* Always have a default */
                        fatal("unrecognized data block type %d while restoring archive",
-                                                 blkType);
+                                 blkType);
                        break;
        }
 }
        int                     byt;
 
        /*
-        * Note: if we are at EOF with a pre-1.3 input file, we'll fatal()
-        * inside ReadInt rather than returning EOF.  It doesn't seem worth
-        * jumping through hoops to deal with that case better, because no such
-        * files are likely to exist in the wild: only some 7.1 development
-        * versions of pg_dump ever generated such files.
+        * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
+        * ReadInt rather than returning EOF.  It doesn't seem worth jumping
+        * through hoops to deal with that case better, because no such files are
+        * likely to exist in the wild: only some 7.1 development versions of
+        * pg_dump ever generated such files.
         */
        if (AH->version < K_VERS_1_3)
                *type = BLK_DATA;
 
                        remoteversion > AH->public.maxRemoteVersion))
        {
                pg_log_error("server version: %s; %s version: %s",
-                                 remoteversion_str, progname, PG_VERSION);
+                                        remoteversion_str, progname, PG_VERSION);
                fatal("aborting because of server version mismatch");
        }
 
                newuser = requser;
 
        pg_log_info("connecting to database \"%s\" as user \"%s\"",
-                 newdb, newuser);
+                               newdb, newuser);
 
        password = AH->savedPassword;
 
                {
                        if (!PQconnectionNeedsPassword(newConn))
                                fatal("could not reconnect to database: %s",
-                                                         PQerrorMessage(newConn));
+                                         PQerrorMessage(newConn));
                        PQfinish(newConn);
 
                        if (password)
        /* check to see that the backend connection was successfully made */
        if (PQstatus(AH->connection) == CONNECTION_BAD)
                fatal("connection to database \"%s\" failed: %s",
-                                         PQdb(AH->connection) ? PQdb(AH->connection) : "",
-                                         PQerrorMessage(AH->connection));
+                         PQdb(AH->connection) ? PQdb(AH->connection) : "",
+                         PQerrorMessage(AH->connection));
 
        /* Start strict; later phases may override this. */
        PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH,
 die_on_query_failure(ArchiveHandle *AH, const char *query)
 {
        pg_log_error("query failed: %s",
-                         PQerrorMessage(AH->connection));
+                                PQerrorMessage(AH->connection));
        fatal("query was: %s", query);
 }
 
        if (ntups != 1)
                fatal(ngettext("query returned %d row instead of one: %s",
                                           "query returned %d rows instead of one: %s",
-                                                          ntups),
-                                         ntups, query);
+                                          ntups),
+                         ntups, query);
 
        return res;
 }
                if (AH->pgCopyIn &&
                        PQputCopyData(AH->connection, buf, bufLen) <= 0)
                        fatal("error returned by PQputCopyData: %s",
-                                                 PQerrorMessage(AH->connection));
+                                 PQerrorMessage(AH->connection));
        }
        else if (AH->outputKind == OUTPUT_OTHERDATA)
        {
 
                if (PQputCopyEnd(AH->connection, NULL) <= 0)
                        fatal("error returned by PQputCopyEnd: %s",
-                                                 PQerrorMessage(AH->connection));
+                                 PQerrorMessage(AH->connection));
 
                /* Check command status and return to normal libpq state */
                res = PQgetResult(AH->connection);
                /* Do this to ensure we've pumped libpq back to idle state */
                if (PQgetResult(AH->connection) != NULL)
                        pg_log_warning("unexpected extra results during COPY of table \"%s\"",
-                                         tocEntryTag);
+                                                  tocEntryTag);
 
                AH->pgCopyIn = false;
        }
 
 
        if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
 
        return;
                /* Can't overflow because line and fname are the same length. */
                if (sscanf(line, "%u %s\n", &oid, fname) != 2)
                        fatal("invalid line in large object TOC file \"%s\": \"%s\"",
-                                                 fname, line);
+                                 fname, line);
 
                StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
                snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, fname);
        }
        if (!cfeof(ctx->blobsTocFH))
                fatal("error reading large object TOC file \"%s\"",
-                                         fname);
+                         fname);
 
        if (cfclose(ctx->blobsTocFH) != 0)
                fatal("could not close large object TOC file \"%s\": %m",
 
        if (cfwrite(&c, 1, ctx->dataFH) != 1)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
        return 1;
 }
 
        if (cfwrite(buf, len, ctx->dataFH) != len)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
        return;
 }
 
                                        const char *errmsg = gzerror(th->zFH, &errnum);
 
                                        fatal("could not read from input file: %s",
-                                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
+                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
 #else
                                        fatal("could not read from input file: %s",
-                                                                 strerror(errno));
+                                                 strerror(errno));
 #endif
                                }
                        }
                        if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
                                strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
                                fatal("unexpected COPY statement syntax: \"%s\"",
-                                                         te->copyStmt);
+                                         te->copyStmt);
 
                        /* Emit all but the FROM part ... */
                        ahwrite(te->copyStmt, 1, pos1, AH);
                snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) len);
                snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) th->fileLen);
                fatal("actual file length (%s) does not match expected (%s)",
-                                         buf1, buf2);
+                         buf1, buf2);
        }
 
        pad = ((len + 511) & ~511) - len;
                snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ctx->tarFHpos);
                snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ctx->tarNextMember);
                pg_log_debug("moving from position %s to next member at file position %s",
-                         buf1, buf2);
+                                        buf1, buf2);
 
                while (ctx->tarFHpos < ctx->tarNextMember)
                        _tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
                id = atoi(th->targetFile);
                if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
                        fatal("restoring data out of order is not supported in this archive format: "
-                                                 "\"%s\" is required, but comes before \"%s\" in the archive file.",
-                                                 th->targetFile, filename);
+                                 "\"%s\" is required, but comes before \"%s\" in the archive file.",
+                                 th->targetFile, filename);
 
                /* Header doesn't match, so read to next header */
                len = ((th->fileLen + 511) & ~511); /* Padded length */
                if (len != 512)
                        fatal(ngettext("incomplete tar header found (%lu byte)",
                                                   "incomplete tar header found (%lu bytes)",
-                                                                  len),
-                                                 (unsigned long) len);
+                                                  len),
+                                 (unsigned long) len);
 
                /* Calc checksum */
                chk = tarChecksum(h);
                snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) hPos);
                snprintf(lenbuf, sizeof(lenbuf), UINT64_FORMAT, (uint64) len);
                pg_log_debug("TOC Entry %s at %s (length %s, checksum %d)",
-                         tag, posbuf, lenbuf, sum);
+                                        tag, posbuf, lenbuf, sum);
        }
 
        if (chk != sum)
                snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT,
                                 (uint64) ftello(ctx->tarFH));
                fatal("corrupt tar header found in %s (expected %d, computed %d) file position %s",
-                                         tag, sum, chk, posbuf);
+                         tag, sum, chk, posbuf);
        }
 
        th->targetFile = pg_strdup(tag);
 
 
 /* override for standard extra_float_digits setting */
 static bool have_extra_float_digits = false;
-static int extra_float_digits;
+static int     extra_float_digits;
 
 /*
  * The default number of rows per INSERT when
                                        errno == ERANGE)
                                {
                                        pg_log_error("rows-per-insert must be in range %d..%d",
-                                                         1, INT_MAX);
+                                                                1, INT_MAX);
                                        exit_nicely(1);
                                }
                                dopt.dump_inserts = (int) rowsPerInsert;
                ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
 
        /*
-        * Use an explicitly specified extra_float_digits if it has been
-        * provided. Otherwise, set extra_float_digits so that we can dump float
-        * data exactly (given correctly implemented float I/O code, anyway).
+        * Use an explicitly specified extra_float_digits if it has been provided.
+        * Otherwise, set extra_float_digits so that we can dump float data
+        * exactly (given correctly implemented float I/O code, anyway).
         */
        if (have_extra_float_digits)
        {
                PQExpBuffer q = createPQExpBuffer();
+
                appendPQExpBuffer(q, "SET extra_float_digits TO %d",
                                                  extra_float_digits);
                ExecuteSqlStatement(AH, q->data);
        /* Do this to ensure we've pumped libpq back to idle state */
        if (PQgetResult(conn) != NULL)
                pg_log_warning("unexpected extra results during COPY of table \"%s\"",
-                                 classname);
+                                          classname);
 
        destroyPQExpBuffer(q);
        return 1;
                        loFd = lo_open(conn, blobOid, INV_READ);
                        if (loFd == -1)
                                fatal("could not open large object %u: %s",
-                                                         blobOid, PQerrorMessage(conn));
+                                         blobOid, PQerrorMessage(conn));
 
                        StartBlob(fout, blobOid);
 
                                cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
                                if (cnt < 0)
                                        fatal("error reading large object %u: %s",
-                                                                 blobOid, PQerrorMessage(conn));
+                                                 blobOid, PQerrorMessage(conn));
 
                                WriteData(fout, buf, cnt);
                        } while (cnt > 0);
        else
        {
                pg_log_error("unexpected policy command type: %c",
-                                 polinfo->polcmd);
+                                        polinfo->polcmd);
                exit_nicely(1);
        }
 
 
                if (strlen(pubinfo[i].rolname) == 0)
                        pg_log_warning("owner of publication \"%s\" appears to be invalid",
-                                         pubinfo[i].dobj.name);
+                                                  pubinfo[i].dobj.name);
 
                /* Decide whether we want to dump it */
                selectDumpableObject(&(pubinfo[i].dobj), fout);
 
                if (strlen(subinfo[i].rolname) == 0)
                        pg_log_warning("owner of subscription \"%s\" appears to be invalid",
-                                         subinfo[i].dobj.name);
+                                                  subinfo[i].dobj.name);
 
                /* Decide whether we want to dump it */
                selectDumpableObject(&(subinfo[i].dobj), fout);
        }
        if (extobj == NULL)
                fatal("could not find parent extension for %s %s",
-                                         objtype, objname);
+                         objtype, objname);
 
        appendPQExpBufferStr(upgrade_buffer,
                                                 "\n-- For binary upgrade, handle extension membership the hard way\n");
 
                if (strlen(nsinfo[i].rolname) == 0)
                        pg_log_warning("owner of schema \"%s\" appears to be invalid",
-                                         nsinfo[i].dobj.name);
+                                                  nsinfo[i].dobj.name);
        }
 
        PQclear(res);
 
                if (strlen(tyinfo[i].rolname) == 0)
                        pg_log_warning("owner of data type \"%s\" appears to be invalid",
-                                         tyinfo[i].dobj.name);
+                                                  tyinfo[i].dobj.name);
        }
 
        *numTypes = ntups;
 
                if (strlen(oprinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator \"%s\" appears to be invalid",
-                                         oprinfo[i].dobj.name);
+                                                  oprinfo[i].dobj.name);
        }
 
        PQclear(res);
 
                if (strlen(opcinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator class \"%s\" appears to be invalid",
-                                         opcinfo[i].dobj.name);
+                                                  opcinfo[i].dobj.name);
        }
 
        PQclear(res);
 
                if (strlen(opfinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator family \"%s\" appears to be invalid",
-                                         opfinfo[i].dobj.name);
+                                                  opfinfo[i].dobj.name);
        }
 
        PQclear(res);
                agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
                if (strlen(agginfo[i].aggfn.rolname) == 0)
                        pg_log_warning("owner of aggregate function \"%s\" appears to be invalid",
-                                         agginfo[i].aggfn.dobj.name);
+                                                  agginfo[i].aggfn.dobj.name);
                agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
                agginfo[i].aggfn.prorettype = InvalidOid;       /* not saved */
                agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl));
 
                if (strlen(finfo[i].rolname) == 0)
                        pg_log_warning("owner of function \"%s\" appears to be invalid",
-                                         finfo[i].dobj.name);
+                                                  finfo[i].dobj.name);
        }
 
        PQclear(res);
                /* Emit notice if join for owner failed */
                if (strlen(tblinfo[i].rolname) == 0)
                        pg_log_warning("owner of table \"%s\" appears to be invalid",
-                                         tblinfo[i].dobj.name);
+                                                  tblinfo[i].dobj.name);
        }
 
        if (dopt->lockWaitTimeout)
                owning_tab = findTableByOid(seqinfo->owning_tab);
                if (owning_tab == NULL)
                        fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
-                                                 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+                                 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
 
                /*
                 * Only dump identity sequences if we're going to dump the table that
                ruleinfo[i].ruletable = findTableByOid(ruletableoid);
                if (ruleinfo[i].ruletable == NULL)
                        fatal("failed sanity check, parent table with OID %u of pg_rewrite entry with OID %u not found",
-                                                 ruletableoid, ruleinfo[i].dobj.catId.oid);
+                                 ruletableoid, ruleinfo[i].dobj.catId.oid);
                ruleinfo[i].dobj.namespace = ruleinfo[i].ruletable->dobj.namespace;
                ruleinfo[i].dobj.dump = ruleinfo[i].ruletable->dobj.dump;
                ruleinfo[i].ev_type = *(PQgetvalue(res, i, i_ev_type));
                                        {
                                                if (PQgetisnull(res, j, i_tgconstrrelname))
                                                        fatal("query produced null referenced table name for foreign key trigger \"%s\" on table \"%s\" (OID of table: %u)",
-                                                                                 tginfo[j].dobj.name,
-                                                                                 tbinfo->dobj.name,
-                                                                                 tginfo[j].tgconstrrelid);
+                                                                 tginfo[j].dobj.name,
+                                                                 tbinfo->dobj.name,
+                                                                 tginfo[j].tgconstrrelid);
                                                tginfo[j].tgconstrrelname = pg_strdup(PQgetvalue(res, j, i_tgconstrrelname));
                                        }
                                        else
                {
                        if (j + 1 != atoi(PQgetvalue(res, j, i_attnum)))
                                fatal("invalid column numbering in table \"%s\"",
-                                                         tbinfo->dobj.name);
+                                         tbinfo->dobj.name);
                        tbinfo->attnames[j] = pg_strdup(PQgetvalue(res, j, i_attname));
                        tbinfo->atttypnames[j] = pg_strdup(PQgetvalue(res, j, i_atttypname));
                        tbinfo->atttypmod[j] = atoi(PQgetvalue(res, j, i_atttypmod));
 
                                if (adnum <= 0 || adnum > ntups)
                                        fatal("invalid adnum value %d for table \"%s\"",
-                                                                 adnum, tbinfo->dobj.name);
+                                                 adnum, tbinfo->dobj.name);
 
                                /*
                                 * dropped columns shouldn't have defaults, but just in case,
                                pg_log_error(ngettext("expected %d check constraint on table \"%s\" but found %d",
                                                                          "expected %d check constraints on table \"%s\" but found %d",
                                                                          tbinfo->ncheck),
-                                                 tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
+                                                        tbinfo->ncheck, tbinfo->dobj.name, numConstrs);
                                pg_log_error("(The system catalogs might be corrupted.)");
                                exit_nicely(1);
                        }
                dumpUndefinedType(fout, tyinfo);
        else
                pg_log_warning("typtype of data type \"%s\" appears to be invalid",
-                                 tyinfo->dobj.name);
+                                          tyinfo->dobj.name);
 }
 
 /*
                        appendPQExpBufferStr(q, " STABLE");
                else if (provolatile[0] != PROVOLATILE_VOLATILE)
                        fatal("unrecognized provolatile value for function \"%s\"",
-                                                 finfo->dobj.name);
+                                 finfo->dobj.name);
        }
 
        if (proisstrict[0] == 't')
                        appendPQExpBufferStr(q, " PARALLEL RESTRICTED");
                else if (proparallel[0] != PROPARALLEL_UNSAFE)
                        fatal("unrecognized proparallel value for function \"%s\"",
-                                                 finfo->dobj.name);
+                                 finfo->dobj.name);
        }
 
        for (i = 0; i < nconfigitems; i++)
                funcInfo = findFuncByOid(cast->castfunc);
                if (funcInfo == NULL)
                        fatal("could not find function definition for function with OID %u",
-                                                 cast->castfunc);
+                                 cast->castfunc);
        }
 
        defqry = createPQExpBuffer();
                fromsqlFuncInfo = findFuncByOid(transform->trffromsql);
                if (fromsqlFuncInfo == NULL)
                        fatal("could not find function definition for function with OID %u",
-                                                 transform->trffromsql);
+                                 transform->trffromsql);
        }
        if (OidIsValid(transform->trftosql))
        {
                tosqlFuncInfo = findFuncByOid(transform->trftosql);
                if (tosqlFuncInfo == NULL)
                        fatal("could not find function definition for function with OID %u",
-                                                 transform->trftosql);
+                                 transform->trftosql);
        }
 
        defqry = createPQExpBuffer();
        if (oprInfo == NULL)
        {
                pg_log_warning("could not find operator with OID %s",
-                                 oproid);
+                                          oproid);
                return NULL;
        }
 
                        break;
                default:
                        pg_log_warning("invalid type \"%c\" of access method \"%s\"",
-                                         aminfo->amtype, qamname);
+                                                  aminfo->amtype, qamname);
                        destroyPQExpBuffer(q);
                        destroyPQExpBuffer(delq);
                        free(qamname);
                appendPQExpBufferStr(q, "default");
        else
                fatal("unrecognized collation provider: %s\n",
-                                         collprovider);
+                         collprovider);
 
        if (strcmp(PQgetvalue(res, 0, i_collisdeterministic), "f") == 0)
                appendPQExpBufferStr(q, ", deterministic = false");
        if (!convertok)
        {
                pg_log_warning("aggregate function %s could not be dumped correctly for this database version; ignored",
-                                 aggsig);
+                                          aggsig);
 
                if (aggfullsig)
                        free(aggfullsig);
                                        break;
                                default:
                                        fatal("unrecognized aggfinalmodify value for aggregate \"%s\"",
-                                                                 agginfo->aggfn.dobj.name);
+                                                 agginfo->aggfn.dobj.name);
                                        break;
                        }
                }
                                        break;
                                default:
                                        fatal("unrecognized aggmfinalmodify value for aggregate \"%s\"",
-                                                                 agginfo->aggfn.dobj.name);
+                                                 agginfo->aggfn.dobj.name);
                                        break;
                        }
                }
                        appendPQExpBufferStr(details, ",\n    PARALLEL = restricted");
                else if (proparallel[0] != PROPARALLEL_UNSAFE)
                        fatal("unrecognized proparallel value for function \"%s\"",
-                                                 agginfo->aggfn.dobj.name);
+                                 agginfo->aggfn.dobj.name);
        }
 
        appendPQExpBuffer(delq, "DROP AGGREGATE %s.%s;\n",
                default:
                        /* shouldn't get here */
                        fatal("unrecognized object type in default privileges: %d",
-                                                 (int) daclinfo->defaclobjtype);
+                                 (int) daclinfo->defaclobjtype);
                        type = "";                      /* keep compiler quiet */
        }
 
                                                                 fout->remoteVersion,
                                                                 q))
                fatal("could not parse default ACL list (%s)",
-                                         daclinfo->defaclacl);
+                         daclinfo->defaclacl);
 
        if (daclinfo->dobj.dump & DUMP_COMPONENT_ACL)
                ArchiveEntry(fout, daclinfo->dobj.catId, daclinfo->dobj.dumpId,
                                                          initacls, initracls, owner,
                                                          "", fout->remoteVersion, sql))
                        fatal("could not parse initial GRANT ACL list (%s) or initial REVOKE ACL list (%s) for object \"%s\" (%s)",
-                                                 initacls, initracls, name, type);
+                                 initacls, initracls, name, type);
                appendPQExpBuffer(sql, "SELECT pg_catalog.binary_upgrade_set_record_init_privs(false);\n");
        }
 
                                                  acls, racls, owner,
                                                  "", fout->remoteVersion, sql))
                fatal("could not parse GRANT ACL list (%s) or REVOKE ACL list (%s) for object \"%s\" (%s)",
-                                         acls, racls, name, type);
+                         acls, racls, name, type);
 
        if (sql->len > 0)
        {
        {
                if (PQntuples(res) < 1)
                        fatal("query to obtain definition of view \"%s\" returned no data",
-                                                 tbinfo->dobj.name);
+                                 tbinfo->dobj.name);
                else
                        fatal("query to obtain definition of view \"%s\" returned more than one definition",
-                                                 tbinfo->dobj.name);
+                                 tbinfo->dobj.name);
        }
 
        len = PQgetlength(res, 0, 0);
 
        if (len == 0)
                fatal("definition of view \"%s\" appears to be empty (length zero)",
-                                         tbinfo->dobj.name);
+                         tbinfo->dobj.name);
 
        /* Strip off the trailing semicolon so that other things may follow. */
        Assert(PQgetvalue(res, 0, 0)[len - 1] == ';');
 
        if (tbinfo->hasoids)
                pg_log_warning("WITH OIDS is not supported anymore (table \"%s\")",
-                                 qrelname);
+                                          qrelname);
 
        if (dopt->binary_upgrade)
                binary_upgrade_set_type_oids_by_rel_oid(fout, q,
                         */
                        if (tbinfo->numParents != 1)
                                fatal("invalid number of parents %d for table \"%s\"",
-                                                         tbinfo->numParents, tbinfo->dobj.name);
+                                         tbinfo->numParents, tbinfo->dobj.name);
 
                        appendPQExpBuffer(q, " PARTITION OF %s",
                                                          fmtQualifiedDumpable(parentRel));
 
        if (tbinfo->dobj.dump & DUMP_COMPONENT_DEFINITION)
        {
-               char *tableam = NULL;
+               char       *tableam = NULL;
 
                if (tbinfo->relkind == RELKIND_RELATION ||
                        tbinfo->relkind == RELKIND_MATVIEW)
                        return "tableoid";
        }
        fatal("invalid column number %d for table \"%s\"",
-                                 attrnum, tblInfo->dobj.name);
+                 attrnum, tblInfo->dobj.name);
        return NULL;                            /* keep compiler quiet */
 }
 
 
                if (indxinfo == NULL)
                        fatal("missing index for constraint \"%s\"",
-                                                 coninfo->dobj.name);
+                                 coninfo->dobj.name);
 
                if (dopt->binary_upgrade)
                        binary_upgrade_set_pg_class_oids(fout, q,
        else
        {
                fatal("unrecognized constraint type: %c",
-                                         coninfo->contype);
+                         coninfo->contype);
        }
 
        /* Dump Constraint Comments --- only works for table constraints */
        {
                pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
                                                          "query to get data of sequence \"%s\" returned %d rows (expected 1)",
-                                                                PQntuples(res)),
-                                 tbinfo->dobj.name, PQntuples(res));
+                                                         PQntuples(res)),
+                                        tbinfo->dobj.name, PQntuples(res));
                exit_nicely(1);
        }
 
 
                if (owning_tab == NULL)
                        fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
-                                                 tbinfo->owning_tab, tbinfo->dobj.catId.oid);
+                                 tbinfo->owning_tab, tbinfo->dobj.catId.oid);
 
                if (owning_tab->dobj.dump & DUMP_COMPONENT_DEFINITION)
                {
        {
                pg_log_error(ngettext("query to get data of sequence \"%s\" returned %d row (expected 1)",
                                                          "query to get data of sequence \"%s\" returned %d rows (expected 1)",
-                                                                PQntuples(res)),
-                                 tbinfo->dobj.name, PQntuples(res));
+                                                         PQntuples(res)),
+                                        tbinfo->dobj.name, PQntuples(res));
                exit_nicely(1);
        }
 
                        {
                                /* hm, not found before end of bytea value... */
                                pg_log_error("invalid argument string (%s) for trigger \"%s\" on table \"%s\"",
-                                                 tginfo->tgargs,
-                                                 tginfo->dobj.name,
-                                                 tbinfo->dobj.name);
+                                                        tginfo->tgargs,
+                                                        tginfo->dobj.name,
+                                                        tbinfo->dobj.name);
                                exit_nicely(1);
                        }
 
                if (PQntuples(res) != 1)
                {
                        pg_log_error("query to get rule \"%s\" for table \"%s\" failed: wrong number of rows returned",
-                                         rinfo->dobj.name, tbinfo->dobj.name);
+                                                rinfo->dobj.name, tbinfo->dobj.name);
                        exit_nicely(1);
                }
 
                {
 #ifdef NOT_USED
                        pg_log_warning("no referencing object %u %u",
-                                       objId.tableoid, objId.oid);
+                                                  objId.tableoid, objId.oid);
 #endif
                        continue;
                }
                {
 #ifdef NOT_USED
                        pg_log_warning("no referenced object %u %u",
-                                       refobjId.tableoid, refobjId.oid);
+                                                  refobjId.tableoid, refobjId.oid);
 #endif
                        continue;
                }
 
 static PGresult *executeQuery(PGconn *conn, const char *query);
 static void executeCommand(PGconn *conn, const char *query);
 static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns,
-                                                                  SimpleStringList *names);
+                                          SimpleStringList *names);
 
 static char pg_dump_bin[MAXPGPATH];
 static const char *progname;
 
        /*
         * The loop below runs multiple SELECTs, which might sometimes result in
-        * duplicate entries in the name list, but we don't care, since all
-        * we're going to do is test membership of the list.
+        * duplicate entries in the name list, but we don't care, since all we're
+        * going to do is test membership of the list.
         */
 
        for (SimpleStringListCell *cell = patterns->head; cell; cell = cell->next)
 
 
                        default:
                                pg_log_error("unrecognized archive format \"%s\"; please specify \"c\", \"d\", or \"t\"",
-                                                 opts->formatName);
+                                                        opts->formatName);
                                exit_nicely(1);
                }
        }
 
                {
                        if (is_source)
                                pg_log_debug("entry \"%s\" excluded from source file list",
-                                          path);
+                                                        path);
                        else
                                pg_log_debug("entry \"%s\" excluded from target file list",
-                                          path);
+                                                        path);
                        return true;
                }
        }
                {
                        if (is_source)
                                pg_log_debug("entry \"%s\" excluded from source file list",
-                                          path);
+                                                        path);
                        else
                                pg_log_debug("entry \"%s\" excluded from target file list",
-                                          path);
+                                                        path);
                        return true;
                }
        }
                        entry->pagemap.bitmapsize > 0)
                {
                        pg_log_debug("%s (%s)", entry->path,
-                                  action_to_str(entry->action));
+                                                action_to_str(entry->action));
 
                        if (entry->pagemap.bitmapsize > 0)
                                datapagemap_print(&entry->pagemap);
 
                if (PQgetisnull(res, 0, 2))
                {
                        pg_log_debug("received null value for chunk for file \"%s\", file has been deleted",
-                                  filename);
+                                                filename);
                        remove_target_file(filename, true);
                        pg_free(filename);
                        PQclear(res);
                 */
                snprintf(chunkoff_str, sizeof(chunkoff_str), INT64_FORMAT, chunkoff);
                pg_log_debug("received chunk for file \"%s\", offset %s, size %d",
-                          filename, chunkoff_str, chunksize);
+                                        filename, chunkoff_str, chunksize);
 
                open_target_file(filename, false);
 
 
                        pg_log_error("could not read file \"%s\": %m", xlogfpath);
                else
                        pg_log_error("could not read file \"%s\": read %d of %zu",
-                                  xlogfpath, r, (Size) XLOG_BLCKSZ);
+                                                xlogfpath, r, (Size) XLOG_BLCKSZ);
 
                return -1;
        }
 
 char      *datadir_source = NULL;
 char      *connstr_source = NULL;
 
-static bool    debug = false;
+static bool debug = false;
 bool           showprogress = false;
 bool           dry_run = false;
 bool           do_sync = true;
        {
                findCommonAncestorTimeline(&divergerec, &lastcommontliIndex);
                pg_log_info("servers diverged at WAL location %X/%X on timeline %u",
-                          (uint32) (divergerec >> 32), (uint32) divergerec,
-                          targetHistory[lastcommontliIndex].tli);
+                                       (uint32) (divergerec >> 32), (uint32) divergerec,
+                                       targetHistory[lastcommontliIndex].tli);
 
                /*
                 * Check for the possibility that the target is in fact a direct
                                           lastcommontliIndex,
                                           &chkptrec, &chkpttli, &chkptredo);
        pg_log_info("rewinding from last common checkpoint at %X/%X on timeline %u",
-                  (uint32) (chkptrec >> 32), (uint32) chkptrec,
-                  chkpttli);
+                               (uint32) (chkptrec >> 32), (uint32) chkptrec,
+                               chkpttli);
 
        /*
         * Build the filemap, by comparing the source and target data directories.
        if (showprogress)
        {
                pg_log_info("need to copy %lu MB (total source directory size is %lu MB)",
-                          (unsigned long) (filemap->fetch_size / (1024 * 1024)),
-                          (unsigned long) (filemap->total_size / (1024 * 1024)));
+                                       (unsigned long) (filemap->fetch_size / (1024 * 1024)),
+                                       (unsigned long) (filemap->total_size / (1024 * 1024)));
 
                fetch_size = filemap->fetch_size;
                fetch_done = 0;
                         fetch_size / 1024);
 
        fprintf(stderr, _("%*s/%s kB (%d%%) copied"),
-                  (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
-                  percent);
+                       (int) strlen(fetch_size_str), fetch_done_str, fetch_size_str,
+                       percent);
        if (isatty(fileno(stderr)))
                fprintf(stderr, "\r");
        else
 
                        entry = &history[i];
                        pg_log_debug("%d: %X/%X - %X/%X", entry->tli,
-                                  (uint32) (entry->begin >> 32), (uint32) (entry->begin),
-                                  (uint32) (entry->end >> 32), (uint32) (entry->end));
+                                                (uint32) (entry->begin >> 32), (uint32) (entry->begin),
+                                                (uint32) (entry->end >> 32), (uint32) (entry->end));
                }
        }
 
 
                                if (p == NULL || strlen(p) <= 1)
                                        pg_fatal("%d: database cluster state problem\n", __LINE__);
 
-                               p++;                            /* remove ':' char */
+                               p++;                    /* remove ':' char */
 
                                /*
-                                * We checked earlier for a postmaster lock file, and if we found
-                                * one, we tried to start/stop the server to replay the WAL.  However,
-                                * pg_ctl -m immediate doesn't leave a lock file, but does require
-                                * WAL replay, so we check here that the server was shut down cleanly,
-                                * from the controldata perspective.
+                                * We checked earlier for a postmaster lock file, and if we
+                                * found one, we tried to start/stop the server to replay the
+                                * WAL.  However, pg_ctl -m immediate doesn't leave a lock
+                                * file, but does require WAL replay, so we check here that
+                                * the server was shut down cleanly, from the controldata
+                                * perspective.
                                 */
                                /* remove leading spaces */
                                while (*p == ' ')
 
                return cmp;
        else
                return ((const LibraryInfo *) p1)->dbnum -
-                          ((const LibraryInfo *) p2)->dbnum;
+                       ((const LibraryInfo *) p2)->dbnum;
 }
 
 
                {
                        /*
                         * In Postgres 9.0, Python 3 support was added, and to do that, a
-                        * plpython2u language was created with library name plpython2.so as a
-                        * symbolic link to plpython.so.  In Postgres 9.1, only the
-                        * plpython2.so library was created, and both plpythonu and plpython2u
-                        * pointing to it.  For this reason, any reference to library name
-                        * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
-                        * the new cluster.
+                        * plpython2u language was created with library name plpython2.so
+                        * as a symbolic link to plpython.so.  In Postgres 9.1, only the
+                        * plpython2.so library was created, and both plpythonu and
+                        * plpython2u pointing to it.  For this reason, any reference to
+                        * library name "plpython" in an old PG <= 9.1 cluster must look
+                        * for "plpython2" in the new cluster.
                         *
-                        * For this case, we could check pg_pltemplate, but that only works
-                        * for languages, and does not help with function shared objects, so
-                        * we just do a general fix.
+                        * For this case, we could check pg_pltemplate, but that only
+                        * works for languages, and does not help with function shared
+                        * objects, so we just do a general fix.
                         */
                        if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
                                strcmp(lib, "$libdir/plpython") == 0)
 
                if (was_load_failure)
                        fprintf(script, _("Database: %s\n"),
-                               old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
+                                       old_cluster.dbarr.dbs[os_info.libraries[libnum].dbnum].db_name);
        }
 
        PQfinish(conn);
 
                else if (!XLByteInSeg(private.startptr, segno, WalSegSz))
                {
                        pg_log_error("start WAL location %X/%X is not inside file \"%s\"",
-                                       (uint32) (private.startptr >> 32),
-                                       (uint32) private.startptr,
-                                       fname);
+                                                (uint32) (private.startptr >> 32),
+                                                (uint32) private.startptr,
+                                                fname);
                        goto bad_argument;
                }
 
                        private.endptr != (segno + 1) * WalSegSz)
                {
                        pg_log_error("end WAL location %X/%X is not inside file \"%s\"",
-                                       (uint32) (private.endptr >> 32),
-                                       (uint32) private.endptr,
-                                       argv[argc - 1]);
+                                                (uint32) (private.endptr >> 32),
+                                                (uint32) private.endptr,
+                                                argv[argc - 1]);
                        goto bad_argument;
                }
        }
 
 int                    nclients = 1;           /* number of clients */
 int                    nthreads = 1;           /* number of threads */
 bool           is_connect;                     /* establish connection for each transaction */
-bool           report_per_command;     /* report per-command latencies */
+bool           report_per_command; /* report per-command latencies */
 int                    main_pid;                       /* main process id used in log filename */
 
 char      *pghost = "";
 
        /*
         * Separate randomness for each thread. Each thread option uses its own
-        * random state to make all of them independent of each other and therefore
-        * deterministic at the thread level.
+        * random state to make all of them independent of each other and
+        * therefore deterministic at the thread level.
         */
        RandomState ts_choose_rs;       /* random state for selecting a script */
-       RandomState ts_throttle_rs;     /* random state for transaction throttling */
+       RandomState ts_throttle_rs; /* random state for transaction throttling */
        RandomState ts_sample_rs;       /* random state for log sampling */
 
        int64           throttle_trigger;       /* previous/next throttling (us) */
 bool
 strtodouble(const char *str, bool errorOK, double *dv)
 {
-       char *end;
+       char       *end;
 
        errno = 0;
        *dv = strtod(str, &end);
        else if (is_an_int(var->svalue))
        {
                /* if it looks like an int, it must be an int without overflow */
-               int64 iv;
+               int64           iv;
 
                if (!strtoint64(var->svalue, false, &iv))
                        return false;
 
        while (res != NULL)
        {
-               bool    is_last;
+               bool            is_last;
 
                /* peek at the next result to know whether the current is last */
                next_res = PQgetResult(st->con);
 
                !is_branching_command(cmd))
        {
                pg_log_warning("\\%s command ignored; use \\endif or Ctrl-C to exit current \\if block",
-                                  cmd);
+                                          cmd);
        }
 
        if (strcmp(cmd, "a") == 0)
                        if (!pw)
                        {
                                pg_log_error("could not get home directory for user ID %ld: %s",
-                                                  (long) user_id,
-                                                  errno ? strerror(errno) : _("user does not exist"));
+                                                        (long) user_id,
+                                                        errno ? strerror(errno) : _("user does not exist"));
                                exit(EXIT_FAILURE);
                        }
                        dir = pw->pw_dir;
                                                                  sverbuf, sizeof(sverbuf));
                        if (is_func)
                                pg_log_error("The server (version %s) does not support editing function source.",
-                                                  sverbuf);
+                                                        sverbuf);
                        else
                                pg_log_error("The server (version %s) does not support editing view definitions.",
-                                                  sverbuf);
+                                                        sverbuf);
                        status = PSQL_CMD_ERROR;
                }
                else if (!query_buf)
                                if (!result)
                                {
                                        pg_log_error("\\%s: could not read value for variable",
-                                                          cmd);
+                                                                cmd);
                                        success = false;
                                }
                        }
                else if (strchr(envvar, '=') != NULL)
                {
                        pg_log_error("\\%s: environment variable name must not contain \"=\"",
-                                          cmd);
+                                                cmd);
                        success = false;
                }
                else if (!envval)
                                                                  sverbuf, sizeof(sverbuf));
                        if (is_func)
                                pg_log_error("The server (version %s) does not support showing function source.",
-                                                  sverbuf);
+                                                        sverbuf);
                        else
                                pg_log_error("The server (version %s) does not support showing view definitions.",
-                                                  sverbuf);
+                                                        sverbuf);
                        status = PSQL_CMD_ERROR;
                }
                else if (!obj_desc)
                if (ret == 0 || ret > MAXPGPATH)
                {
                        pg_log_error("could not locate temporary directory: %s",
-                                          !ret ? strerror(errno) : "");
+                                                !ret ? strerror(errno) : "");
                        return false;
                }
 
                                        else
                                        {
                                                pg_log_error("\\pset: ambiguous abbreviation \"%s\" matches both \"%s\" and \"%s\"",
-                                                                  value,
-                                                                  formats[match_pos].name, formats[i].name);
+                                                                        value,
+                                                                        formats[match_pos].name, formats[i].name);
                                                return false;
                                        }
                                }
                                                        break;
                                                default:
                                                        pg_log_error("\"%s.%s\" is not a view",
-                                                                          nspname, relname);
+                                                                                nspname, relname);
                                                        result = false;
                                                        break;
                                        }
 
                                if (!appendShellStringNoError(&buf, value))
                                {
                                        pg_log_error("shell command argument contains a newline or carriage return: \"%s\"",
-                                                          value);
+                                                                value);
                                        free(buf.data);
                                        return NULL;
                                }
                        default:
                                OK = false;
                                pg_log_error("unexpected PQresultStatus: %d",
-                                                  PQresultStatus(result));
+                                                        PQresultStatus(result));
                                break;
                }
 
                default:
                        success = false;
                        pg_log_error("unexpected PQresultStatus: %d",
-                                          PQresultStatus(results));
+                                                PQresultStatus(results));
                        break;
        }
 
                        char            sverbuf[32];
 
                        pg_log_warning("The server (version %s) does not support savepoints for ON_ERROR_ROLLBACK.",
-                                          formatPGVersionNumber(pset.sversion, false,
-                                                                                        sverbuf, sizeof(sverbuf)));
+                                                  formatPGVersionNumber(pset.sversion, false,
+                                                                                                sverbuf, sizeof(sverbuf)));
                        on_error_rollback_warning = true;
                }
                else
                                /* PQTRANS_UNKNOWN is expected given a broken connection. */
                                if (transaction_status != PQTRANS_UNKNOWN || ConnectionUp())
                                        pg_log_error("unexpected transaction status (%d)",
-                                                          transaction_status);
+                                                                transaction_status);
                                break;
                }
 
 
                                        char       *reason = wait_result_to_str(pclose_rc);
 
                                        pg_log_error("%s: %s", options->file,
-                                                          reason ? reason : "");
+                                                                reason ? reason : "");
                                        if (reason)
                                                free(reason);
                                }
 
                if (piv_columns.count > CROSSTABVIEW_MAX_COLUMNS)
                {
                        pg_log_error("\\crosstabview: maximum number of columns (%d) exceeded",
-                                          CROSSTABVIEW_MAX_COLUMNS);
+                                                CROSSTABVIEW_MAX_COLUMNS);
                        goto error_return;
                }
 
                        if (cont.cells[idx] != NULL)
                        {
                                pg_log_error("\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"",
-                                                  rp->name ? rp->name :
-                                                  (popt.nullPrint ? popt.nullPrint : "(null)"),
-                                                  cp->name ? cp->name :
-                                                  (popt.nullPrint ? popt.nullPrint : "(null)"));
+                                                        rp->name ? rp->name :
+                                                        (popt.nullPrint ? popt.nullPrint : "(null)"),
+                                                        cp->name ? cp->name :
+                                                        (popt.nullPrint ? popt.nullPrint : "(null)"));
                                goto error;
                        }
 
                if (idx < 0 || idx >= PQnfields(res))
                {
                        pg_log_error("\\crosstabview: column number %d is out of range 1..%d",
-                                          idx + 1, PQnfields(res));
+                                                idx + 1, PQnfields(res));
                        return -1;
                }
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support access methods.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_info("The server (version %s) does not support tablespaces.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                       formatPGVersionNumber(pset.sversion, false,
+                                                                                 sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("\\df does not take a \"%c\" option with server version %s",
-                                  'p',
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        'p',
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("\\df does not take a \"%c\" option with server version %s",
-                                  'w',
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        'w',
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support altering default privileges.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                {
                        if (pattern)
                                pg_log_error("Did not find any relation named \"%s\".",
-                                                  pattern);
+                                                        pattern);
                        else
                                pg_log_error("Did not find any relations.");
                }
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support per-database role settings.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
        {
                if (pattern && pattern2)
                        pg_log_error("Did not find any settings for role \"%s\" and database \"%s\".",
-                                          pattern, pattern2);
+                                                pattern, pattern2);
                else if (pattern)
                        pg_log_error("Did not find any settings for role \"%s\".",
-                                          pattern);
+                                                pattern);
                else
                        pg_log_error("Did not find any settings.");
        }
        {
                if (pattern)
                        pg_log_error("Did not find any relation named \"%s\".",
-                                          pattern);
+                                                pattern);
                else
                        pg_log_error("Did not find any relations.");
        }
        PQExpBufferData title;
        PGresult   *res;
        printQueryOpt myopt = pset.popt;
-       bool translate_columns[] = {false, false, false, false, false, false, false, false, false};
+       bool            translate_columns[] = {false, false, false, false, false, false, false, false, false};
        const char *tabletitle;
        bool            mixed_output = false;
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support collations.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support full text search.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                {
                        if (pattern)
                                pg_log_error("Did not find any text search parser named \"%s\".",
-                                                  pattern);
+                                                        pattern);
                        else
                                pg_log_error("Did not find any text search parsers.");
                }
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support full text search.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support full text search.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support full text search.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                {
                        if (pattern)
                                pg_log_error("Did not find any text search configuration named \"%s\".",
-                                                  pattern);
+                                                        pattern);
                        else
                                pg_log_error("Did not find any text search configurations.");
                }
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support foreign-data wrappers.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support foreign servers.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support user mappings.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support foreign tables.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support extensions.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support extensions.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                {
                        if (pattern)
                                pg_log_error("Did not find any extension named \"%s\".",
-                                                  pattern);
+                                                        pattern);
                        else
                                pg_log_error("Did not find any extensions.");
                }
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support publications.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support publications.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
                {
                        if (pattern)
                                pg_log_error("Did not find any publication named \"%s\".",
-                                                  pattern);
+                                                        pattern);
                        else
                                pg_log_error("Did not find any publications.");
                }
                char            sverbuf[32];
 
                pg_log_error("The server (version %s) does not support subscriptions.",
-                                  formatPGVersionNumber(pset.sversion, false,
-                                                                                sverbuf, sizeof(sverbuf)));
+                                        formatPGVersionNumber(pset.sversion, false,
+                                                                                  sverbuf, sizeof(sverbuf)));
                return true;
        }
 
 
        if (pset.inputfile)
        {
                *filename = pset.inputfile;
-               *lineno =  pset.lineno;
+               *lineno = pset.lineno;
        }
        else
        {
 
 static const SchemaQuery Query_for_list_of_partitioned_relations = {
        .catname = "pg_catalog.pg_class c",
        .selcondition = "c.relkind IN (" CppAsString2(RELKIND_PARTITIONED_TABLE)
-               ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
+       ", " CppAsString2(RELKIND_PARTITIONED_INDEX) ")",
        .viscondition = "pg_catalog.pg_table_is_visible(c.oid)",
        .namespace = "c.relnamespace",
        .result = "pg_catalog.quote_ident(c.relname)",
        {
 #ifdef NOT_USED
                pg_log_error("tab completion query failed: %s\nQuery was:\n%s",
-                                  PQerrorMessage(pset.db), query);
+                                        PQerrorMessage(pset.db), query);
 #endif
                PQclear(result);
                result = NULL;
 
                /* string is not recognized; don't clobber *result */
                if (name)
                        pg_log_error("unrecognized value \"%s\" for \"%s\": Boolean expected",
-                                          value, name);
+                                                value, name);
                valid = false;
        }
        return valid;
                /* string is not recognized; don't clobber *result */
                if (name)
                        pg_log_error("invalid value \"%s\" for \"%s\": integer expected",
-                                          value, name);
+                                                value, name);
                return false;
        }
 }
 {
        pg_log_error("unrecognized value \"%s\" for \"%s\"\n"
                                 "Available values are: %s.",
-                          value, name, suggestions);
+                                value, name, suggestions);
 }
 
        else
        {
                /*
-                * We can save some code later by pre-filling with zeros. We know
-                * that there can be no more than 16 output digits in this form,
-                * otherwise we would not choose fixed-point output.
+                * We can save some code later by pre-filling with zeros. We know that
+                * there can be no more than 16 output digits in this form, otherwise
+                * we would not choose fixed-point output.
                 */
                Assert(exp < 16 && exp + olength <= 16);
                memset(result, '0', 16);
 
        /*
         * The thresholds for fixed-point output are chosen to match printf
-        * defaults. Beware that both the code of to_chars_df and the value
-        * of DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
+        * defaults. Beware that both the code of to_chars_df and the value of
+        * DOUBLE_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
         */
        if (exp >= -4 && exp < 15)
                return to_chars_df(v, olength, result + index) + sign;
 
        else
        {
                /*
-                * We can save some code later by pre-filling with zeros. We know
-                * that there can be no more than 6 output digits in this form,
-                * otherwise we would not choose fixed-point output. memset 8
-                * rather than 6 bytes to let the compiler optimize it.
+                * We can save some code later by pre-filling with zeros. We know that
+                * there can be no more than 6 output digits in this form, otherwise
+                * we would not choose fixed-point output. memset 8 rather than 6
+                * bytes to let the compiler optimize it.
                 */
                Assert(exp < 6 && exp + olength <= 6);
                memset(result, '0', 8);
 
        /*
         * The thresholds for fixed-point output are chosen to match printf
-        * defaults. Beware that both the code of to_chars_f and the value
-        * of FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
+        * defaults. Beware that both the code of to_chars_f and the value of
+        * FLOAT_SHORTEST_DECIMAL_LEN are sensitive to these thresholds.
         */
        if (exp >= -4 && exp < 6)
                return to_chars_f(v, olength, result + index) + sign;
 
 #define MINIMUM_VERSION_FOR_PG_WAL     100000
 
 #ifdef PG_FLUSH_DATA_WORKS
-static int pre_sync_fname(const char *fname, bool isdir);
+static int     pre_sync_fname(const char *fname, bool isdir);
 #endif
 static void walkdir(const char *path,
                int (*action) (const char *fname, bool isdir),
 
 enum pg_log_level __pg_log_level;
 
 static const char *progname;
-static int log_flags;
+static int     log_flags;
 
-static void (*log_pre_callback)(void);
-static void (*log_locus_callback)(const char **, uint64 *);
+static void (*log_pre_callback) (void);
+static void (*log_locus_callback) (const char **, uint64 *);
 
 static const char *sgr_error = NULL;
 static const char *sgr_warning = NULL;
 
                if (pg_colors_env)
                {
-                       char     *colors = strdup(pg_colors_env);
+                       char       *colors = strdup(pg_colors_env);
 
                        if (colors)
                        {
                                for (char *token = strtok(colors, ":"); token; token = strtok(NULL, ":"))
                                {
-                                       char *e = strchr(token, '=');
+                                       char       *e = strchr(token, '=');
 
                                        if (e)
                                        {
 }
 
 void
-pg_logging_set_pre_callback(void (*cb)(void))
+pg_logging_set_pre_callback(void (*cb) (void))
 {
        log_pre_callback = cb;
 }
 
 void
-pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno))
+pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno))
 {
        log_locus_callback = cb;
 }
 
 void
-pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...)
+pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...)
 {
        va_list         ap;
 
 }
 
 void
-pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap)
+pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap)
 {
        int                     save_errno = errno;
        const char *filename = NULL;
 
  */
 int32
 pglz_decompress(const char *source, int32 slen, char *dest,
-                                                int32 rawsize, bool check_complete)
+                               int32 rawsize, bool check_complete)
 {
        const unsigned char *sp;
        const unsigned char *srcend;
        }
 
        /*
-        * Check we decompressed the right amount.
-        * If we are slicing, then we won't necessarily
-        * be at the end of the source or dest buffers
-        * when we hit a stop, so we don't test them.
+        * Check we decompressed the right amount. If we are slicing, then we
+        * won't necessarily be at the end of the source or dest buffers when we
+        * hit a stop, so we don't test them.
         */
        if (check_complete && (dp != destend || sp != srcend))
                return -1;
        /*
         * That's it.
         */
-       return (char*)dp - dest;
+       return (char *) dp - dest;
 }
 
                        if (errno != ENOENT)
                        {
                                pg_log_warning("could not stat file or directory \"%s\": %m",
-                                        pathbuf);
+                                                          pathbuf);
                                result = false;
                        }
                        continue;
                                if (errno != ENOENT)
                                {
                                        pg_log_warning("could not remove file or directory \"%s\": %m",
-                                                pathbuf);
+                                                                  pathbuf);
                                        result = false;
                                }
                        }
                if (rmdir(path) != 0)
                {
                        pg_log_warning("could not remove file or directory \"%s\": %m",
-                                path);
+                                                  path);
                        result = false;
                }
        }
 
        amcostestimate_function amcostestimate;
        amoptions_function amoptions;
        amproperty_function amproperty; /* can be NULL */
-       ambuildphasename_function ambuildphasename;     /* can be NULL */
+       ambuildphasename_function ambuildphasename; /* can be NULL */
        amvalidate_function amvalidate;
        ambeginscan_function ambeginscan;
        amrescan_function amrescan;
 
 #include "lib/stringinfo.h"
 
 #define XLOG_GIST_PAGE_UPDATE          0x00
-#define XLOG_GIST_DELETE                       0x10 /* delete leaf index tuples for a page */
-#define XLOG_GIST_PAGE_REUSE           0x20 /* old page is about to be reused from
-                                                                                 * FSM */
+#define XLOG_GIST_DELETE                       0x10    /* delete leaf index tuples for a
+                                                                                        * page */
+#define XLOG_GIST_PAGE_REUSE           0x20    /* old page is about to be reused
+                                                                                        * from FSM */
 #define XLOG_GIST_PAGE_SPLIT           0x30
  /* #define XLOG_GIST_INSERT_COMPLETE   0x40 */        /* not used anymore */
  /* #define XLOG_GIST_CREATE_INDEX              0x50 */        /* not used anymore */
 typedef struct gistxlogPageDelete
 {
        TransactionId deleteXid;        /* last Xid which could see page in scan */
-       OffsetNumber downlinkOffset; /* Offset of downlink referencing this page */
+       OffsetNumber downlinkOffset;    /* Offset of downlink referencing this
+                                                                        * page */
 } gistxlogPageDelete;
 
 #define SizeOfGistxlogPageDelete       (offsetof(gistxlogPageDelete, downlinkOffset) + sizeof(OffsetNumber))
 
 {
        BufferAccessStrategy strategy;  /* our BULKWRITE strategy object */
        Buffer          current_buf;    /* current insertion target page */
-}                      BulkInsertStateData;
+} BulkInsertStateData;
 
 
 extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
 
        BlockNumber phs_startblock; /* starting block number */
        pg_atomic_uint64 phs_nallocated;        /* number of blocks allocated to
                                                                                 * workers so far. */
-} ParallelBlockTableScanDescData;
+}                      ParallelBlockTableScanDescData;
 typedef struct ParallelBlockTableScanDescData *ParallelBlockTableScanDesc;
 
 /*
 
 
        /* array with numberOfOrderBys entries */
        double          distances[FLEXIBLE_ARRAY_MEMBER];
-}                      SpGistSearchItem;
+} SpGistSearchItem;
 
 #define SizeOfSpGistSearchItem(n_distances) \
        (offsetof(SpGistSearchItem, distances) + sizeof(double) * (n_distances))
 
 #include "storage/off.h"
 
 /* XLOG record types for SPGiST */
-/* #define XLOG_SPGIST_CREATE_INDEX       0x00 */      /* not used anymore */
+ /* #define XLOG_SPGIST_CREATE_INDEX       0x00 */     /* not used anymore */
 #define XLOG_SPGIST_ADD_LEAF           0x10
 #define XLOG_SPGIST_MOVE_LEAFS         0x20
 #define XLOG_SPGIST_ADD_NODE           0x30
 
 typedef enum ScanOptions
 {
        /* one of SO_TYPE_* may be specified */
-       SO_TYPE_SEQSCAN         = 1 << 0,
-       SO_TYPE_BITMAPSCAN      = 1 << 1,
-       SO_TYPE_SAMPLESCAN      = 1 << 2,
-       SO_TYPE_ANALYZE         = 1 << 3,
+       SO_TYPE_SEQSCAN = 1 << 0,
+       SO_TYPE_BITMAPSCAN = 1 << 1,
+       SO_TYPE_SAMPLESCAN = 1 << 2,
+       SO_TYPE_ANALYZE = 1 << 3,
 
        /* several of SO_ALLOW_* may be specified */
        /* allow or disallow use of access strategy */
-       SO_ALLOW_STRAT          = 1 << 4,
+       SO_ALLOW_STRAT = 1 << 4,
        /* report location to syncscan logic? */
-       SO_ALLOW_SYNC           = 1 << 5,
+       SO_ALLOW_SYNC = 1 << 5,
        /* verify visibility page-at-a-time? */
-       SO_ALLOW_PAGEMODE       = 1 << 6,
+       SO_ALLOW_PAGEMODE = 1 << 6,
 
        /* unregister snapshot at scan end? */
-       SO_TEMP_SNAPSHOT        = 1 << 7
+       SO_TEMP_SNAPSHOT = 1 << 7
 } ScanOptions;
 
 /*
 
        /*
         * This callback should return true if the relation requires a TOAST table
-        * and false if it does not.  It may wish to examine the relation's
-        * tuple descriptor before making a decision, but if it uses some other
-        * method of storing large values (or if it does not support them) it can
-        * simply return false.
+        * and false if it does not.  It may wish to examine the relation's tuple
+        * descriptor before making a decision, but if it uses some other method
+        * of storing large values (or if it does not support them) it can simply
+        * return false.
         */
-       bool        (*relation_needs_toast_table) (Relation rel);
+       bool            (*relation_needs_toast_table) (Relation rel);
 
 
        /* ------------------------------------------------------------------------
                                int nkeys, struct ScanKeyData *key)
 {
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
        return rel->rd_tableam->scan_begin(rel, snapshot, nkeys, key, NULL, flags);
 }
 
 {
        AttrDefault *defval;            /* array */
        ConstrCheck *check;                     /* array */
-       struct AttrMissing *missing;            /* missing attributes values, NULL if none */
+       struct AttrMissing *missing;    /* missing attributes values, NULL if none */
        uint16          num_defval;
        uint16          num_check;
        bool            has_not_null;
        TupleConstr *constr;            /* constraints, or NULL if none */
        /* attrs[N] is the description of Attribute Number N+1 */
        FormData_pg_attribute attrs[FLEXIBLE_ARRAY_MEMBER];
-} TupleDescData;
+}                      TupleDescData;
 typedef struct TupleDescData *TupleDesc;
 
 /* Accessor for the i'th attribute of tupdesc. */
 
        RECOVERY_TARGET_TIMELINE_CONTROLFILE,
        RECOVERY_TARGET_TIMELINE_LATEST,
        RECOVERY_TARGET_TIMELINE_NUMERIC
-}                      RecoveryTargetTimeLineGoal;
+} RecoveryTargetTimeLineGoal;
 
 extern XLogRecPtr ProcLastRecPtr;
 extern XLogRecPtr XactLastRecEnd;
 
        RECOVERY_TARGET_ACTION_PAUSE,
        RECOVERY_TARGET_ACTION_PROMOTE,
        RECOVERY_TARGET_ACTION_SHUTDOWN
-} RecoveryTargetAction;
+}                      RecoveryTargetAction;
 
 /*
  * Method table for resource managers.
 
 #define PERFORM_DELETION_QUIETLY                       0x0004  /* suppress notices */
 #define PERFORM_DELETION_SKIP_ORIGINAL         0x0008  /* keep original obj */
 #define PERFORM_DELETION_SKIP_EXTENSIONS       0x0010  /* keep extensions */
-#define PERFORM_DELETION_CONCURRENT_LOCK       0x0020  /* normal drop with concurrent lock mode */
+#define PERFORM_DELETION_CONCURRENT_LOCK       0x0020  /* normal drop with
+                                                                                                        * concurrent lock mode */
 
 
 /* in dependency.c */
                                        Oid newRefObjectId);
 
 extern long changeDependenciesOf(Oid classId, Oid oldObjectId,
-                                                                Oid newObjectId);
+                                        Oid newObjectId);
 
 extern long changeDependenciesOn(Oid refClassId, Oid oldRefObjectId,
-                                                                Oid newRefObjectId);
+                                        Oid newRefObjectId);
 
 extern Oid     getExtensionOfObject(Oid classId, Oid objectId);
 
 
 #define        INDEX_CONSTR_CREATE_REMOVE_OLD_DEPS     (1 << 4)
 
 extern Oid index_concurrently_create_copy(Relation heapRelation,
-                                                                                 Oid oldIndexId,
-                                                                                 const char *newName);
+                                                          Oid oldIndexId,
+                                                          const char *newName);
 
 extern void index_concurrently_build(Oid heapRelationId,
-                                                                        Oid indexRelationId);
+                                                Oid indexRelationId);
 
 extern void index_concurrently_swap(Oid newIndexId,
-                                                                       Oid oldIndexId,
-                                                                       const char *oldName);
+                                               Oid oldIndexId,
+                                               const char *oldName);
 
 extern void index_concurrently_set_dead(Oid heapId,
-                                                                               Oid indexId);
+                                                       Oid indexId);
 
 extern ObjectAddress index_constraint_create(Relation heapRelation,
                                                Oid indexRelationId,
 
        int16           adnum;                  /* attnum of attribute */
 
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
-       pg_node_tree adbin BKI_FORCE_NOT_NULL;                  /* nodeToString representation of default */
+       pg_node_tree adbin BKI_FORCE_NOT_NULL;  /* nodeToString representation of
+                                                                                        * default */
 #endif
 } FormData_pg_attrdef;
 
 
        char            defaclobjtype;  /* see DEFACLOBJ_xxx constants below */
 
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
-       aclitem         defaclacl[1] BKI_FORCE_NOT_NULL;        /* permissions to add at CREATE time */
+       aclitem         defaclacl[1] BKI_FORCE_NOT_NULL;        /* permissions to add at
+                                                                                                        * CREATE time */
 #endif
 } FormData_pg_default_acl;
 
 
        bool            polpermissive;  /* restrictive or permissive policy */
 
 #ifdef CATALOG_VARLEN
-       Oid                     polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with policy */
+       Oid                     polroles[1] BKI_FORCE_NOT_NULL; /* Roles associated with
+                                                                                                * policy */
        pg_node_tree polqual;           /* Policy quals. */
        pg_node_tree polwithcheck;      /* WITH CHECK quals. */
 #endif
 
 extern void RelationPreserveStorage(RelFileNode rnode, bool atCommit);
 extern void RelationTruncate(Relation rel, BlockNumber nblocks);
 extern void RelationCopyStorage(SMgrRelation src, SMgrRelation dst,
-                                                               ForkNumber forkNum, char relpersistence);
+                                       ForkNumber forkNum, char relpersistence);
 
 /*
  * These functions used to be in storage/smgr/smgr.c, which explains the
 
 
 /* commands/aggregatecmds.c */
 extern ObjectAddress DefineAggregate(ParseState *pstate, List *name, List *args, bool oldstyle,
-                                                                        List *parameters, bool replace);
+                               List *parameters, bool replace);
 
 /* commands/opclasscmds.c */
 extern ObjectAddress DefineOpClass(CreateOpClassStmt *stmt);
 
         * and allow the inserting code (copy.c and nodeModifyTable.c) to provide
         * a slot containing the original tuple directly.
         */
-       TupleTableSlot  *tcs_original_insert_tuple;
+       TupleTableSlot *tcs_original_insert_tuple;
 
        /*
         * Private data including the tuplestore(s) into which to insert tuples.
 extern bool RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
                                                          TupleTableSlot *old_slot, TupleTableSlot *new_slot);
 extern bool RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
-                                                         TupleTableSlot *old_slot, TupleTableSlot  *new_slot);
+                                                         TupleTableSlot *old_slot, TupleTableSlot *new_slot);
 extern bool RI_Initial_Check(Trigger *trigger,
                                 Relation fk_rel, Relation pk_rel);
 extern void RI_PartitionRemove_Check(Trigger *trigger, Relation fk_rel,
 
                                                                         * which  verbose logs are activated, -1
                                                                         * to use default */
        VacOptTernaryValue index_cleanup;       /* Do index vacuum and cleanup,
-                                                                               * default value depends on reloptions */
+                                                                                * default value depends on reloptions */
        VacOptTernaryValue truncate;    /* Truncate empty pages at the end,
-                                                                               * default value depends on reloptions */
+                                                                        * default value depends on reloptions */
 } VacuumParams;
 
 /* GUC parameters */
 
 #ifndef FILE_UTILS_H
 #define FILE_UTILS_H
 
-extern int fsync_fname(const char *fname, bool isdir);
+extern int     fsync_fname(const char *fname, bool isdir);
 extern void fsync_pgdata(const char *pg_data, int serverVersion);
 extern void fsync_dir_recurse(const char *dir);
-extern int durable_rename(const char *oldfile, const char *newfile);
+extern int     durable_rename(const char *oldfile, const char *newfile);
 extern int     fsync_parent_path(const char *fname);
 
 #endif                                                 /* FILE_UTILS_H */
 
  */
 #define PG_LOG_FLAG_TERSE      1
 
-void pg_logging_init(const char *argv0);
-void pg_logging_config(int new_flags);
-void pg_logging_set_level(enum pg_log_level new_level);
-void pg_logging_set_pre_callback(void (*cb)(void));
-void pg_logging_set_locus_callback(void (*cb)(const char **filename, uint64 *lineno));
+void           pg_logging_init(const char *argv0);
+void           pg_logging_config(int new_flags);
+void           pg_logging_set_level(enum pg_log_level new_level);
+void           pg_logging_set_pre_callback(void (*cb) (void));
+void           pg_logging_set_locus_callback(void (*cb) (const char **filename, uint64 *lineno));
 
-void pg_log_generic(enum pg_log_level level, const char * pg_restrict fmt, ...) pg_attribute_printf(2, 3);
-void pg_log_generic_v(enum pg_log_level level, const char * pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
+void           pg_log_generic(enum pg_log_level level, const char *pg_restrict fmt,...) pg_attribute_printf(2, 3);
+void           pg_log_generic_v(enum pg_log_level level, const char *pg_restrict fmt, va_list ap) pg_attribute_printf(2, 0);
 
 #define pg_log_fatal(...) do { \
                if (likely(__pg_log_level <= PG_LOG_FATAL)) pg_log_generic(PG_LOG_FATAL, __VA_ARGS__); \
                if (unlikely(__pg_log_level <= PG_LOG_DEBUG)) pg_log_generic(PG_LOG_DEBUG, __VA_ARGS__); \
        } while(0)
 
-#endif /* COMMON_LOGGING_H */
+#endif                                                 /* COMMON_LOGGING_H */
 
        ParallelContext *pcxt;          /* parallel context we're using */
        BufferUsage *buffer_usage;      /* points to bufusage area in DSM */
        SharedExecutorInstrumentation *instrumentation; /* optional */
-       struct SharedJitInstrumentation *jit_instrumentation; /* optional */
+       struct SharedJitInstrumentation *jit_instrumentation;   /* optional */
        dsa_area   *area;                       /* points to DSA area in DSM */
        dsa_pointer param_exec;         /* serialized PARAM_EXEC parameters */
        bool            finished;               /* set true by ExecParallelFinish */
 
                                        MemoryContext tablecxt,
                                        MemoryContext tempcxt, bool use_variable_hash_iv);
 extern TupleHashTable BuildTupleHashTableExt(PlanState *parent,
-                                       TupleDesc inputDesc,
-                                       int numCols, AttrNumber *keyColIdx,
-                                       const Oid *eqfuncoids,
-                                       FmgrInfo *hashfunctions,
-                                       Oid *collations,
-                                       long nbuckets, Size additionalsize,
-                                       MemoryContext metacxt,
-                                       MemoryContext tablecxt,
-                                       MemoryContext tempcxt, bool use_variable_hash_iv);
+                                          TupleDesc inputDesc,
+                                          int numCols, AttrNumber *keyColIdx,
+                                          const Oid *eqfuncoids,
+                                          FmgrInfo *hashfunctions,
+                                          Oid *collations,
+                                          long nbuckets, Size additionalsize,
+                                          MemoryContext metacxt,
+                                          MemoryContext tablecxt,
+                                          MemoryContext tempcxt, bool use_variable_hash_iv);
 extern TupleHashEntry LookupTupleHashEntry(TupleHashTable hashtable,
                                         TupleTableSlot *slot,
                                         bool *isnew);
 extern void EvalPlanQualSetPlan(EPQState *epqstate,
                                        Plan *subplan, List *auxrowmarks);
 extern TupleTableSlot *EvalPlanQualSlot(EPQState *epqstate,
-                        Relation relation, Index rti);
+                                Relation relation, Index rti);
 
 #define EvalPlanQualSetSlot(epqstate, slot)  ((epqstate)->origslot = (slot))
 extern void EvalPlanQualFetchRowMarks(EPQState *epqstate);
  */
 extern void ExecInitResultTypeTL(PlanState *planstate);
 extern void ExecInitResultSlot(PlanState *planstate,
-                                                          const TupleTableSlotOps *tts_ops);
+                                  const TupleTableSlotOps *tts_ops);
 extern void ExecInitResultTupleSlotTL(PlanState *planstate,
-                                                                         const TupleTableSlotOps *tts_ops);
+                                                 const TupleTableSlotOps *tts_ops);
 extern void ExecInitScanTupleSlot(EState *estate, ScanState *scanstate,
-                                                                 TupleDesc tupleDesc,
-                                                                 const TupleTableSlotOps *tts_ops);
+                                         TupleDesc tupleDesc,
+                                         const TupleTableSlotOps *tts_ops);
 extern TupleTableSlot *ExecInitExtraTupleSlot(EState *estate,
                                           TupleDesc tupledesc,
                                           const TupleTableSlotOps *tts_ops);
 extern TupleDesc ExecGetResultType(PlanState *planstate);
 extern TupleTableSlot ExecGetResultSlot(PlanState *planstate);
 extern const TupleTableSlotOps *ExecGetResultSlotOps(PlanState *planstate,
-                                                                                                        bool *isfixed);
+                                        bool *isfixed);
 extern void ExecAssignProjectionInfo(PlanState *planstate,
                                                 TupleDesc inputDesc);
 extern void ExecConditionalAssignProjectionInfo(PlanState *planstate,
 
 #define FIELDNO_TUPLETABLESLOT_ISNULL 6
        bool       *tts_isnull;         /* current per-attribute isnull flags */
        MemoryContext tts_mcxt;         /* slot itself is in this context */
-       ItemPointerData tts_tid;    /* stored tuple's tid */
-       Oid                     tts_tableOid;   /* table oid of tuple */
+       ItemPointerData tts_tid;        /* stored tuple's tid */
+       Oid                     tts_tableOid;   /* table oid of tuple */
 } TupleTableSlot;
 
 /* routines for a TupleTableSlot implementation */
 struct TupleTableSlotOps
 {
        /* Minimum size of the slot */
-       size_t                  base_slot_size;
+       size_t          base_slot_size;
 
        /* Initialization. */
-       void (*init)(TupleTableSlot *slot);
+       void            (*init) (TupleTableSlot *slot);
 
        /* Destruction. */
-       void (*release)(TupleTableSlot *slot);
+       void            (*release) (TupleTableSlot *slot);
 
        /*
         * Clear the contents of the slot. Only the contents are expected to be
         * this callback should free the memory allocated for the tuple contained
         * in the slot.
         */
-       void (*clear)(TupleTableSlot *slot);
+       void            (*clear) (TupleTableSlot *slot);
 
        /*
         * Fill up first natts entries of tts_values and tts_isnull arrays with
         * in which case it should set tts_nvalid to the number of returned
         * columns.
         */
-       void (*getsomeattrs)(TupleTableSlot *slot, int natts);
+       void            (*getsomeattrs) (TupleTableSlot *slot, int natts);
 
        /*
         * Returns value of the given system attribute as a datum and sets isnull
         * to false, if it's not NULL. Throws an error if the slot type does not
         * support system attributes.
         */
-       Datum (*getsysattr)(TupleTableSlot *slot, int attnum, bool *isnull);
+       Datum           (*getsysattr) (TupleTableSlot *slot, int attnum, bool *isnull);
 
        /*
         * Make the contents of the slot solely depend on the slot, and not on
         * underlying resources (like another memory context, buffers, etc).
         */
-       void (*materialize)(TupleTableSlot *slot);
+       void            (*materialize) (TupleTableSlot *slot);
 
        /*
         * Copy the contents of the source slot into the destination slot's own
         * context. Invoked using callback of the destination slot.
         */
-       void (*copyslot) (TupleTableSlot *dstslot, TupleTableSlot *srcslot);
+       void            (*copyslot) (TupleTableSlot *dstslot, TupleTableSlot *srcslot);
 
        /*
         * Return a heap tuple "owned" by the slot. It is slot's responsibility to
         * heap tuple, it should not implement this callback and should set it as
         * NULL.
         */
-       HeapTuple (*get_heap_tuple)(TupleTableSlot *slot);
+       HeapTuple       (*get_heap_tuple) (TupleTableSlot *slot);
 
        /*
         * Return a minimal tuple "owned" by the slot. It is slot's responsibility
         * "own" a minimal tuple, it should not implement this callback and should
         * set it as NULL.
         */
-       MinimalTuple (*get_minimal_tuple)(TupleTableSlot *slot);
+       MinimalTuple (*get_minimal_tuple) (TupleTableSlot *slot);
 
        /*
         * Return a copy of heap tuple representing the contents of the slot. The
         * the slot i.e. the caller has to take responsibilty to free memory
         * consumed by the slot.
         */
-       HeapTuple (*copy_heap_tuple)(TupleTableSlot *slot);
+       HeapTuple       (*copy_heap_tuple) (TupleTableSlot *slot);
 
        /*
-        * Return a copy of minimal tuple representing the contents of the slot. The
-        * copy needs to be palloc'd in the current memory context. The slot
+        * Return a copy of minimal tuple representing the contents of the slot.
+        * The copy needs to be palloc'd in the current memory context. The slot
         * itself is expected to remain unaffected. It is *not* expected to have
         * meaningful "system columns" in the copy. The copy is not be "owned" by
         * the slot i.e. the caller has to take responsibilty to free memory
         * consumed by the slot.
         */
-       MinimalTuple (*copy_minimal_tuple)(TupleTableSlot *slot);
+       MinimalTuple (*copy_minimal_tuple) (TupleTableSlot *slot);
 };
 
 /*
 {
        TupleTableSlot base;
 
-       char       *data;               /* data for materialized slots */
+       char       *data;                       /* data for materialized slots */
 } VirtualTupleTableSlot;
 
 typedef struct HeapTupleTableSlot
        TupleTableSlot base;
 
 #define FIELDNO_HEAPTUPLETABLESLOT_TUPLE 1
-       HeapTuple       tuple;          /* physical tuple */
+       HeapTuple       tuple;                  /* physical tuple */
 #define FIELDNO_HEAPTUPLETABLESLOT_OFF 2
-       uint32          off;            /* saved state for slot_deform_heap_tuple */
-       HeapTupleData tupdata;  /* optional workspace for storing tuple */
+       uint32          off;                    /* saved state for slot_deform_heap_tuple */
+       HeapTupleData tupdata;          /* optional workspace for storing tuple */
 } HeapTupleTableSlot;
 
 /* heap tuple residing in a buffer */
         * false in such a case, since presumably tts_tuple is pointing at the
         * buffer page.)
         */
-       Buffer          buffer;         /* tuple's buffer, or InvalidBuffer */
+       Buffer          buffer;                 /* tuple's buffer, or InvalidBuffer */
 } BufferHeapTupleTableSlot;
 
 typedef struct MinimalTupleTableSlot
         * physical tuples.
         */
 #define FIELDNO_MINIMALTUPLETABLESLOT_TUPLE 1
-       HeapTuple       tuple;          /* tuple wrapper */
-       MinimalTuple mintuple;  /* minimal tuple, or NULL if none */
-       HeapTupleData minhdr;   /* workspace for minimal-tuple-only case */
+       HeapTuple       tuple;                  /* tuple wrapper */
+       MinimalTuple mintuple;          /* minimal tuple, or NULL if none */
+       HeapTupleData minhdr;           /* workspace for minimal-tuple-only case */
 #define FIELDNO_MINIMALTUPLETABLESLOT_OFF 4
-       uint32          off;            /* saved state for slot_deform_heap_tuple */
+       uint32          off;                    /* saved state for slot_deform_heap_tuple */
 } MinimalTupleTableSlot;
 
 /*
                                                 TupleTableSlot *slot,
                                                 Buffer buffer);
 extern TupleTableSlot *ExecStorePinnedBufferHeapTuple(HeapTuple tuple,
-                                                TupleTableSlot *slot,
-                                                Buffer buffer);
+                                                          TupleTableSlot *slot,
+                                                          Buffer buffer);
 extern TupleTableSlot *ExecStoreMinimalTuple(MinimalTuple mtup,
                                          TupleTableSlot *slot,
                                          bool shouldFree);
 extern void ExecForceStoreMinimalTuple(MinimalTuple mtup, TupleTableSlot *slot,
-                                                                          bool shouldFree);
+                                                  bool shouldFree);
 extern TupleTableSlot *ExecStoreVirtualTuple(TupleTableSlot *slot);
 extern TupleTableSlot *ExecStoreAllNullTuple(TupleTableSlot *slot);
 extern void ExecStoreHeapTupleDatum(Datum data, TupleTableSlot *slot);
 
 extern char *be_tls_get_certificate_hash(Port *port, size_t *len);
 #endif
 
-#endif /* USE_SSL */
+#endif                                                 /* USE_SSL */
 
 #ifdef ENABLE_GSS
 /*
 
 
 #include <signal.h>
 
-#include "datatype/timestamp.h"        /* for TimestampTZ */
+#include "datatype/timestamp.h" /* for TimestampTZ */
 #include "pgtime.h"                            /* for pg_time_t */
 
 
 
        Instrumentation *ri_TrigInstrument;
 
        /* On-demand created slots for triggers / returning processing */
-       TupleTableSlot *ri_ReturningSlot; /* for trigger output tuples */
+       TupleTableSlot *ri_ReturningSlot;       /* for trigger output tuples */
        TupleTableSlot *ri_TrigOldSlot; /* for a trigger's old tuple */
        TupleTableSlot *ri_TrigNewSlot; /* for a trigger's new tuple */
 
        AttrNumber *keyColIdx;          /* attr numbers of key columns */
        FmgrInfo   *tab_hash_funcs; /* hash functions for table datatype(s) */
        ExprState  *tab_eq_func;        /* comparator for table datatype(s) */
-       Oid                *tab_collations;     /* collations for hash and comparison */
+       Oid                *tab_collations; /* collations for hash and comparison */
        MemoryContext tablecxt;         /* memory context containing table */
        MemoryContext tempcxt;          /* context for function evaluations */
        Size            entrysize;              /* actual size to make each hash entry */
        AttrNumber *keyColIdx;          /* control data for hash tables */
        Oid                *tab_eq_funcoids;    /* equality func oids for table
                                                                         * datatype(s) */
-       Oid                *tab_collations;     /* collations for hash and comparison */
+       Oid                *tab_collations; /* collations for hash and comparison */
        FmgrInfo   *tab_hash_funcs; /* hash functions for table datatype(s) */
        FmgrInfo   *tab_eq_funcs;       /* equality functions for table datatype(s) */
        FmgrInfo   *lhs_hash_funcs; /* hash functions for lefthand datatype(s) */
        /*
         * Other run-time state needed by most if not all node types.
         */
-       TupleDesc ps_ResultTupleDesc;   /* node's return type */
+       TupleDesc       ps_ResultTupleDesc; /* node's return type */
        TupleTableSlot *ps_ResultTupleSlot; /* slot for my result tuples */
        ExprContext *ps_ExprContext;    /* node's expression-evaluation context */
        ProjectionInfo *ps_ProjInfo;    /* info for doing tuple projection */
        const TupleTableSlotOps *outerops;
        const TupleTableSlotOps *innerops;
        const TupleTableSlotOps *resultops;
-       bool scanopsfixed;
-       bool outeropsfixed;
-       bool inneropsfixed;
-       bool resultopsfixed;
-       bool scanopsset;
-       bool outeropsset;
-       bool inneropsset;
-       bool resultopsset;
+       bool            scanopsfixed;
+       bool            outeropsfixed;
+       bool            inneropsfixed;
+       bool            resultopsfixed;
+       bool            scanopsset;
+       bool            outeropsset;
+       bool            inneropsset;
+       bool            resultopsset;
 } PlanState;
 
 /* ----------------
        PlanState **mt_plans;           /* subplans (one per target rel) */
        int                     mt_nplans;              /* number of plans in the array */
        int                     mt_whichplan;   /* which one is being executed (0..n-1) */
-       TupleTableSlot** mt_scans;      /* input tuple corresponding to underlying
-                                                                  plans */
+       TupleTableSlot **mt_scans;      /* input tuple corresponding to underlying
+                                                                * plans */
        ResultRelInfo *resultRelInfo;   /* per-subplan target relations */
        ResultRelInfo *rootResultRelInfo;       /* root target relation (partitioned
                                                                                 * table root) */
  */
 typedef struct
 {
-       struct ScanKeyData *scan_key;           /* scankey to put value into */
+       struct ScanKeyData *scan_key;   /* scankey to put value into */
        ExprState  *key_expr;           /* expr to evaluate to get value */
        bool            key_toastable;  /* is expr's result a toastable datatype? */
 } IndexRuntimeKeyInfo;
 
 typedef struct
 {
-       struct ScanKeyData *scan_key;           /* scankey to put value into */
+       struct ScanKeyData *scan_key;   /* scankey to put value into */
        ExprState  *array_expr;         /* expr to evaluate to get array value */
        int                     next_elem;              /* next array element to use */
        int                     num_elems;              /* number of elems in current array value */
 
        bool            is_no_inherit;  /* is constraint non-inheritable? */
        Node       *raw_expr;           /* expr, as untransformed parse tree */
        char       *cooked_expr;        /* expr, as nodeToString representation */
-       char            generated_when; /* ALWAYS or BY DEFAULT */
+       char            generated_when; /* ALWAYS or BY DEFAULT */
        char            generated_kind; /* currently always STORED */
 
        /* Fields used for unique constraints (UNIQUE and PRIMARY KEY): */
 typedef struct VacuumStmt
 {
        NodeTag         type;
-       List            *options;               /* list of DefElem nodes */
+       List       *options;            /* list of DefElem nodes */
        List       *rels;                       /* list of VacuumRelation, or NIL for all */
        bool            is_vacuumcmd;   /* true for VACUUM, false for ANALYZE */
 } VacuumStmt;
 
        int                     partNumCols;    /* number of columns in partition clause */
        AttrNumber *partColIdx;         /* their indexes in the target list */
        Oid                *partOperators;      /* equality operators for partition columns */
-       Oid                *partCollations;     /* collations for partition columns */
+       Oid                *partCollations; /* collations for partition columns */
        int                     ordNumCols;             /* number of columns in ordering clause */
        AttrNumber *ordColIdx;          /* their indexes in the target list */
        Oid                *ordOperators;       /* equality operators for ordering columns */
        int                     numCols;                /* number of columns to check for uniqueness */
        AttrNumber *uniqColIdx;         /* their indexes in the target list */
        Oid                *uniqOperators;      /* equality operators to compare with */
-       Oid                *uniqCollations;     /* collations for equality comparisons */
+       Oid                *uniqCollations; /* collations for equality comparisons */
 } Unique;
 
 /* ------------
 
        EXPR_KIND_POLICY,                       /* USING or WITH CHECK expr in policy */
        EXPR_KIND_PARTITION_BOUND,      /* partition bound expression */
        EXPR_KIND_PARTITION_EXPRESSION, /* PARTITION BY expression */
-       EXPR_KIND_CALL_ARGUMENT,                /* procedure argument in CALL */
+       EXPR_KIND_CALL_ARGUMENT,        /* procedure argument in CALL */
        EXPR_KIND_COPY_WHERE,           /* WHERE condition in COPY FROM */
-       EXPR_KIND_GENERATED_COLUMN,     /* generation expression for a column */
+       EXPR_KIND_GENERATED_COLUMN, /* generation expression for a column */
 } ParseExprKind;
 
 
 
        PgStat_MsgHdr m_hdr;
        Oid                     m_databaseid;
        int                     m_failurecount;
-       TimestampTz     m_failure_time;
+       TimestampTz m_failure_time;
 } PgStat_MsgChecksumFailure;
 
 
 
 
 /* port/pg_strong_random.c */
 extern bool pg_strong_random(void *buf, size_t len);
+
 /*
  * pg_backend_random used to be a wrapper for pg_strong_random before
  * Postgres 12 for the backend code.
 
 
        /*
         * Marks the logical decoding context as fast forward decoding one. Such a
-        * context does not have plugin loaded so most of the following
-        * properties are unused.
+        * context does not have plugin loaded so most of the following properties
+        * are unused.
         */
        bool            fast_forward;
 
 
 ReorderBufferChange *ReorderBufferGetChange(ReorderBuffer *);
 void           ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
 
-Oid * ReorderBufferGetRelids(ReorderBuffer *, int nrelids);
-void ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids);
+Oid               *ReorderBufferGetRelids(ReorderBuffer *, int nrelids);
+void           ReorderBufferReturnRelids(ReorderBuffer *, Oid *relids);
 
 void           ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
 void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn,
 
 extern MCVList *statext_mcv_build(int numrows, HeapTuple *rows,
                                  Bitmapset *attrs, VacAttrStats **stats,
                                  double totalrows);
-extern bytea *statext_mcv_serialize(MCVList * mcv, VacAttrStats **stats);
-extern MCVList * statext_mcv_deserialize(bytea *data);
+extern bytea *statext_mcv_serialize(MCVList *mcv, VacAttrStats **stats);
+extern MCVList *statext_mcv_deserialize(bytea *data);
 
 extern MultiSortSupport multi_sort_init(int ndims);
 extern void multi_sort_add_dimension(MultiSortSupport mss, int sortdim,
 
 typedef struct MCVItem
 {
        double          frequency;              /* frequency of this combination */
-       double          base_frequency; /* frequency if independent */
+       double          base_frequency; /* frequency if independent */
        bool       *isnull;                     /* NULL flags */
        Datum      *values;                     /* item values */
 } MCVItem;
 
 extern int     durable_unlink(const char *fname, int loglevel);
 extern int     durable_link_or_rename(const char *oldfile, const char *newfile, int loglevel);
 extern void SyncDataDirectory(void);
-extern int data_sync_elevel(int elevel);
+extern int     data_sync_elevel(int elevel);
 
 /* Filename components */
 #define PG_TEMP_FILES_DIR "pgsql_tmp"
 
 extern void DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo);
 
 /* md sync callbacks */
-extern int mdsyncfiletag(const FileTag *ftag, char *path);
-extern int mdunlinkfiletag(const FileTag *ftag, char *path);
+extern int     mdsyncfiletag(const FileTag *ftag, char *path);
+extern int     mdunlinkfiletag(const FileTag *ftag, char *path);
 extern bool mdfiletagmatches(const FileTag *ftag, const FileTag *candidate);
 
 #endif                                                 /* MD_H */
 
                }                       defprivs;
        }                       d;
 
-       struct CollectedCommand *parent;                /* when nested */
+       struct CollectedCommand *parent;        /* when nested */
 } CollectedCommand;
 
 #endif                                                 /* DEPARSE_UTILITY_H */
 
  * true if the two datums are equal, false otherwise.
  */
 extern bool datum_image_eq(Datum value1, Datum value2,
-                                                  bool typByVal, int typLen);
+                          bool typByVal, int typLen);
 
 /*
  * Serialize and restore datums so that we can transfer them to parallel
 
  *             others --- keep same as the parameters in ECPGdo() function
  */
 bool
-ECPGopen(const char *cursor_name,const char *prepared_name,
-               const int lineno, const int compat,const int force_indicator,
-               const char *connection_name, const bool questionmarks,
-               const int st, const char *query,...)
+ECPGopen(const char *cursor_name, const char *prepared_name,
+                const int lineno, const int compat, const int force_indicator,
+                const char *connection_name, const bool questionmarks,
+                const int st, const char *query,...)
 {
        va_list         args;
        bool            status;
-       const char      *real_connection_name = NULL;
+       const char *real_connection_name = NULL;
 
        if (!query)
        {
        else
        {
                /*
-                * If can't get the connection name by declared name then using connection name
-                * coming from the parameter connection_name
+                * If can't get the connection name by declared name then using
+                * connection name coming from the parameter connection_name
                 */
                real_connection_name = connection_name;
        }
  */
 bool
 ECPGfetch(const char *cursor_name,
-               const int lineno, const int compat,const int force_indicator,
-               const char *connection_name, const bool questionmarks,
-               const int st, const char *query,...)
+                 const int lineno, const int compat, const int force_indicator,
+                 const char *connection_name, const bool questionmarks,
+                 const int st, const char *query,...)
 {
        va_list         args;
        bool            status;
-       const char      *real_connection_name = NULL;
+       const char *real_connection_name = NULL;
 
        if (!query)
        {
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by cursor name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the cursor name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
  */
 bool
 ECPGclose(const char *cursor_name,
-               const int lineno, const int compat,const int force_indicator,
-               const char *connection_name, const bool questionmarks,
-               const int st, const char *query,...)
+                 const int lineno, const int compat, const int force_indicator,
+                 const char *connection_name, const bool questionmarks,
+                 const int st, const char *query,...)
 {
        va_list         args;
        bool            status;
-       const char      *real_connection_name = NULL;
+       const char *real_connection_name = NULL;
        struct connection *con = NULL;
 
        if (!query)
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by cursor name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the cursor name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
        if (!con)
        {
                ecpg_raise(lineno, ECPG_NO_CONN, ECPG_SQLSTATE_CONNECTION_DOES_NOT_EXIST,
-                          connection_name ? connection_name : ecpg_gettext("NULL"));
+                                  connection_name ? connection_name : ecpg_gettext("NULL"));
                return;
        }
 
        /* allocate a node to store the new cursor */
-       new = (struct cursor_statement *)ecpg_alloc(sizeof(struct cursor_statement), lineno);
+       new = (struct cursor_statement *) ecpg_alloc(sizeof(struct cursor_statement), lineno);
        if (new)
        {
                new->name = ecpg_strdup(cursor_name, lineno);
 
 
 static void
 set_desc_attr(struct descriptor_item *desc_item, struct variable *var,
-                               char *tobeinserted)
+                         char *tobeinserted)
 {
        if (var->type != ECPGt_bytea)
                desc_item->is_binary = false;
                desc_item->data_len = variable->len;
        }
 
-       ecpg_free(desc_item->data); /* free() takes care of a
-                                                                * potential NULL value */
+       ecpg_free(desc_item->data); /* free() takes care of a potential NULL value */
        desc_item->data = (char *) tobeinserted;
 }
 
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by declared name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the declared name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
 
 /* structure to store declared statements */
 struct declared_statement
 {
-       char       *name; /* declared name */
-       char            *connection_name;
-       char            *cursor_name;
+       char       *name;                       /* declared name */
+       char       *connection_name;
+       char       *cursor_name;
        struct declared_statement *next;
 };
 
 
 struct cursor_statement
 {
-       char       *name; /*cursor name*/
+       char       *name;                       /* cursor name */
        struct cursor_statement *next;
 };
 
 struct prepared_statement *ecpg_find_prepared_statement(const char *,
                                                         struct connection *, struct prepared_statement **);
 
-void ecpg_update_declare_statement(const char *, const char *, const int);
-char *ecpg_get_con_name_by_declared_name(const char *);
+void           ecpg_update_declare_statement(const char *, const char *, const int);
+char      *ecpg_get_con_name_by_declared_name(const char *);
 const char *ecpg_get_con_name_by_cursor_name(const char *);
-void ecpg_release_declared_statement(const char *);
+void           ecpg_release_declared_statement(const char *);
 
 bool ecpg_store_result(const PGresult *results, int act_field,
                                  const struct statement *stmt, struct variable *var);
 
                        break;
 
                case ECPG_INVALID_CURSOR:
-                        snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
+                       snprintf(sqlca->sqlerrm.sqlerrmc, sizeof(sqlca->sqlerrm.sqlerrmc),
                        /*------
                                translator: this string will be truncated at 149 characters expanded.  */
-                               ecpg_gettext("The cursor is invalid on line %d"),line);
+                                        ecpg_gettext("The cursor is invalid on line %d"), line);
                        break;
 
                default:
 
                sprintf(ptr, "%.15g%s", value, delim);
 }
 
-static char*
+static char *
 convert_bytea_to_string(char *from_data, int from_len, int lineno)
 {
-       char *to_data;
-       int to_len = ecpg_hex_enc_len(from_len) + 4 + 1; /* backslash + 'x' + quote + quote */
+       char       *to_data;
+       int                     to_len = ecpg_hex_enc_len(from_len) + 4 + 1;    /* backslash + 'x' +
+                                                                                                                                * quote + quote */
 
        to_data = ecpg_alloc(to_len, lineno);
        if (!to_data)
 static void
 print_param_value(char *value, int len, int is_binary, int lineno, int nth)
 {
-       char *value_s;
-       bool malloced = false;
+       char       *value_s;
+       bool            malloced = false;
 
        if (value == NULL)
                value_s = "null";
-       else if (! is_binary)
+       else if (!is_binary)
                value_s = value;
        else
        {
-               value_s = ecpg_alloc(ecpg_hex_enc_len(len)+1, lineno);
+               value_s = ecpg_alloc(ecpg_hex_enc_len(len) + 1, lineno);
                if (value_s != NULL)
                {
                        ecpg_hex_encode(value, len, value_s);
        }
 
        ecpg_log("ecpg_free_params on line %d: parameter %d = %s\n",
-                               lineno, nth, value_s);
+                        lineno, nth, value_s);
 
        if (malloced)
                ecpg_free(value_s);
        {
                if (print)
                        print_param_value(stmt->paramvalues[n], stmt->paramlengths[n],
-                                                               stmt->paramformats[n], stmt->lineno, n + 1);
+                                                         stmt->paramformats[n], stmt->lineno, n + 1);
                ecpg_free(stmt->paramvalues[n]);
        }
        ecpg_free(stmt->paramvalues);
 
 static bool
 store_input_from_desc(struct statement *stmt, struct descriptor_item *desc_item,
-                                               char **tobeinserted)
+                                         char **tobeinserted)
 {
        struct variable var;
 
        /*
-        * In case of binary data, only allocate memory and memcpy because
-        * binary data have been already stored into desc_item->data with
+        * In case of binary data, only allocate memory and memcpy because binary
+        * data have been already stored into desc_item->data with
         * ecpg_store_input() at ECPGset_desc().
         */
        if (desc_item->is_binary)
                                stmt->statement_type == ECPGst_exec_with_exprlist)
                        {
                                /* Add double quote both side for embedding statement name. */
-                               char *str = ecpg_alloc(strlen(tobeinserted) + 2 + 1, stmt->lineno);
+                               char       *str = ecpg_alloc(strlen(tobeinserted) + 2 + 1, stmt->lineno);
+
                                sprintf(str, "\"%s\"", tobeinserted);
                                ecpg_free(tobeinserted);
                                tobeinserted = str;
 
                        if (binary_format)
                        {
-                               char *p = convert_bytea_to_string(tobeinserted, binary_length, stmt->lineno);
+                               char       *p = convert_bytea_to_string(tobeinserted, binary_length, stmt->lineno);
+
                                if (!p)
                                {
                                        ecpg_free_params(stmt, false);
        }
 
        /*
-        * Check if there are unmatched things left.
-        * PREPARE AS has no parameter. Check other statement.
+        * Check if there are unmatched things left. PREPARE AS has no parameter.
+        * Check other statement.
         */
        if (stmt->statement_type != ECPGst_prepare &&
                next_insert(stmt->command, position, stmt->questionmarks, std_strings) >= 0)
 
                if (stmt->statement_type == ECPGst_prepare)
                {
-                       if(! ecpg_register_prepared_stmt(stmt))
+                       if (!ecpg_register_prepared_stmt(stmt))
                        {
                                ecpg_free_params(stmt, true);
                                return false;
 {
        va_list         args;
        bool            ret;
-       const char  *real_connection_name = NULL;
+       const char *real_connection_name = NULL;
 
        real_connection_name = connection_name;
 
                if (real_connection_name == NULL)
                {
                        /*
-                        * If can't get the connection name by declared name then using connection name
-                        * coming from the parameter connection_name
+                        * If we can't get the connection name by the declared name, use
+                        * the connection name coming from the parameter connection_name.
                         */
                        real_connection_name = connection_name;
-                }
+               }
        }
 
        va_start(args, query);
 
        struct prepared_statement *this;
        struct connection *con = NULL;
        struct prepared_statement *prev = NULL;
-       char *real_connection_name;
-       int lineno = stmt->lineno;
-
-    real_connection_name = ecpg_get_con_name_by_declared_name(stmt->name);
-    if (real_connection_name == NULL)
-        real_connection_name = stmt->connection->name;
-
-    con = ecpg_get_connection(real_connection_name);
-    if (!ecpg_init(con, real_connection_name, stmt->lineno))
-        return false;
-
-    /* check if we already have prepared this statement */
-    this = ecpg_find_prepared_statement(stmt->name, con, &prev);
-    if (this && !deallocate_one(lineno, ECPG_COMPAT_PGSQL, con, prev, this))
-        return false;
-
-    /* allocate new statement */
-    this = (struct prepared_statement *) ecpg_alloc(sizeof(struct prepared_statement), lineno);
-    if (!this)
-        return false;
-
-    prep_stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno);
-    if (!stmt)
-    {
-        ecpg_free(this);
-        return false;
-    }
-    memset(prep_stmt, 0, sizeof(struct statement));
-
-    /* create statement */
-    prep_stmt->lineno = lineno;
-    prep_stmt->connection = con;
-    prep_stmt->command = ecpg_strdup(stmt->command, lineno);
-    prep_stmt->inlist = prep_stmt->outlist = NULL;
-    this->name = ecpg_strdup(stmt->name, lineno);
-    this->stmt = prep_stmt;
-    this->prepared = true;
-
-    if (con->prep_stmts == NULL)
-        this->next = NULL;
-    else
-        this->next = con->prep_stmts;
-
-    con->prep_stmts = this;
-    return true;
+       char       *real_connection_name;
+       int                     lineno = stmt->lineno;
+
+       real_connection_name = ecpg_get_con_name_by_declared_name(stmt->name);
+       if (real_connection_name == NULL)
+               real_connection_name = stmt->connection->name;
+
+       con = ecpg_get_connection(real_connection_name);
+       if (!ecpg_init(con, real_connection_name, stmt->lineno))
+               return false;
+
+       /* check if we already have prepared this statement */
+       this = ecpg_find_prepared_statement(stmt->name, con, &prev);
+       if (this && !deallocate_one(lineno, ECPG_COMPAT_PGSQL, con, prev, this))
+               return false;
+
+       /* allocate new statement */
+       this = (struct prepared_statement *) ecpg_alloc(sizeof(struct prepared_statement), lineno);
+       if (!this)
+               return false;
+
+       prep_stmt = (struct statement *) ecpg_alloc(sizeof(struct statement), lineno);
+       if (!prep_stmt)
+       {
+               ecpg_free(this);
+               return false;
+       }
+       memset(prep_stmt, 0, sizeof(struct statement));
+
+       /* create statement */
+       prep_stmt->lineno = lineno;
+       prep_stmt->connection = con;
+       prep_stmt->command = ecpg_strdup(stmt->command, lineno);
+       prep_stmt->inlist = prep_stmt->outlist = NULL;
+       this->name = ecpg_strdup(stmt->name, lineno);
+       this->stmt = prep_stmt;
+       this->prepared = true;
+
+       if (con->prep_stmts == NULL)
+               this->next = NULL;
+       else
+               this->next = con->prep_stmts;
+
+       con->prep_stmts = this;
+       return true;
 }
 
 static bool
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by declared name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the declared name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by declared name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the declared name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
        if (real_connection_name == NULL)
        {
                /*
-                * If can't get the connection name by declared name then using connection name
-                * coming from the parameter connection_name
+                * If we can't get the connection name by the declared name, use
+                * the connection name coming from the parameter connection_name.
                 */
                real_connection_name = connection_name;
        }
        {
                /*
                 * Going to here means not using AT clause in the DECLARE STATEMENT
-                * ECPG pre-processor allows this case.
-                * However, we don't allocate a node to store the declared name
-                * because the DECLARE STATEMENT without using AT clause will be ignored.
-                * The following statement such as PREPARE, EXECUTE are executed
-                * as usual on the current connection.
+                * ECPG pre-processor allows this case. However, we don't allocate a
+                * node to store the declared name because the DECLARE STATEMENT
+                * without using AT clause will be ignored. The following statement
+                * such as PREPARE, EXECUTE are executed as usual on the current
+                * connection.
                 */
                return true;
        }
 
        if (ecpg_find_declared_statement(name))
        {
-               /* Should not go to here because the pre-compiler has check the duplicate name */
+               /*
+                * Should not get here, because the pre-compiler has already
+                * checked for a duplicate name.
+                */
                return false;
        }
 
        /* Find the declared node by declared name */
        p = ecpg_find_declared_statement(declared_name);
        if (p)
-               p->cursor_name = ecpg_strdup(cursor_name,lineno);
+               p->cursor_name = ecpg_strdup(cursor_name, lineno);
 }
 
 /*
                        ecpg_free(cur->cursor_name);
                        ecpg_free(cur);
 
-                       /* One connection can be used by multiple declared name, so no break here */
+                       /*
+                        * One connection can be used by multiple declared names, so no
+                        * break here.
+                        */
                }
                else
                        prev = cur;
 
 bool           ECPGdeallocate(int, int, const char *, const char *);
 bool           ECPGdeallocate_all(int, int, const char *);
 bool           ECPGdeclare(int, const char *, const char *);
-bool           ECPGopen(const char*, const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
-bool           ECPGfetch(const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
-bool           ECPGclose(const char*, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool           ECPGopen(const char *, const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool           ECPGfetch(const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
+bool           ECPGclose(const char *, const int, const int, const int, const char *, const bool, const int, const char *,...);
 char      *ECPGprepared_statement(const char *, const char *, int);
 PGconn    *ECPGget_PGconn(const char *);
 PGTransactionStatusType ECPGtransactionStatus(const char *);
 
        free_argument(arg->next);
 
        /*
-        *  Don't free variables in it because the original codes don't free it either
-        *  variables are static structures instead of allocating
+        * Don't free the variables in it, because the original code doesn't
+        * free them either; the variables are static structures rather than
+        * allocated memory.
         */
        free(arg);
 }
                        free(input_filename);
                }
 
-               if(g_declared_list)
+               if (g_declared_list)
                {
                        free_declared_stmt(g_declared_list);
                        g_declared_list = NULL;
                }
 
-               if(cur)
+               if (cur)
                {
                        free_cursor(cur);
                        cur = NULL;
 
                st = ECPGst_normal;
 
        /*
-        * In following cases, stmt is CSTRING or char_variable. They must be output directly.
-        * - prepared_name of EXECUTE without exprlist
-        * - execstring of EXECUTE IMMEDIATE
+        * In the following cases, stmt is CSTRING or char_variable, and must
+        * be output directly:
+        *   - prepared_name of EXECUTE without exprlist
+        *   - execstring of EXECUTE IMMEDIATE
         */
        fprintf(base_yyout, "%s, ", ecpg_statement_type_name[st]);
        if (st == ECPGst_execute || st == ECPGst_exec_immediate)
                                        j++;
                                } while (str[j] == ' ' || str[j] == '\t');
 
-                               if ((str[j] != '\n') && (str[j] != '\r' || str[j + 1] != '\n'))         /* not followed by a
-                                                                                                                                                                        * newline */
-                                       fputs("\\\\",base_yyout);
+                               if ((str[j] != '\n') && (str[j] != '\r' || str[j + 1] != '\n')) /* not followed by a
+                                                                                                                                                                * newline */
+                                       fputs("\\\\", base_yyout);
                        }
                        else if (str[i] == '\r' && str[i + 1] == '\n')
                        {
 void
 output_cursor_statement(int cursor_stmt, char *cursor_name, char *prepared_name, char *stmt, int whenever_mode, enum ECPG_statement_type st)
 {
-       switch(cursor_stmt)
+       switch (cursor_stmt)
        {
                case ECPGcst_open:
                        fprintf(base_yyout, "{ ECPGopen(");
 
 extern void output_deallocate_prepare_statement(char *);
 extern void output_simple_statement(char *, int);
 extern void output_declare_statement(char *);
-extern void output_cursor_statement(int , char *, char *, char *, int , enum ECPG_statement_type);
+extern void output_cursor_statement(int, char *, char *, char *, int, enum ECPG_statement_type);
 extern char *hashline_number(void);
 extern int     base_yyparse(void);
 extern int     base_yylex(void);
 
 /* structure to store declared name */
 struct declared_name_st
 {
-       char       *name; /* declared name */
+       char       *name;                       /* declared name */
        struct declared_name_st *next;
 };
 
 
         * and "prefer".
         */
        {"gssencmode", "PGGSSENCMODE", DefaultGSSMode, NULL,
-               "GSSENC-Mode", "", 7,           /* sizeof("disable") == 7 */
+               "GSSENC-Mode", "", 7,   /* sizeof("disable") == 7 */
        offsetof(struct pg_conn, gssencmode)},
 
 #if defined(ENABLE_GSS) || defined(ENABLE_SSPI)
                                        getHostaddr(conn, host_addr, NI_MAXHOST);
                                        if (strlen(host_addr) > 0)
                                                conn->connip = strdup(host_addr);
+
                                        /*
                                         * purposely ignore strdup failure; not a big problem if
                                         * it fails anyway.
 
 extern void PQinitOpenSSL(int do_ssl, int do_crypto);
 
 /* Return true if GSSAPI encryption is in use */
-extern int PQgssEncInUse(PGconn *conn);
+extern int     PQgssEncInUse(PGconn *conn);
 
 /* Returns GSSAPI context if GSSAPI is in use */
 extern void *PQgetgssctx(PGconn *conn);
 
 
 /* All "broken" PQExpBuffers point to this string. */
 static const char oom_buffer[1] = "";
+
 /* Need a char * for unconstify() compatiblity */
-static const char * oom_buffer_ptr = oom_buffer;
+static const char *oom_buffer_ptr = oom_buffer;
 
 static bool appendPQExpBufferVA(PQExpBuffer str, const char *fmt, va_list args) pg_attribute_printf(2, 0);
 
        str->data = (char *) malloc(INITIAL_EXPBUFFER_SIZE);
        if (str->data == NULL)
        {
-               str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
+               str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
                str->maxlen = 0;
                str->len = 0;
        }
        if (str->data != oom_buffer)
                free(str->data);
        /* just for luck, make the buffer validly empty. */
-       str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
+       str->data = unconstify(char *, oom_buffer_ptr); /* see comment above */
        str->maxlen = 0;
        str->len = 0;
 }
 
                                        expanded_record_set_field_internal(rec_new->erh,
                                                                                                           i + 1,
                                                                                                           (Datum) 0,
-                                                                                                          true, /*isnull*/
+                                                                                                          true,        /* isnull */
                                                                                                           false, false);
                }
        }
        int                     strict_multiassignment_level = 0;
 
        /*
-        * The extra check strict strict_multi_assignment can be active,
-        * only when input tupdesc is specified.
+        * The extra check strict strict_multi_assignment can be active, only when
+        * input tupdesc is specified.
         */
        if (tupdesc != NULL)
        {
 
                                        /* When source value is missing */
                                        if (strict_multiassignment_level)
-                                                       ereport(strict_multiassignment_level,
-                                                                       (errcode(ERRCODE_DATATYPE_MISMATCH),
-                                                                        errmsg("number of source and target fields in assignment does not match"),
-                                                                        /* translator: %s represents a name of an extra check */
-                                                                        errdetail("%s check of %s is active.",
-                                                                                          "strict_multi_assignment",
-                                                                                                         strict_multiassignment_level == ERROR ? "extra_errors" :
-                                                                                                                                         "extra_warnings"),
-                                                                        errhint("Make sure the query returns the exact list of columns.")));
+                                               ereport(strict_multiassignment_level,
+                                                               (errcode(ERRCODE_DATATYPE_MISMATCH),
+                                                                errmsg("number of source and target fields in assignment does not match"),
+                                               /* translator: %s represents a name of an extra check */
+                                                                errdetail("%s check of %s is active.",
+                                                                                  "strict_multi_assignment",
+                                                                                  strict_multiassignment_level == ERROR ? "extra_errors" :
+                                                                                  "extra_warnings"),
+                                                                errhint("Make sure the query returns the exact list of columns.")));
                                }
 
                                /* Cast the new value to the right type, if needed. */
                                        ereport(strict_multiassignment_level,
                                                        (errcode(ERRCODE_DATATYPE_MISMATCH),
                                                         errmsg("number of source and target fields in assignment does not match"),
-                                                        /* translator: %s represents a name of an extra check */
+                                       /* translator: %s represents a name of an extra check */
                                                         errdetail("%s check of %s is active.",
                                                                           "strict_multi_assignment",
-                                                                                 strict_multiassignment_level == ERROR ? "extra_errors" :
-                                                                                                                                         "extra_warnings"),
+                                                                          strict_multiassignment_level == ERROR ? "extra_errors" :
+                                                                          "extra_warnings"),
                                                         errhint("Make sure the query returns the exact list of columns.")));
                        }
 
                                valtypmod = -1;
 
                                if (strict_multiassignment_level)
-                                               ereport(strict_multiassignment_level,
-                                                               (errcode(ERRCODE_DATATYPE_MISMATCH),
-                                                                errmsg("number of source and target fields in assignment does not match"),
-                                                /* translator: %s represents a name of an extra check */
-                                                errdetail("%s check of %s is active.",
-                                                                 "strict_multi_assignment",
-                                                                 strict_multiassignment_level == ERROR ? "extra_errors" :
-                                                                                                                                         "extra_warnings"),
-                                                                errhint("Make sure the query returns the exact list of columns.")));
+                                       ereport(strict_multiassignment_level,
+                                                       (errcode(ERRCODE_DATATYPE_MISMATCH),
+                                                        errmsg("number of source and target fields in assignment does not match"),
+                                       /* translator: %s represents a name of an extra check */
+                                                        errdetail("%s check of %s is active.",
+                                                                          "strict_multi_assignment",
+                                                                          strict_multiassignment_level == ERROR ? "extra_errors" :
+                                                                          "extra_warnings"),
+                                                        errhint("Make sure the query returns the exact list of columns.")));
                        }
 
                        exec_assign_value(estate, (PLpgSQL_datum *) var,
                }
 
                /*
-                * When strict_multiassignment extra check is active, ensure there
-                * are no unassigned source attributes.
+                * When strict_multiassignment extra check is active, ensure there are
+                * no unassigned source attributes.
                 */
                if (strict_multiassignment_level && anum < td_natts)
                {
                                ereport(strict_multiassignment_level,
                                                (errcode(ERRCODE_DATATYPE_MISMATCH),
                                                 errmsg("number of source and target fields in assignment does not match"),
-                                                /* translator: %s represents a name of an extra check */
+                               /* translator: %s represents a name of an extra check */
                                                 errdetail("%s check of %s is active.",
-                                                                 "strict_multi_assignment",
-                                                                 strict_multiassignment_level == ERROR ? "extra_errors" :
-                                                                                                                                         "extra_warnings"),
+                                                                  "strict_multi_assignment",
+                                                                  strict_multiassignment_level == ERROR ? "extra_errors" :
+                                                                  "extra_warnings"),
                                                 errhint("Make sure the query returns the exact list of columns.")));
                }
 
 
         * set).  This can be used by a profiler as the index for an array of
         * per-statement metrics.
         */
-       unsigned int    stmtid;
+       unsigned int stmtid;
 } PLpgSQL_stmt;
 
 /*
 
                        Py_DECREF(pltlevel);
 
                        /*
-                        * Note: In BEFORE trigger, stored generated columns are not computed yet,
-                        * so don't make them accessible in NEW row.
+                        * Note: In BEFORE trigger, stored generated columns are not
+                        * computed yet, so don't make them accessible in NEW row.
                         */
 
                        if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
 
                         * Now the command part of the event for TG_op and data for NEW
                         * and OLD
                         *
-                        * Note: In BEFORE trigger, stored generated columns are not computed yet,
-                        * so don't make them accessible in NEW row.
+                        * Note: In BEFORE trigger, stored generated columns are not
+                        * computed yet, so don't make them accessible in NEW row.
                         */
                        if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
                        {
 
 void *
 dlopen(const char *file, int mode)
 {
-       int             flags = 0;
+       int                     flags = 0;
 
        if (mode & RTLD_NOW)
                flags |= BIND_IMMEDIATE;
 
 {
        uint32          res;
 
-       __asm__ __volatile__(" popcntl %1,%0\n" : "=q"(res) : "rm"(word) : "cc");
+__asm__ __volatile__(" popcntl %1,%0\n":"=q"(res):"rm"(word):"cc");
        return (int) res;
 }
 
 {
        uint64          res;
 
-       __asm__ __volatile__(" popcntq %1,%0\n" : "=q"(res) : "rm"(word) : "cc");
+__asm__ __volatile__(" popcntq %1,%0\n":"=q"(res):"rm"(word):"cc");
        return (int) res;
 }
 
 
                 * Value might be in-range for double but not float.
                 */
                if (dresult != 0 && fresult == 0)
-                       caller_errno = ERANGE;                    /* underflow */
+                       caller_errno = ERANGE;  /* underflow */
                if (!isinf(dresult) && isinf(fresult))
-                       caller_errno = ERANGE;                    /* overflow */
+                       caller_errno = ERANGE;  /* overflow */
        }
        else
                caller_errno = errno;
        float           fresult;
 
        errno = 0;
-       fresult = (strtof)(nptr, endptr);
+       fresult = (strtof) (nptr, endptr);
        if (errno)
        {
                /* On error, just return the error to the caller. */
                /*
                 * Try again. errno is already 0 here.
                 */
-               double  dresult = strtod(nptr, NULL);
+               double          dresult = strtod(nptr, NULL);
+
                if (errno)
                {
                        /* On error, just return the error */
                        errno = caller_errno;
                        return fresult;
                }
-               else if ((dresult > 0 && dresult <= FLT_MIN && (float)dresult != 0.0) ||
-                                (dresult < 0 && dresult >= -FLT_MIN && (float)dresult != 0.0))
+               else if ((dresult > 0 && dresult <= FLT_MIN && (float) dresult != 0.0) ||
+                                (dresult < 0 && dresult >= -FLT_MIN && (float) dresult != 0.0))
                {
                        /* subnormal but nonzero value */
                        errno = caller_errno;
 
                        }
                        else
                        {
-                               status(_("ok    ")); /* align with FAILED */
+                               status(_("ok    "));    /* align with FAILED */
                                success_count++;
                        }
 
        }
        else
        {
-               status(_("ok    ")); /* align with FAILED */
+               status(_("ok    "));    /* align with FAILED */
                success_count++;
        }
 
 
 
        distance = DatumGetFloat8(DirectFunctionCall2(point_distance,
                                                                                                  PointPGetDatum(point),
-                                                                                       PointPGetDatum(&widget->center)));
+                                                                                                 PointPGetDatum(&widget->center)));
 
        PG_RETURN_BOOL(distance < widget->radius);
 }
 
 ArchiveFormat
 ArchiveHandle
 ArchiveMode
+ArchiveOpts
 ArchiverOutput
 ArchiverStage
 ArrayAnalyzeExtraData
 ArrayMapState
 ArrayMetaState
 ArrayParseState
-ArrayRef
-ArrayRefState
 ArrayType
 AsyncQueueControl
 AsyncQueueEntry
 BTBuildState
 BTCycleId
 BTIndexStat
+BTInsertState
+BTInsertStateData
 BTLeader
 BTMetaPageData
 BTOneVacInfo
 BTPageStat
 BTPageState
 BTParallelScanDesc
+BTScanInsert
+BTScanInsertData
 BTScanOpaque
 BTScanOpaqueData
 BTScanPos
 BernoulliSamplerData
 BgWorkerStartTime
 BgwHandleStatus
+BinaryArithmFunc
 BipartiteMatchState
 BitmapAnd
 BitmapAndPath
 BufferCachePagesRec
 BufferDesc
 BufferDescPadded
+BufferHeapTupleTableSlot
 BufferLookupEnt
 BufferStrategyControl
 BufferTag
 BuildAccumulator
 BuiltinScript
 BulkInsertState
+BulkInsertStateData
 CACHESIGN
 CAC_state
 CCFastEqualFN
 CRITICAL_SECTION
 CRSSnapshotAction
 CState
+CTEMaterialize
 CV
 C_block
+CachedExpression
 CachedPlan
 CachedPlanSource
 CallContext
 CkptSortItem
 CkptTsStatus
 ClientAuthentication_hook_type
+ClientCertMode
 ClientData
 ClonePtrType
-ClonedConstraint
 ClosePortalStmt
 ClosePtrType
 Clump
 ClusterInfo
-ClusterOption
 ClusterStmt
 CmdType
 CoalesceExpr
 DictSubState
 DictSyn
 DictThesaurus
+DimensionInfo
 DirectoryMethodData
 DirectoryMethodFile
 DisableTimeoutParams
 FileFdwPlanState
 FileNameMap
 FileTag
+FinalPathExtraData
 FindSplitData
+FindSplitStrat
 FixedParallelExecutorState
 FixedParallelState
 FixedParamState
 FuncDetailCode
 FuncExpr
 FuncInfo
+FuncLookupError
 Function
 FunctionCallInfo
 FunctionCallInfoBaseData
 GenerationContext
 GenerationPointer
 GenericCosts
-GenericIndexOpts
 GenericXLogState
 GeqoPrivateData
 GetForeignJoinPaths_function
 GinTernaryValue
 GinTupleCollector
 GinVacuumState
-GistBDItem
 GistBufferingMode
+GistBulkDeleteResult
 GistEntryVector
 GistInetKey
 GistNSN
 GistSplitUnion
 GistSplitVector
+GistVacState
 GlobalTransaction
 GrantRoleStmt
 GrantStmt
 Group
 GroupPath
 GroupPathExtraData
+GroupResultPath
 GroupState
 GroupVarInfo
 GroupingFunc
 HeapTupleFields
 HeapTupleHeader
 HeapTupleHeaderData
+HeapTupleTableSlot
 HistControl
 HotStandbyState
 I32
 IndexBuildResult
 IndexBulkDeleteCallback
 IndexBulkDeleteResult
+IndexClause
 IndexClauseSet
 IndexElem
 IndexFetchHeapData
 IndexOnlyScanState
 IndexOptInfo
 IndexPath
-IndexQualInfo
 IndexRuntimeKeyInfo
 IndexScan
 IndexScanDesc
 Int128AggState
 Int8TransTypeData
 IntRBTreeNode
+IntegerSet
 InternalDefaultACL
 InternalGrant
 Interval
 InvalidationListHeader
 IpcMemoryId
 IpcMemoryKey
+IpcMemoryState
 IpcSemaphoreId
 IpcSemaphoreKey
 IsForeignRelUpdatable_function
 JOBOBJECT_BASIC_UI_RESTRICTIONS
 JOBOBJECT_SECURITY_LIMIT_INFORMATION
 JitContext
+JitInstrumentation
 JitProviderCallbacks
 JitProviderCompileExprCB
 JitProviderInit
 JsonPathGinPathItem
 JsonPathItem
 JsonPathItemType
+JsonPathKeyword
 JsonPathParseItem
 JsonPathParseResult
 JsonPathPredicateCallback
+JsonPathString
 JsonSemAction
 JsonTokenType
 JsonTransformStringValuesAction
 LWLockMode
 LWLockPadded
 LabelProvider
+LagTracker
 LargeObjectDesc
 LastAttnumInfo
 Latch
 LexemeInfo
 LexemeKey
 LexizeData
+LibraryInfo
 Limit
 LimitPath
 LimitState
 LogicalTapeSet
 MAGIC
 MBuf
+MCVItem
+MCVList
 MEMORY_BASIC_INFORMATION
 MINIDUMPWRITEDUMP
 MINIDUMP_TYPE
 MinMaxOp
 MinimalTuple
 MinimalTupleData
+MinimalTupleTableSlot
 MinmaxOpaque
-MissingPtr
 ModifyTable
 ModifyTablePath
 ModifyTableState
 Notification
 NotifyStmt
 Nsrt
-NullableDatum
 NullIfExpr
 NullTest
 NullTestType
+NullableDatum
 Numeric
 NumericAggState
 NumericDigit
 ObjectAccessPostCreate
 ObjectAccessType
 ObjectAddress
+ObjectAddressAndFlags
 ObjectAddressExtra
 ObjectAddressStack
 ObjectAddresses
 PBOOL
 PCtxtHandle
 PFN
+PGAlignedBlock
+PGAlignedXLogBlock
 PGAsyncStatusType
 PGCALL2
+PGChecksummablePage
 PGContextVisibility
 PGEvent
 PGEventConnDestroy
 PTIterationArray
 PTOKEN_PRIVILEGES
 PTOKEN_USER
-PULONG
 PUTENVPROC
 PVOID
 PX_Alias
 ParallelAppendState
 ParallelBitmapHeapState
 ParallelBlockTableScanDesc
-ParallelBlockTableScanDescData
 ParallelCompletionPtr
 ParallelContext
 ParallelExecutorInfo
 ParallelHashJoinBatch
 ParallelHashJoinBatchAccessor
 ParallelHashJoinState
-ParallelTableScanDesc
-ParallelTableScanDescData
 ParallelIndexScanDesc
+ParallelReadyList
 ParallelSlot
 ParallelState
+ParallelTableScanDesc
+ParallelTableScanDescData
 ParallelWorkerContext
 ParallelWorkerInfo
 Param
 ParserState
 PartClauseInfo
 PartClauseMatchStatus
+PartClauseTarget
 PartitionBoundInfo
 PartitionBoundInfoData
 PartitionBoundSpec
 PartitionCmd
 PartitionDesc
 PartitionDescData
+PartitionDirectory
+PartitionDirectoryEntry
 PartitionDispatch
-PartitionDispatchData
 PartitionElem
 PartitionHashBound
 PartitionKey
 PartitionRangeBound
 PartitionRangeDatum
 PartitionRangeDatumKind
+PartitionRoutingInfo
 PartitionScheme
 PartitionSpec
 PartitionTupleRouting
+PartitionedRelPruneInfo
+PartitionedRelPruningData
 PartitionwiseAggregateType
 PasswordType
 Path
 Perl_check_t
 Perl_ppaddr_t
 Permutation
+PgBackendGSSStatus
 PgBackendSSLStatus
 PgBackendStatus
 PgBenchExpr
 PgFdwDirectModifyState
 PgFdwModifyState
 PgFdwOption
+PgFdwPathExtraData
 PgFdwRelationInfo
 PgFdwScanState
 PgIfAddrCallback
 PgStat_MsgArchiver
 PgStat_MsgAutovacStart
 PgStat_MsgBgWriter
+PgStat_MsgChecksumFailure
 PgStat_MsgDeadlock
 PgStat_MsgDropdb
 PgStat_MsgDummy
 PredXactListElement
 PredicateLockData
 PredicateLockTargetType
+PrepParallelRestorePtrType
 PrepareStmt
 PreparedParamsData
 PreparedStatement
 QTN2QTState
 QTNode
 QUERYTYPE
+QUERY_SECURITY_CONTEXT_TOKEN_FN
 QualCost
 QualItem
 Query
 QuerySource
 QueueBackendStatus
 QueuePosition
-RBNode
-RBOrderControl
+RBTNode
+RBTOrderControl
 RBTree
 RBTreeIterator
 REPARSE_JUNCTION_DATA_BUFFER
 RTEKind
 RWConflict
 RWConflictPoolHeader
+RandomState
 Range
 RangeBound
 RangeBox
 RecordCompareData
 RecordIOData
 RecoveryLockListsEntry
-RecoveryTargetAction
+RecoveryTargetTimeLineGoal
 RecoveryTargetType
 RectBox
 RecursionContext
 RelationData
 RelationPtr
 RelationSyncEntry
-RelativeTime
 RelcacheCallbackFunction
 RelfilenodeMapEntry
 RelfilenodeMapKey
 RestorePass
 RestrictInfo
 Result
-ResultPath
 ResultRelInfo
 ResultState
 ReturnSetInfo
 ScanDirection
 ScanKey
 ScanKeyData
-ScanKeyword
-ScanStackEntry
+ScanKeywordHashFunc
+ScanKeywordList
 ScanState
 ScanTypeControl
 SchemaQuery
 SeenRelsEntry
 SelectStmt
 Selectivity
+SemTPadded
 SemiAntiJoinFactors
 SeqScan
 SeqScanState
 SeqTable
 SeqTableData
 SerCommitSeqNo
+SerializableXactHandle
+SerializedActiveRelMaps
 SerializedReindexState
 SerializedSnapshotData
 SerializedTransactionState
 SetOperationStmt
 SetToDefault
 SetupWorkerPtrType
+ShDependObjectInfo
 SharedBitmapState
 SharedDependencyObjectType
 SharedDependencyType
 SharedInvalSmgrMsg
 SharedInvalSnapshotMsg
 SharedInvalidationMessage
+SharedJitInstrumentation
 SharedRecordTableEntry
 SharedRecordTableKey
 SharedRecordTypmodRegistry
 SpGistPageOpaqueData
 SpGistScanOpaque
 SpGistScanOpaqueData
+SpGistSearchItem
 SpGistState
 SpGistTypeDesc
 SpecialJoinInfo
 SpinDelayStatus
 SplitInterval
 SplitLR
+SplitPoint
 SplitVar
 SplitedPageLayout
 StackElem
 SubXactCallback
 SubXactCallbackItem
 SubXactEvent
+SubplanResultRelHashElem
 SubqueryScan
 SubqueryScanPath
 SubqueryScanState
+SubscriptingRef
+SubscriptingRefState
 Subscription
 SubscriptionInfo
 SubscriptionRelState
+SupportRequestCost
+SupportRequestIndexCondition
+SupportRequestRows
+SupportRequestSelectivity
+SupportRequestSimplify
 Syn
 SyncOps
 SyncRepConfigData
-SyncRequestHandler
 SyncRequestType
 SysScanDesc
 SyscacheCallbackFunction
 TSVectorStat
 TState
 TStoreState
-TTOffList
 TYPCATEGORY
 T_Action
 T_WorkerStatus
 TidScan
 TidScanState
 TimeADT
-TimeInterval
-TimeIntervalData
 TimeLineHistoryCmd
 TimeLineHistoryEntry
 TimeLineID
 U8
 UChar
 UCharIterator
+UColAttribute
+UColAttributeValue
 UCollator
 UConverter
 UErrorCode
 UserOpts
 VacAttrStats
 VacAttrStatsP
+VacOptTernaryValue
 VacuumParams
 VacuumRelation
 VacuumStmt
 ViewOptions
 ViewStmt
 VirtualTransactionId
+VirtualTupleTableSlot
 Vsrt
 WAITORTIMERCALLBACK
 WAIT_ORDER
 WindowAggPath
 WindowAggState
 WindowClause
+WindowClauseSortData
 WindowDef
 WindowFunc
 WindowFuncExprState
 XLogwrtResult
 XLogwrtRqst
 XPVIV
+XPVMG
 XactCallback
 XactCallbackItem
 XactEvent
 YYLTYPE
 YYSTYPE
 YY_BUFFER_STATE
-ZipfCache
-ZipfCell
 _SPI_connection
 _SPI_plan
 __AssignProcessToJobObject
 ambeginscan_function
 ambuild_function
 ambuildempty_function
+ambuildphasename_function
 ambulkdelete_function
 amcanreturn_function
 amcostestimate_function
 bits32
 bits8
 bloom_filter
-boolean
 brin_column_state
 bytea
 cached_re_str
 file_type_t
 filemap_t
 finalize_primnode_context
+find_dependent_phvs_context
 find_expr_references_context
 fix_join_expr_context
 fix_scan_expr_context
 float4KEY
 float8
 float8KEY
+floating_decimal_32
+floating_decimal_64
 fmAggrefPtr
 fmExprContextCallbackFunction
 fmNodePtr
 ginxlogSplit
 ginxlogUpdateMeta
 ginxlogVacuumDataLeafPage
+gistxlogDelete
 gistxlogPage
+gistxlogPageDelete
+gistxlogPageReuse
 gistxlogPageSplit
 gistxlogPageUpdate
 grouping_sets_data
 gseg_picksplit_item
-gss_OID
-gss_OID_desc
 gss_buffer_desc
 gss_cred_id_t
 gss_ctx_id_t
 inetKEY
 inet_struct
 init_function
+inline_cte_walker_context
 inline_error_callback_arg
 ino_t
 inquiry
 int8
 internalPQconninfoOption
 intptr_t
+intset_internal_node
+intset_leaf_node
+intset_node
 intvKEY
 itemIdSort
 itemIdSortData
 lclContext
 lclTocEntry
 leafSegmentInfo
+leaf_item
 line_t
 lineno_t
 list_qsort_comparator
 mp_result
 mp_sign
 mp_size
+mp_small
+mp_usmall
 mp_word
 mpz_t
 mxact
 radius_packet
 rangeTableEntry_used_context
 rank_context
-rb_allocfunc
-rb_combiner
-rb_comparator
-rb_freefunc
+rbt_allocfunc
+rbt_combiner
+rbt_comparator
+rbt_freefunc
 reduce_outer_joins_state
 reference
 regex_arc_t
 slist_mutable_iter
 slist_node
 slock_t
-smgrid
+socket_set
 spgBulkDeleteState
 spgChooseIn
 spgChooseOut
 string
 substitute_actual_parameters_context
 substitute_actual_srf_parameters_context
-substitute_multiple_relids_context
+substitute_phv_relids_context
 svtype
 symbol
 tablespaceinfo
 temp_tablespaces_extra
 test_function
 test_shm_mq_header
+test_spec
 text
 timeKEY
 time_t
 walrcv_readtimelinehistoryfile_fn
 walrcv_receive_fn
 walrcv_send_fn
+walrcv_server_version_fn
 walrcv_startstreaming_fn
 wchar2mb_with_len_converter
 wchar_t