Initial pgindent run for v12.
author: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 22 May 2019 16:55:34 +0000 (12:55 -0400)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 22 May 2019 16:55:34 +0000 (12:55 -0400)
This is still using the 2.0 version of pg_bsd_indent.
I thought it would be good to commit this separately,
so as to document the differences between 2.0 and 2.1 behavior.

Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us

221 files changed:
contrib/amcheck/verify_nbtree.c
contrib/cube/cube.c
contrib/file_fdw/file_fdw.c
contrib/intarray/_int_gist.c
contrib/intarray/_int_tool.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pgcrypto/imath.c
contrib/pgcrypto/imath.h
contrib/pgstattuple/pgstattuple.c
contrib/postgres_fdw/postgres_fdw.c
src/backend/access/common/heaptuple.c
src/backend/access/gin/ginvacuum.c
src/backend/access/gin/ginxlog.c
src/backend/access/gist/gistutil.c
src/backend/access/hash/hashfunc.c
src/backend/access/heap/heapam.c
src/backend/access/heap/heapam_handler.c
src/backend/access/heap/rewriteheap.c
src/backend/access/heap/tuptoaster.c
src/backend/access/heap/vacuumlazy.c
src/backend/access/nbtree/nbtinsert.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtsort.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/spgist/spgscan.c
src/backend/access/spgist/spgtextproc.c
src/backend/access/spgist/spgvacuum.c
src/backend/access/table/tableam.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/catalog/aclchk.c
src/backend/catalog/catalog.c
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_publication.c
src/backend/catalog/storage.c
src/backend/commands/amcmds.c
src/backend/commands/cluster.c
src/backend/commands/constraint.c
src/backend/commands/dbcommands.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/indexcmds.c
src/backend/commands/statscmds.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/vacuum.c
src/backend/executor/execExpr.c
src/backend/executor/execExprInterp.c
src/backend/executor/execMain.c
src/backend/executor/execParallel.c
src/backend/executor/execPartition.c
src/backend/executor/execReplication.c
src/backend/executor/execScan.c
src/backend/executor/execTuples.c
src/backend/executor/functions.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeGatherMerge.c
src/backend/executor/nodeHashjoin.c
src/backend/executor/nodeIndexonlyscan.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSeqscan.c
src/backend/executor/nodeSubplan.c
src/backend/executor/nodeSubqueryscan.c
src/backend/libpq/auth.c
src/backend/libpq/be-secure-openssl.c
src/backend/nodes/bitmapset.c
src/backend/optimizer/path/clausesel.c
src/backend/optimizer/util/inherit.c
src/backend/optimizer/util/pathnode.c
src/backend/parser/parse_utilcmd.c
src/backend/partitioning/partdesc.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/slotfuncs.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/statistics/dependencies.c
src/backend/statistics/extended_stats.c
src/backend/statistics/mcv.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/file/fd.c
src/backend/storage/ipc/latch.c
src/backend/storage/ipc/pmsignal.c
src/backend/storage/ipc/signalfuncs.c
src/backend/storage/lmgr/lmgr.c
src/backend/storage/smgr/smgr.c
src/backend/storage/sync/sync.c
src/backend/tcop/dest.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/genfile.c
src/backend/utils/adt/geo_ops.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/like.c
src/backend/utils/adt/like_support.c
src/backend/utils/adt/numutils.c
src/backend/utils/adt/regexp.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/relmapper.c
src/backend/utils/cache/syscache.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/hash/hashfn.c
src/backend/utils/init/globals.c
src/backend/utils/init/miscinit.c
src/backend/utils/mb/mbutils.c
src/backend/utils/misc/guc.c
src/backend/utils/mmgr/dsa.c
src/bin/initdb/initdb.c
src/bin/pg_archivecleanup/pg_archivecleanup.c
src/bin/pg_basebackup/pg_receivewal.c
src/bin/pg_basebackup/pg_recvlogical.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_dump/common.c
src/bin/pg_dump/compress_io.c
src/bin/pg_dump/parallel.c
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_backup_db.c
src/bin/pg_dump/pg_backup_directory.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_dump/pg_restore.c
src/bin/pg_rewind/filemap.c
src/bin/pg_rewind/libpq_fetch.c
src/bin/pg_rewind/parsexlog.c
src/bin/pg_rewind/pg_rewind.c
src/bin/pg_upgrade/controldata.c
src/bin/pg_upgrade/function.c
src/bin/pg_waldump/pg_waldump.c
src/bin/pgbench/pgbench.c
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/copy.c
src/bin/psql/crosstabview.c
src/bin/psql/describe.c
src/bin/psql/startup.c
src/bin/psql/tab-complete.c
src/bin/psql/variables.c
src/common/d2s.c
src/common/f2s.c
src/common/file_utils.c
src/common/logging.c
src/common/pg_lzcompress.c
src/common/rmtree.c
src/include/access/amapi.h
src/include/access/gistxlog.h
src/include/access/hio.h
src/include/access/relscan.h
src/include/access/spgist_private.h
src/include/access/spgxlog.h
src/include/access/tableam.h
src/include/access/tupdesc.h
src/include/access/xlog.h
src/include/access/xlog_internal.h
src/include/catalog/dependency.h
src/include/catalog/index.h
src/include/catalog/pg_attrdef.h
src/include/catalog/pg_default_acl.h
src/include/catalog/pg_policy.h
src/include/catalog/storage.h
src/include/commands/defrem.h
src/include/commands/trigger.h
src/include/commands/vacuum.h
src/include/common/file_utils.h
src/include/common/logging.h
src/include/executor/execParallel.h
src/include/executor/executor.h
src/include/executor/tuptable.h
src/include/libpq/libpq-be.h
src/include/miscadmin.h
src/include/nodes/execnodes.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/parser/parse_node.h
src/include/pgstat.h
src/include/port.h
src/include/replication/logical.h
src/include/replication/reorderbuffer.h
src/include/statistics/extended_stats_internal.h
src/include/statistics/statistics.h
src/include/storage/fd.h
src/include/storage/md.h
src/include/tcop/deparse_utility.h
src/include/utils/datum.h
src/interfaces/ecpg/ecpglib/cursor.c
src/interfaces/ecpg/ecpglib/descriptor.c
src/interfaces/ecpg/ecpglib/ecpglib_extern.h
src/interfaces/ecpg/ecpglib/error.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/ecpglib/prepare.c
src/interfaces/ecpg/include/ecpglib.h
src/interfaces/ecpg/preproc/ecpg.c
src/interfaces/ecpg/preproc/output.c
src/interfaces/ecpg/preproc/preproc_extern.h
src/interfaces/ecpg/preproc/type.h
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/libpq-fe.h
src/interfaces/libpq/pqexpbuffer.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_exec.c
src/pl/tcl/pltcl.c
src/port/dlopen.c
src/port/pg_bitutils.c
src/port/strtof.c
src/test/regress/pg_regress.c
src/test/regress/regress.c
src/tools/pgindent/typedefs.list

index 2576d4f9e9125e5f983f3a3250cfbb9805b22c92..c935abc7e09619601ab8d835324c9ae7cba81912 100644 (file)
@@ -160,7 +160,7 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
                                                         OffsetNumber upperbound);
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
-                                                                                                       IndexTuple itup);
+                                                IndexTuple itup);
 static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
                                         Page page, OffsetNumber offset);
 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
@@ -1029,7 +1029,7 @@ bt_target_page_check(BtreeCheckState *state)
                /* Fingerprint leaf page tuples (those that point to the heap) */
                if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
                {
-                       IndexTuple              norm;
+                       IndexTuple      norm;
 
                        norm = bt_normalize_tuple(state, itup);
                        bloom_add_element(state->filter, (unsigned char *) norm,
@@ -1174,7 +1174,7 @@ bt_target_page_check(BtreeCheckState *state)
                 */
                else if (offset == max)
                {
-                       BTScanInsert    rightkey;
+                       BTScanInsert rightkey;
 
                        /* Get item in next/right page */
                        rightkey = bt_right_page_check_scankey(state);
@@ -1851,7 +1851,8 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
                                                  bool *isnull, bool tupleIsAlive, void *checkstate)
 {
        BtreeCheckState *state = (BtreeCheckState *) checkstate;
-       IndexTuple      itup, norm;
+       IndexTuple      itup,
+                               norm;
 
        Assert(state->heapallindexed);
 
@@ -1931,7 +1932,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
 
        for (i = 0; i < tupleDescriptor->natts; i++)
        {
-               Form_pg_attribute       att;
+               Form_pg_attribute att;
 
                att = TupleDescAttr(tupleDescriptor, i);
 
index 3bbfbf2847b399513d2cbe683164ce76be6c2caf..2be0055c3fbbe4305bbf6821692bd854797d85ca 100644 (file)
@@ -156,7 +156,7 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (ARRNELEMS(ll) != dim)
                ereport(ERROR,
@@ -220,7 +220,7 @@ cube_a_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        dur = ARRPTR(ur);
 
@@ -260,7 +260,7 @@ cube_subset(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
        result = (NDBOX *) palloc0(size);
@@ -1780,7 +1780,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube))
        {
@@ -1828,7 +1828,7 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube) && (x1 == x2))
        {
index 85534a3a7682e35336802034f885d403c4f1da82..99baa496bd1e13e6fc8e3fc491208c7f945eb8da 100644 (file)
@@ -922,8 +922,11 @@ check_selective_binary_conversion(RelOptInfo *baserel,
                        /* Skip dropped attributes (probably shouldn't see any here). */
                        if (attr->attisdropped)
                                continue;
-                       /* Skip generated columns (COPY won't accept them in the column
-                        * list) */
+
+                       /*
+                        * Skip generated columns (COPY won't accept them in the column
+                        * list)
+                        */
                        if (attr->attgenerated)
                                continue;
                        *columns = lappend(*columns, makeString(pstrdup(attname)));
index fe10cb56b6eec199b49f64dc3a3c85dd80a20266..13dd7ac202c63a33f825ac247d89e99d78e962cb 100644 (file)
@@ -212,41 +212,44 @@ g_int_compress(PG_FUNCTION_ARGS)
                 */
                for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
                {
-                       int             r_end = dr[i];
-                       int             r_start = r_end;
-                       while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
+                       int                     r_end = dr[i];
+                       int                     r_start = r_end;
+
+                       while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
                                --r_start, --i, --lenr;
-                       dr[2*j] = r_start;
-                       dr[2*j+1] = r_end;
+                       dr[2 * j] = r_start;
+                       dr[2 * j + 1] = r_end;
                }
                /* just copy the rest, if any, as trivial ranges */
                for (; i >= 0; i--, j--)
-                       dr[2*j] = dr[2*j + 1] = dr[i];
+                       dr[2 * j] = dr[2 * j + 1] = dr[i];
 
                if (++j)
                {
                        /*
                         * shunt everything down to start at the right place
                         */
-                       memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
+                       memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
                }
+
                /*
                 * make "len" be number of array elements, not ranges
                 */
-               len = 2*(len - j);
+               len = 2 * (len - j);
                cand = 1;
                while (len > MAXNUMRANGE * 2)
                {
                        min = PG_INT64_MAX;
                        for (i = 2; i < len; i += 2)
-                               if (min > ((int64)dr[i] - (int64)dr[i - 1]))
+                               if (min > ((int64) dr[i] - (int64) dr[i - 1]))
                                {
-                                       min = ((int64)dr[i] - (int64)dr[i - 1]);
+                                       min = ((int64) dr[i] - (int64) dr[i - 1]);
                                        cand = i;
                                }
                        memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
                        len -= 2;
                }
+
                /*
                 * check sparseness of result
                 */
index e12d15c21e820b02cd09b62d41cfc02a6382cb77..2d3e50178f6c16b1f0ec1db77995d36c36afbe95 100644 (file)
@@ -298,10 +298,10 @@ internal_size(int *a, int len)
        for (i = 0; i < len; i += 2)
        {
                if (!i || a[i] != a[i - 1]) /* do not count repeated range */
-                       size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
+                       size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
        }
 
-       if (size > (int64)INT_MAX || size < (int64)INT_MIN)
+       if (size > (int64) INT_MAX || size < (int64) INT_MIN)
                return -1;                              /* overflow */
        return (int) size;
 }
index 16b8074a00670d1af1d1d436f89eb139ac33d790..64df24039692359ae023b4d4f441de75de15f871 100644 (file)
@@ -1153,8 +1153,9 @@ pgss_store(const char *query, uint64 queryId,
                queryId = pgss_hash_string(query, query_len);
 
                /*
-                * If we are unlucky enough to get a hash of zero(invalid), use queryID
-                * as 2 instead, queryID 1 is already in use for normal statements.
+                * If we are unlucky enough to get a hash of zero(invalid), use
+                * queryID as 2 instead, queryID 1 is already in use for normal
+                * statements.
                 */
                if (queryId == UINT64CONST(0))
                        queryId = UINT64CONST(2);
index bf399eebe5441922a9390a5a1b200b02885ba9c2..62e68b00e7c3f826390419dd332bf621d9517b36 100644 (file)
@@ -1138,7 +1138,7 @@ mp_int_mod(mp_int a, mp_int m, mp_int c)
 }
 
 mp_result
-mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
 {
        mpz_t           vtmp;
        mp_digit        vbuf[MP_VALUE_DIGITS(value)];
@@ -1819,7 +1819,7 @@ mp_int_root(mp_int a, mp_small b, mp_int c)
 }
 
 mp_result
-mp_int_to_int(mp_int z, mp_small * out)
+mp_int_to_int(mp_int z, mp_small *out)
 {
        assert(z != NULL);
 
@@ -1850,7 +1850,7 @@ mp_int_to_int(mp_int z, mp_small * out)
 }
 
 mp_result
-mp_int_to_uint(mp_int z, mp_usmall * out)
+mp_int_to_uint(mp_int z, mp_usmall *out)
 {
        assert(z != NULL);
 
index 9c471bb7a1d45645c6dffeb764095955d84ed8d3..0e1676d04e907f0e8b33ec0b901ee2713021a4d0 100644 (file)
@@ -218,7 +218,7 @@ mp_result   mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r);
 /** Sets `q` and `*r` to the quotent and remainder of `a / value`. Division by
        powers of 2 is detected and handled efficiently. The remainder is pinned to
        `0 <= *r < b`. Either of `q` or `r` may be NULL. */
-mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
+mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
 
 /** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
        special case for division by powers of two that is more efficient than
@@ -246,7 +246,7 @@ mp_result   mp_int_expt_full(mp_int a, mp_int b, mp_int c);
        The remainder is pinned to `0 <= r < value`. */
 static inline
 mp_result
-mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
 {
        return mp_int_div_value(a, value, 0, r);
 }
@@ -339,11 +339,11 @@ mp_int_sqrt(mp_int a, mp_int c)
 
 /** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_int(mp_int z, mp_small * out);
+mp_result      mp_int_to_int(mp_int z, mp_small *out);
 
 /** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_uint(mp_int z, mp_usmall * out);
+mp_result      mp_int_to_uint(mp_int z, mp_usmall *out);
 
 /** Converts `z` to a zero-terminated string of characters in the specified
        `radix`, writing at most `limit` characters to `str` including the
index 6151e8095d0cf29c8460d483416bef3643122295..e1df60bb3535a44484ec03b3780d9c8f5e36d053 100644 (file)
@@ -339,7 +339,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
 
        InitDirtySnapshot(SnapshotDirty);
 
-       nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
+       nblocks = hscan->rs_nblocks;    /* # blocks to be scanned */
 
        /* scan the relation */
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
index 2b6d885efed1b9e3659a435f1ae36c41c20017d1..04dcd40f2e7df8d1e3396ca287a7ffcc5e0c7653 100644 (file)
@@ -1858,7 +1858,7 @@ postgresExecForeignInsert(EState *estate,
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
        rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
-                                                                 slot, planSlot);
+                                                                  slot, planSlot);
        /* Revert that change */
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate;
@@ -1934,11 +1934,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
        bool            doNothing = false;
 
        /*
-        * If the foreign table we are about to insert routed rows into is also
-        * an UPDATE subplan result rel that will be updated later, proceeding
-        * with the INSERT will result in the later UPDATE incorrectly modifying
-        * those routed rows, so prevent the INSERT --- it would be nice if we
-        * could handle this case; but for now, throw an error for safety.
+        * If the foreign table we are about to insert routed rows into is also an
+        * UPDATE subplan result rel that will be updated later, proceeding with
+        * the INSERT will result in the later UPDATE incorrectly modifying those
+        * routed rows, so prevent the INSERT --- it would be nice if we could
+        * handle this case; but for now, throw an error for safety.
         */
        if (plan && plan->operation == CMD_UPDATE &&
                (resultRelInfo->ri_usesFdwDirectModify ||
@@ -3169,7 +3169,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
        if (!grouping_is_sortable(root->parse->groupClause) ||
                !pathkeys_contained_in(pathkeys, root->group_pathkeys))
        {
-               Path            sort_path;              /* dummy for result of cost_sort */
+               Path            sort_path;      /* dummy for result of cost_sort */
 
                cost_sort(&sort_path,
                                  root,
@@ -3191,7 +3191,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
                 * add 1/4th of that default.
                 */
                double          sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
- - 1.0) * 0.25;
                                                                                       - 1.0) * 0.25;
 
                *p_startup_cost *= sort_multiplier;
                *p_run_cost *= sort_multiplier;
@@ -3773,6 +3773,7 @@ store_returning_result(PgFdwModifyState *fmstate,
                                                                                        fmstate->retrieved_attrs,
                                                                                        NULL,
                                                                                        fmstate->temp_cxt);
+
                /*
                 * The returning slot will not necessarily be suitable to store
                 * heaptuples directly, so allow for conversion.
@@ -6059,8 +6060,8 @@ add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
                /*
                 * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
                 * so the input_rel should be a base, join, or ordered relation; and
-                * if it's an ordered relation, its input relation should be a base
-                * or join relation.
+                * if it's an ordered relation, its input relation should be a base or
+                * join relation.
                 */
                Assert(input_rel->reloptkind == RELOPT_BASEREL ||
                           input_rel->reloptkind == RELOPT_JOINREL ||
index 783b04a3cb960d7598ab788aa5ea3514ae255b49..a48a6cd757f4442ef291ac013db6a623488a79ea 100644 (file)
@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
                }
 
                /*
-                * Now walk the missing attributes. If there is a missing value
-                * make space for it. Otherwise, it's going to be NULL.
+                * Now walk the missing attributes. If there is a missing value make
+                * space for it. Otherwise, it's going to be NULL.
                 */
                for (attnum = firstmissingnum;
                         attnum < natts;
index b9a28d186333f9a40690b1fe9b4c083c28e8d965..dc46f2460e262a31eb051680362403d9359076e7 100644 (file)
@@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
                 * There is at least one empty page.  So we have to rescan the tree
                 * deleting empty pages.
                 */
-               Buffer                          buffer;
+               Buffer          buffer;
                DataPageDeleteStack root,
-                                                  *ptr,
-                                                  *tmp;
+                                  *ptr,
+                                  *tmp;
 
                buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
                                                                        RBM_NORMAL, gvs->strategy);
 
                /*
-                * Lock posting tree root for cleanup to ensure there are no concurrent
-                * inserts.
+                * Lock posting tree root for cleanup to ensure there are no
+                * concurrent inserts.
                 */
                LockBufferForCleanup(buffer);
 
index b648af1ff650d5d82d0d089f29f6a1d1d4dfcc54..c945b282721134833369154b442900dad8dd80ec 100644 (file)
@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
                while (segno < a_segno)
                {
                        /*
-                        * Once modification is started and page tail is copied, we've
-                        * to copy unmodified segments.
+                        * Once modification is started and page tail is copied, we've to
+                        * copy unmodified segments.
                         */
                        segsize = SizeOfGinPostingList(oldseg);
                        if (tailCopy)
@@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
                }
 
                /*
-                * We're about to start modification of the page.  So, copy tail of the
-                * page if it's not done already.
+                * We're about to start modification of the page.  So, copy tail of
+                * the page if it's not done already.
                 */
                if (!tailCopy && segptr != segmentend)
                {
-                       int tailSize = segmentend - segptr;
+                       int                     tailSize = segmentend - segptr;
 
                        tailCopy = (Pointer) palloc(tailSize);
                        memcpy(tailCopy, segptr, tailSize);
@@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
        segptr = (Pointer) oldseg;
        if (segptr != segmentend && tailCopy)
        {
-               int restSize = segmentend - segptr;
+               int                     restSize = segmentend - segptr;
 
                Assert(writePtr + restSize <= PageGetSpecialPointer(page));
                memcpy(writePtr, segptr, restSize);
index 94b6ad6a59b657a6394ed1c969e34ac5732d41ed..49df05653b31273a57c03179d3b22b7026b9ddd8 100644 (file)
@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
                        gistcheckpage(r, buffer);
 
                        /*
-                        * Otherwise, recycle it if deleted, and too old to have any processes
-                        * interested in it.
+                        * Otherwise, recycle it if deleted, and too old to have any
+                        * processes interested in it.
                         */
                        if (gistPageRecyclable(page))
                        {
                                /*
-                                * If we are generating WAL for Hot Standby then create a
-                                * WAL record that will allow us to conflict with queries
-                                * running on standby, in case they have snapshots older
-                                * than the page's deleteXid.
+                                * If we are generating WAL for Hot Standby then create a WAL
+                                * record that will allow us to conflict with queries running
+                                * on standby, in case they have snapshots older than the
+                                * page's deleteXid.
                                 */
                                if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
                                        gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
index 0bf15ae7236c559363ed9805248e0cf8d4f1275b..6ec1ec3df3a9d0825325f8276a8c824fe5a509b7 100644 (file)
@@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS)
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
@@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS)
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
index 19d2c529d80be4d3fb2191bd719bcde1826b1704..723e153705df663f7a9816e72cd23d847a47d0fa 100644 (file)
@@ -1684,8 +1684,8 @@ void
 heap_get_latest_tid(TableScanDesc sscan,
                                        ItemPointer tid)
 {
-       Relation relation = sscan->rs_rd;
-       Snapshot snapshot = sscan->rs_snapshot;
+       Relation        relation = sscan->rs_rd;
+       Snapshot        snapshot = sscan->rs_snapshot;
        ItemPointerData ctid;
        TransactionId priorXmax;
 
index 56b2abda5fb086d11ec0573d83d9319ab29d8c9c..674c1d3a818e1bb47ef84ebe27ccce52cd4082c2 100644 (file)
@@ -474,6 +474,7 @@ tuple_lock_retry:
                                                HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
                                        {
                                                tmfd->xmax = priorXmax;
+
                                                /*
                                                 * Cmin is the problematic value, so store that. See
                                                 * above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        Snapshot        snapshot;
        bool            need_unregister_snapshot = false;
        TransactionId OldestXmin;
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
 
@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        /* Publish number of blocks to scan */
        if (progress)
        {
-               BlockNumber             nblocks;
+               BlockNumber nblocks;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
                /* Report scan progress, if asked to. */
                if (progress)
                {
-                       BlockNumber     blocks_done = heapam_scan_get_blocks_done(hscan);
+                       BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
 
                        if (blocks_done != previous_blkno)
                        {
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        /* Report scan progress one last time. */
        if (progress)
        {
-               BlockNumber             blks_done;
+               BlockNumber blks_done;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
        bool            in_index[MaxHeapTuplesPerPage];
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
 
        /* state variables for the merge */
        ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
 heapam_scan_get_blocks_done(HeapScanDesc hscan)
 {
        ParallelBlockTableScanDesc bpscan = NULL;
-       BlockNumber             startblock;
-       BlockNumber             blocks_done;
+       BlockNumber startblock;
+       BlockNumber blocks_done;
 
        if (hscan->rs_base.rs_parallel != NULL)
        {
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
                blocks_done = hscan->rs_cblock - startblock;
        else
        {
-               BlockNumber     nblocks;
+               BlockNumber nblocks;
 
                nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
                blocks_done = nblocks - startblock +
index bce4274362ca6955746a8a42d1b769ec369ce141..131ec7b8d7f55d3db9a44cf57e20f08cae836634 100644 (file)
@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
        }
        else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
        {
-               int options = HEAP_INSERT_SKIP_FSM;
+               int                     options = HEAP_INSERT_SKIP_FSM;
 
                if (!state->rs_use_wal)
                        options |= HEAP_INSERT_SKIP_WAL;
index 74e957abb72bd8d850cc3595384d6ca43e94dbe0..e10715a7755273c5deeacd2b9eac4dc09bb44d6c 100644 (file)
@@ -2295,16 +2295,16 @@ static struct varlena *
 toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
 {
        struct varlena *result;
-       int32 rawsize;
+       int32           rawsize;
 
        Assert(VARATT_IS_COMPRESSED(attr));
 
        result = (struct varlena *) palloc(slicelength + VARHDRSZ);
 
        rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
-                                               VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
-                                               VARDATA(result),
-                                               slicelength, false);
+                                                         VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+                                                         VARDATA(result),
+                                                         slicelength, false);
        if (rawsize < 0)
                elog(ERROR, "compressed data is corrupted");
 
index 9e17acc110e706f62291ce8aecd8d86dee2ce916..637e47c08ce12067906e5f20be61716f6bfb3219 100644 (file)
@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                                 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
-                                                LVRelStats *vacrelstats);
+                                                 LVRelStats *vacrelstats);
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                                                 LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                                         * cheaper to get rid of it in the next pruning pass than
                                         * to treat it like an indexed tuple. Finally, if index
                                         * cleanup is disabled, the second heap pass will not
-                                        * execute, and the tuple will not get removed, so we
-                                        * must treat it like any other dead tuple that we choose
-                                        * to keep.
+                                        * execute, and the tuple will not get removed, so we must
+                                        * treat it like any other dead tuple that we choose to
+                                        * keep.
                                         *
                                         * If this were to happen for a tuple that actually needed
                                         * to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                                        all_visible = false;
                                        break;
                                case HEAPTUPLE_LIVE:
+
                                        /*
                                         * Count it as live.  Not only is this natural, but it's
                                         * also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                        else
                        {
                                /*
-                                * Here, we have indexes but index cleanup is disabled. Instead of
-                                * vacuuming the dead tuples on the heap, we just forget them.
+                                * Here, we have indexes but index cleanup is disabled.
+                                * Instead of vacuuming the dead tuples on the heap, we just
+                                * forget them.
                                 *
                                 * Note that vacrelstats->dead_tuples could have tuples which
                                 * became dead after HOT-pruning but are not marked dead yet.
-                                * We do not process them because it's a very rare condition, and
-                                * the next vacuum will process them anyway.
+                                * We do not process them because it's a very rare condition,
+                                * and the next vacuum will process them anyway.
                                 */
                                Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
                        }
index 0a9472c71b5f04bfb60b12fa90c90c917f7d7d0b..36a570045ac1fdb6c7cf76f6f4a12fba839e3fde 100644 (file)
@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
                /*
                 * Re-find and write lock the parent of buf.
                 *
-                * It's possible that the location of buf's downlink has changed
-                * since our initial _bt_search() descent.  _bt_getstackbuf() will
-                * detect and recover from this, updating the stack, which ensures
-                * that the new downlink will be inserted at the correct offset.
-                * Even buf's parent may have changed.
+                * It's possible that the location of buf's downlink has changed since
+                * our initial _bt_search() descent.  _bt_getstackbuf() will detect
+                * and recover from this, updating the stack, which ensures that the
+                * new downlink will be inserted at the correct offset. Even buf's
+                * parent may have changed.
                 */
                stack->bts_btentry = bknum;
                pbuf = _bt_getstackbuf(rel, stack);
index 5906c41f316fd03d587cae12aa1f28b78ba41398..dc42213ac6c73c2be339cafef5179326a07ded68 100644 (file)
@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
                new_stack->bts_parent = stack_in;
 
                /*
-                * Page level 1 is lowest non-leaf page level prior to leaves.  So,
-                * if we're on the level 1 and asked to lock leaf page in write mode,
+                * Page level 1 is lowest non-leaf page level prior to leaves.  So, if
+                * we're on the level 1 and asked to lock leaf page in write mode,
                 * then lock next page in write mode, because it must be a leaf.
                 */
                if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 
        /* Initialize remaining insertion scan key fields */
        inskey.heapkeyspace = _bt_heapkeyspace(rel);
-       inskey.anynullkeys = false;             /* unusued */
+       inskey.anynullkeys = false; /* unused */
        inskey.nextkey = nextkey;
        inskey.pivotsearch = false;
        inskey.scantid = NULL;
index 0b5be776d63fa07f16da80eca5170a436cbadf59..d6fa5742384fb28d7ede2a65ed624e2109184fde 100644 (file)
@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
                         * much smaller.
                         *
                         * Since the truncated tuple is often smaller than the original
-                        * tuple, it cannot just be copied in place (besides, we want
-                        * to actually save space on the leaf page).  We delete the
-                        * original high key, and add our own truncated high key at the
-                        * same offset.
+                        * tuple, it cannot just be copied in place (besides, we want to
+                        * actually save space on the leaf page).  We delete the original
+                        * high key, and add our own truncated high key at the same
+                        * offset.
                         *
                         * Note that the page layout won't be changed very much.  oitup is
                         * already located at the physical beginning of tuple space, so we
index 77c9c7285cdf3e7e56598cc86a4cf3bd644a8513..1238d544cd3a2477f9d39da857bee2bbbe307810 100644 (file)
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
        key = palloc(offsetof(BTScanInsertData, scankeys) +
                                 sizeof(ScanKeyData) * indnkeyatts);
        key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
-       key->anynullkeys = false;               /* initial assumption */
+       key->anynullkeys = false;       /* initial assumption */
        key->nextkey = false;
        key->pivotsearch = false;
        key->keysz = Min(indnkeyatts, tupnatts);
index 9365bc57ad5928059a98d1e7f20d0919540d4bb0..7bc5ec09bf980c405b61975a82dd9a1e6c317c68 100644 (file)
@@ -39,8 +39,8 @@ static int
 pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
                                                                 const pairingheap_node *b, void *arg)
 {
-       const           SpGistSearchItem *sa = (const SpGistSearchItem *) a;
-       const           SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+       const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+       const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
        SpGistScanOpaque so = (SpGistScanOpaque) arg;
        int                     i;
 
@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
 }
 
 static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        if (!so->state.attLeafType.attbyval &&
                DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
  * Called in queue context
  */
 static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        pairingheap_add(so->scanQueue, &item->phNode);
 }
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
  *             the scan is not ordered AND the item satisfies the scankeys
  */
 static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
                        SpGistLeafTuple leafTuple, bool isnull,
                        bool *reportedSome, storeRes_func storeRes)
 {
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
 static void
 spgInitInnerConsistentIn(spgInnerConsistentIn *in,
                                                 SpGistScanOpaque so,
-                                                SpGistSearchItem * item,
+                                                SpGistSearchItem *item,
                                                 SpGistInnerTuple innerTuple)
 {
        in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,
 
 static SpGistSearchItem *
 spgMakeInnerItem(SpGistScanOpaque so,
-                                SpGistSearchItem * parentItem,
+                                SpGistSearchItem *parentItem,
                                 SpGistNodeTuple tuple,
                                 spgInnerConsistentOut *out, int i, bool isnull,
                                 double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
 }
 
 static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
                         SpGistInnerTuple innerTuple, bool isnull)
 {
        MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers
 
 static OffsetNumber
 spgTestLeafTuple(SpGistScanOpaque so,
-                                SpGistSearchItem * item,
+                                SpGistSearchItem *item,
                                 Page page, OffsetNumber offset,
                                 bool isnull, bool isroot,
                                 bool *reportedSome,
index d22998c54bf4424d991bf0cfa2f5cea4deec5f68..a7c1a09e05f587889dbcbad4d000f7de2e3e673f 100644 (file)
@@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
                        res = (level >= queryLen) ||
                                DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
                                                                                                         PG_GET_COLLATION(),
-                                                                                                out->leafValue,
-                                                                                                PointerGetDatum(query)));
+                                                                                                        out->leafValue,
+                                                                                                        PointerGetDatum(query)));
 
                        if (!res)                       /* no need to consider remaining conditions */
                                break;
index fc85c6f940789af773ce2f458f29d65286e05546..2b1662a267dccf06dc1db064ece0a732097a43d9 100644 (file)
@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
                         * happened since VACUUM started.
                         *
                         * Note: we could make a tighter test by seeing if the xid is
-                        * "running" according to the active snapshot; but snapmgr.c doesn't
-                        * currently export a suitable API, and it's not entirely clear
-                        * that a tighter test is worth the cycles anyway.
+                        * "running" according to the active snapshot; but snapmgr.c
+                        * doesn't currently export a suitable API, and it's not entirely
+                        * clear that a tighter test is worth the cycles anyway.
                         */
                        if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
                                spgAddPendingTID(bds, &dt->pointer);
index c3455bc48ba875c934fec5cc5f9a5fea8dacfe5b..12adf590853103b9af816ea424faf09b02fab1d9 100644 (file)
@@ -94,7 +94,7 @@ TableScanDesc
 table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
 {
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
        Oid                     relid = RelationGetRelid(relation);
        Snapshot        snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
@@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
 {
        Snapshot        snapshot;
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
        Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
 
@@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel,
 void
 table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
 {
-       Relation rel = scan->rs_rd;
+       Relation        rel = scan->rs_rd;
        const TableAmRoutine *tableam = rel->rd_tableam;
 
        /*
index 20feeec327001afff9a67e8338607b99cb5f4759..b40da74e0920bda5d801a385b9db69c232efcc81 100644 (file)
@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)
 
        /*
         * Ensure parent(s) have XIDs, so that a child always has an XID later
-        * than its parent.  Mustn't recurse here, or we might get a stack overflow
-        * if we're at the bottom of a huge stack of subtransactions none of which
-        * have XIDs yet.
+        * than its parent.  Mustn't recurse here, or we might get a stack
+        * overflow if we're at the bottom of a huge stack of subtransactions none
+        * of which have XIDs yet.
         */
        if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
        {
@@ -2868,8 +2868,8 @@ StartTransactionCommand(void)
  * just skipping the reset in StartTransaction() won't work.)
  */
 static int     save_XactIsoLevel;
-static bool    save_XactReadOnly;
-static bool    save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
 
 void
 SaveTransactionCharacteristics(void)
@@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address)
                nxids = add_size(nxids, s->nChildXids);
        }
        Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
-               <= maxsize);
+                  <= maxsize);
 
        /* Copy them to our scratch space. */
        workspace = palloc(nxids * sizeof(TransactionId));
index 527522f165551a3d7f713516e4c00f7178f22973..c7c9e91b6a45150f4aedd1a492bb5485bfaaf2f6 100644 (file)
@@ -6397,9 +6397,9 @@ StartupXLOG(void)
                                        ereport(FATAL,
                                                        (errmsg("could not find redo location referenced by checkpoint record"),
                                                         errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
-                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
-                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
-                                                        DataDir, DataDir, DataDir)));
+                                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+                                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+                                                                        DataDir, DataDir, DataDir)));
                        }
                }
                else
index a600f43a675a05aaae02fed4efb0f1c7120febdb..f0fdda1eb913570720fc3804f8df934ff7cf6749 100644 (file)
@@ -848,7 +848,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames)
 
                                        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
                                        {
-                                               Oid             oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
+                                               Oid                     oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
 
                                                objects = lappend_oid(objects, oid);
                                        }
@@ -895,7 +895,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind)
 
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
-               Oid             oid  = ((Form_pg_class) GETSTRUCT(tuple))->oid;
+               Oid                     oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
 
                relations = lappend_oid(relations, oid);
        }
@@ -1311,7 +1311,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
        }
        else
        {
-               Oid             defAclOid;
+               Oid                     defAclOid;
 
                /* Prepare to insert or update pg_default_acl entry */
                MemSet(values, 0, sizeof(values));
@@ -1384,7 +1384,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
                if (isNew)
                        InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
                else
-                       InvokeObjectPostAlterHook(DefaultAclRelationId,  defAclOid, 0);
+                       InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
        }
 
        if (HeapTupleIsValid(tuple))
index 2878e6a5b036e21c25817f747117c3cacb604992..11936a657131f22dda6a2809d612e640211fa88a 100644 (file)
@@ -476,15 +476,15 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
 Datum
 pg_nextoid(PG_FUNCTION_ARGS)
 {
-       Oid             reloid = PG_GETARG_OID(0);
-       Name    attname = PG_GETARG_NAME(1);
-       Oid             idxoid = PG_GETARG_OID(2);
-       Relation rel;
-       Relation idx;
-       HeapTuple atttuple;
+       Oid                     reloid = PG_GETARG_OID(0);
+       Name            attname = PG_GETARG_NAME(1);
+       Oid                     idxoid = PG_GETARG_OID(2);
+       Relation        rel;
+       Relation        idx;
+       HeapTuple       atttuple;
        Form_pg_attribute attform;
-       AttrNumber attno;
-       Oid             newoid;
+       AttrNumber      attno;
+       Oid                     newoid;
 
        /*
         * As this function is not intended to be used during normal running, and
index 6cffe550b31f2063e1bc07216eecc42ee1257c97..3c46c2510713a7f6bd25941bf8589295f2ee729f 100644 (file)
@@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel,
                /*
                 * If the expression is just a NULL constant, we do not bother to make
                 * an explicit pg_attrdef entry, since the default behavior is
-                * equivalent.  This applies to column defaults, but not for generation
-                * expressions.
+                * equivalent.  This applies to column defaults, but not for
+                * generation expressions.
                 *
                 * Note a nonobvious property of this test: if the column is of a
                 * domain type, what we'll get is not a bare null Const but a
index c8d22e1b655cea27d733c17166798f0ad5545d9a..b7d1ac0923f8878d43bf5e5db67813f3319d52cd 100644 (file)
@@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
                                                                  Anum_pg_class_reloptions, &isnull);
 
        /*
-        * Extract the list of column names to be used for the index
-        * creation.
+        * Extract the list of column names to be used for the index creation.
         */
        for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
        {
@@ -1270,8 +1269,8 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
                                                          optionDatum,
                                                          INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
                                                          0,
-                                                         true, /* allow table to be a system catalog? */
-                                                         false, /* is_internal? */
+                                                         true, /* allow table to be a system catalog? */
+                                                         false,        /* is_internal? */
                                                          NULL);
 
        /* Close the relations used and clean up */
@@ -1540,7 +1539,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
                                                                          values, nulls, replaces);
                        CatalogTupleUpdate(description, &tuple->t_self, tuple);
 
-                       break;                                  /* Assume there can be only one match */
+                       break;                          /* Assume there can be only one match */
                }
 
                systable_endscan(sd);
@@ -1552,8 +1551,8 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
         */
        if (get_rel_relispartition(oldIndexId))
        {
-               List   *ancestors = get_partition_ancestors(oldIndexId);
-               Oid             parentIndexRelid = linitial_oid(ancestors);
+               List       *ancestors = get_partition_ancestors(oldIndexId);
+               Oid                     parentIndexRelid = linitial_oid(ancestors);
 
                DeleteInheritsTuple(oldIndexId, parentIndexRelid);
                StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
@@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
                                newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
-                               /* The data will be sent by the next pgstat_report_stat() call. */
+
+                               /*
+                                * The data will be sent by the next pgstat_report_stat()
+                                * call.
+                                */
                        }
                }
        }
@@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId)
        Relation        userIndexRelation;
 
        /*
-        * No more predicate locks will be acquired on this index, and we're
-        * about to stop doing inserts into the index which could show
-        * conflicts with existing predicate locks, so now is the time to move
-        * them to the heap relation.
+        * No more predicate locks will be acquired on this index, and we're about
+        * to stop doing inserts into the index which could show conflicts with
+        * existing predicate locks, so now is the time to move them to the heap
+        * relation.
         */
        userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
        userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
        TransferPredicateLocksToHeapRelation(userIndexRelation);
 
        /*
-        * Now we are sure that nobody uses the index for queries; they just
-        * might have it open for updating it.  So now we can unset indisready
-        * and indislive, then wait till nobody could be using it at all
-        * anymore.
+        * Now we are sure that nobody uses the index for queries; they just might
+        * have it open for updating it.  So now we can unset indisready and
+        * indislive, then wait till nobody could be using it at all anymore.
         */
        index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
 
        /*
-        * Invalidate the relcache for the table, so that after this commit
-        * all sessions will refresh the table's index list.  Forgetting just
-        * the index's relcache entry is not enough.
+        * Invalidate the relcache for the table, so that after this commit all
+        * sessions will refresh the table's index list.  Forgetting just the
+        * index's relcache entry is not enough.
         */
        CacheInvalidateRelcache(userHeapRelation);
 
@@ -1786,7 +1788,7 @@ index_constraint_create(Relation heapRelation,
         */
        if (OidIsValid(parentConstraintId))
        {
-               ObjectAddress   referenced;
+               ObjectAddress referenced;
 
                ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
                recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
@@ -2709,7 +2711,7 @@ index_build(Relation heapRelation,
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_BUILD,
                        PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
                        0, 0, 0, 0
@@ -3014,10 +3016,11 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
                        0, 0, 0, 0
                };
+
                pgstat_progress_update_multi_param(5, index, val);
        }
 
@@ -3080,7 +3083,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
                        0, 0
                };
index 8b51ec7f3940ec12ea63fd734c49ed2fba7da186..7a32ac1fb13ff89cb7d1f727f8ce43ac1b2ba810 100644 (file)
@@ -3050,7 +3050,7 @@ getObjectDescription(const ObjectAddress *object)
                                StringInfoData opfam;
 
                                amprocDesc = table_open(AccessMethodProcedureRelationId,
-                                                                          AccessShareLock);
+                                                                               AccessShareLock);
 
                                ScanKeyInit(&skey[0],
                                                        Anum_pg_amproc_oid,
index cdc8d9453d9c9779280d0586c8e592916c4ed2d3..310d45266ffc5de94353f443cbc2687494558c1e 100644 (file)
@@ -612,7 +612,7 @@ AggregateCreate(const char *aggName,
 
        myself = ProcedureCreate(aggName,
                                                         aggNamespace,
-                                                        replace, /* maybe replacement */
+                                                        replace,       /* maybe replacement */
                                                         false, /* doesn't return a set */
                                                         finaltype, /* returnType */
                                                         GetUserId(),   /* proowner */
@@ -693,10 +693,9 @@ AggregateCreate(const char *aggName,
 
                /*
                 * If we're replacing an existing entry, we need to validate that
-                * we're not changing anything that would break callers.
-                * Specifically we must not change aggkind or aggnumdirectargs,
-                * which affect how an aggregate call is treated in parse
-                * analysis.
+                * we're not changing anything that would break callers. Specifically
+                * we must not change aggkind or aggnumdirectargs, which affect how an
+                * aggregate call is treated in parse analysis.
                 */
                if (aggKind != oldagg->aggkind)
                        ereport(ERROR,
index fb22035a2a6d9f071656f0ae37cbb884a78791e6..3487caf82f6f6aabb453be9a73792bc344ff074c 100644 (file)
@@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName,
                                         prokind == PROKIND_PROCEDURE
                                         ? errmsg("cannot change whether a procedure has output parameters")
                                         : errmsg("cannot change return type of existing function"),
-                                        /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+                       /*
+                        * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+                        * AGGREGATE
+                        */
                                         errhint("Use %s %s first.",
                                                         dropcmd,
                                                         format_procedure(oldproc->oid))));
@@ -450,7 +454,7 @@ ProcedureCreate(const char *procedureName,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot change return type of existing function"),
                                                 errdetail("Row type defined by OUT parameters is different."),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
@@ -495,7 +499,7 @@ ProcedureCreate(const char *procedureName,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change name of input parameter \"%s\"",
                                                                        old_arg_names[j]),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
@@ -521,7 +525,7 @@ ProcedureCreate(const char *procedureName,
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot remove parameter defaults from existing function"),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
@@ -549,7 +553,7 @@ ProcedureCreate(const char *procedureName,
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change data type of existing parameter default value"),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
@@ -575,7 +579,7 @@ ProcedureCreate(const char *procedureName,
        else
        {
                /* Creating a new procedure */
-               Oid             newOid;
+               Oid                     newOid;
 
                /* First, get default permissions and set up proacl */
                proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,
index f8475c1aba209eec6f554b482acf90b68d63447c..b3bf81ae63327aaaffb9a53a8b34bd62cec0b8d7 100644 (file)
@@ -317,7 +317,7 @@ GetAllTablesPublications(void)
        result = NIL;
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Oid             oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
+               Oid                     oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
 
                result = lappend_oid(result, oid);
        }
index fb41f223ada2b523aea5d53501f7e77fbd424af3..3cc886f7fe22e5c707073a258a7c49b81ee36369 100644 (file)
@@ -99,7 +99,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
                        break;
                default:
                        elog(ERROR, "invalid relpersistence: %c", relpersistence);
-                       return NULL;                            /* placate compiler */
+                       return NULL;            /* placate compiler */
        }
 
        srel = smgropen(rnode, backend);
index c1603737eb557ab71fa8435b1f0a23145a77135c..c0e40980d5fce560c3a09fe3c75222daac9ae505 100644 (file)
@@ -61,7 +61,7 @@ CreateAccessMethod(CreateAmStmt *stmt)
                                 errhint("Must be superuser to create an access method.")));
 
        /* Check if name is used */
-       amoid = GetSysCacheOid1(AMNAME,  Anum_pg_am_oid,
+       amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
                                                        CStringGetDatum(stmt->amname));
        if (OidIsValid(amoid))
        {
index 3ee70560476ef3d1437325bef1c8340c62a8242e..cacc023619c4c0e5b7d9c16b34290ec1d4150b25 100644 (file)
@@ -70,8 +70,8 @@ typedef struct
 
 static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
 static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
-                          bool verbose, bool *pSwapToastByContent,
-                          TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
+                               bool verbose, bool *pSwapToastByContent,
+                               TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
 static List *get_tables_to_cluster(MemoryContext cluster_context);
 
 
@@ -614,7 +614,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose)
 
        /* Copy the heap data into the new table in the desired order */
        copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
-                                  &swap_toast_by_content, &frozenXid, &cutoffMulti);
+                                       &swap_toast_by_content, &frozenXid, &cutoffMulti);
 
        /*
         * Swap the physical files of the target and transient tables, then
index cd04e4ea81b99cfcf9276e44d54e0f169d99068f..806962a686bf2437ae1afdfa7d4be49f8ff5de77 100644 (file)
@@ -83,7 +83,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                                 errmsg("function \"%s\" must be fired for INSERT or UPDATE",
                                                funcname)));
-               ItemPointerSetInvalid(&checktid);               /* keep compiler quiet */
+               ItemPointerSetInvalid(&checktid);       /* keep compiler quiet */
        }
 
        slot = table_slot_create(trigdata->tg_relation, NULL);
@@ -109,7 +109,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
        tmptid = checktid;
        {
                IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
-               bool call_again = false;
+               bool            call_again = false;
 
                if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
                                                                         &call_again, NULL))
index 9707afabd98d15377dee1781e96e11475b2dcfcd..5015e5b3b602a1d46cc25999686c8f8827f21da9 100644 (file)
@@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok)
 
        /* We assume that there can be at most one matching tuple */
        if (HeapTupleIsValid(dbtuple))
-               oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+               oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
        else
                oid = InvalidOid;
 
index a6c6de78f11605223838957eac79564e59885190..039a87c15519d4c722149ee51d677fac5da06bc1 100644 (file)
@@ -606,7 +606,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 static void
 ExplainPrintSettings(ExplainState *es)
 {
-       int             num;
+       int                     num;
        struct config_generic **gucs;
 
        /* bail out if information about settings not requested */
@@ -622,13 +622,13 @@ ExplainPrintSettings(ExplainState *es)
 
        if (es->format != EXPLAIN_FORMAT_TEXT)
        {
-               int             i;
+               int                     i;
 
                ExplainOpenGroup("Settings", "Settings", true, es);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        setting = GetConfigOptionByName(conf->name, NULL, true);
@@ -640,14 +640,14 @@ ExplainPrintSettings(ExplainState *es)
        }
        else
        {
-               int             i;
-               StringInfoData  str;
+               int                     i;
+               StringInfoData str;
 
                initStringInfo(&str);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        if (i > 0)
@@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
        ExplainNode(ps, NIL, NULL, NULL, es);
 
        /*
-        * If requested, include information about GUC parameters with values
-        * that don't match the built-in defaults.
+        * If requested, include information about GUC parameters with values that
+        * don't match the built-in defaults.
         */
        ExplainPrintSettings(es);
 }
@@ -1674,7 +1674,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
                                if (es->costs && es->verbose &&
                                        outerPlanState(planstate)->worker_jit_instrument)
                                {
-                                       PlanState *child = outerPlanState(planstate);
+                                       PlanState  *child = outerPlanState(planstate);
                                        int                     n;
                                        SharedJitInstrumentation *w = child->worker_jit_instrument;
 
index d4723fced898785d3f371315b457e340bb70749e..300bb1261f757421aa3dc4ab6adc248b49d0832b 100644 (file)
@@ -903,9 +903,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("@extschema@"),
-                                                                               CStringGetTextDatum(qSchemaName));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("@extschema@"),
+                                                                                       CStringGetTextDatum(qSchemaName));
                }
 
                /*
@@ -916,9 +916,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
                {
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("MODULE_PATHNAME"),
-                                                                               CStringGetTextDatum(control->module_pathname));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("MODULE_PATHNAME"),
+                                                                                       CStringGetTextDatum(control->module_pathname));
                }
 
                /* And now back to C string */
index 7e7c03ef1247534c4e4cca4e1d02b493a6a59fa3..62a4c4fb9beb8932930ae60ba262509071097dbb 100644 (file)
@@ -96,8 +96,8 @@ static void update_relispartition(Oid relationId, bool newval);
  */
 struct ReindexIndexCallbackState
 {
-       bool        concurrent;                 /* flag from statement */
-       Oid         locked_table_oid;   /* tracks previously locked table */
+       bool            concurrent;             /* flag from statement */
+       Oid                     locked_table_oid;       /* tracks previously locked table */
 };
 
 /*
@@ -396,7 +396,7 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress)
                {
                        if (progress)
                        {
-                               PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
+                               PGPROC     *holder = BackendIdGetProc(old_snapshots[i].backendId);
 
                                pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
                                                                                         holder->pid);
@@ -984,7 +984,7 @@ DefineIndex(Oid relationId,
         */
        if (partitioned && stmt->relation && !stmt->relation->inh)
        {
-               PartitionDesc   pd = RelationGetPartitionDesc(rel);
+               PartitionDesc pd = RelationGetPartitionDesc(rel);
 
                if (pd->nparts != 0)
                        flags |= INDEX_CREATE_INVALID;
@@ -3003,7 +3003,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
        /* Get a session-level lock on each table. */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
@@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options)
 
                /*
                 * The index is now valid in the sense that it contains all currently
-                * interesting tuples.  But since it might not contain tuples deleted just
-                * before the reference snap was taken, we have to wait out any
+                * interesting tuples.  But since it might not contain tuples deleted
+                * just before the reference snap was taken, we have to wait out any
                 * transactions that might have older snapshots.
                 */
                pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
@@ -3250,7 +3250,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
         */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
index a191916d0324450c314a7d2c424750abb35ad976..95ec352abe688ad08ef73b70efbf2ae4aa53fbbf 100644 (file)
@@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum,
                elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
 
        /*
-        * When none of the defined statistics types contain datum values
-        * from the table's columns then there's no need to reset the stats.
-        * Functional dependencies and ndistinct stats should still hold true.
+        * When none of the defined statistics types contain datum values from the
+        * table's columns then there's no need to reset the stats. Functional
+        * dependencies and ndistinct stats should still hold true.
         */
        if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
        {
index bfcf9472d7af0a189a9e8502850136ff2fe84952..7fa8dcce61476a6ae7149c24b014e9e374f60d20 100644 (file)
@@ -379,7 +379,7 @@ static void ATExecCheckNotNull(AlteredTableInfo *tab, Relation rel,
                                   const char *colName, LOCKMODE lockmode);
 static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
 static bool ConstraintImpliedByRelConstraint(Relation scanrel,
-                                                                        List *partConstraint, List *existedConstraints);
+                                                                List *partConstraint, List *existedConstraints);
 static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
                                        Node *newDefault, LOCKMODE lockmode);
 static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
@@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
        }
 
        /*
-        * Now add any newly specified CHECK constraints to the new relation.
-        * Same as for defaults above, but these need to come after partitioning
-        * is set up.
+        * Now add any newly specified CHECK constraints to the new relation. Same
+        * as for defaults above, but these need to come after partitioning is set
+        * up.
         */
        if (stmt->constraints)
                AddRelationNewConstraints(rel, NIL, stmt->constraints,
@@ -1401,9 +1401,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
         */
        if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
        {
-               HeapTuple               locTuple;
-               Form_pg_index   indexform;
-               bool                    indisvalid;
+               HeapTuple       locTuple;
+               Form_pg_index indexform;
+               bool            indisvalid;
 
                locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
                if (!HeapTupleIsValid(locTuple))
@@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
                        {
                                Relation        toastrel = relation_open(toast_relid,
                                                                                                         AccessExclusiveLock);
+
                                RelationSetNewRelfilenode(toastrel,
                                                                                  toastrel->rd_rel->relpersistence);
                                table_close(toastrel, NoLock);
@@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        /* nothing to do here, oid columns don't exist anymore */
                        break;
                case AT_SetTableSpace:  /* SET TABLESPACE */
+
                        /*
                         * Only do this for partitioned tables and indexes, for which this
                         * is just a catalog change.  Other relation types which have
@@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
                {
                        /*
                         * If required, test the current data within the table against new
-                        * constraints generated by ALTER TABLE commands, but don't rebuild
-                        * data.
+                        * constraints generated by ALTER TABLE commands, but don't
+                        * rebuild data.
                         */
                        if (tab->constraints != NIL || tab->verify_new_notnull ||
                                tab->partition_constraint != NULL)
@@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
        {
                /*
                 * If we are rebuilding the tuples OR if we added any new but not
-                * verified NOT NULL constraints, check all not-null constraints.
-                * This is a bit of overkill but it minimizes risk of bugs, and
+                * verified NOT NULL constraints, check all not-null constraints. This
+                * is a bit of overkill but it minimizes risk of bugs, and
                 * heap_attisnull is a pretty cheap test anyway.
                 */
                for (i = 0; i < newTupDesc->natts; i++)
@@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
                        {
                                /*
                                 * If there's no rewrite, old and new table are guaranteed to
-                                * have the same AM, so we can just use the old slot to
-                                * verify new constraints etc.
+                                * have the same AM, so we can just use the old slot to verify
+                                * new constraints etc.
                                 */
                                insertslot = oldslot;
                        }
@@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
                /*
                 * Ordinarily phase 3 must ensure that no NULLs exist in columns that
                 * are set NOT NULL; however, if we can find a constraint which proves
-                * this then we can skip that.  We needn't bother looking if
-                * we've already found that we must verify some other NOT NULL
-                * constraint.
+                * this then we can skip that.  We needn't bother looking if we've
+                * already found that we must verify some other NOT NULL constraint.
                 */
                if (!tab->verify_new_notnull &&
                        !NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
@@ -10503,7 +10504,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
         */
        if (tab->rewrite)
        {
-               Relation    newrel;
+               Relation        newrel;
 
                newrel = table_open(RelationGetRelid(rel), NoLock);
                RelationClearMissing(newrel);
@@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                                        {
                                                /*
                                                 * Changing the type of a column that is used by a
-                                                * generated column is not allowed by SQL standard.
-                                                * It might be doable with some thinking and effort.
+                                                * generated column is not allowed by SQL standard. It
+                                                * might be doable with some thinking and effort.
                                                 */
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_SYNTAX_ERROR),
@@ -10862,13 +10863,13 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 
        /*
         * Here we go --- change the recorded column type and collation.  (Note
-        * heapTup is a copy of the syscache entry, so okay to scribble on.)
-        * First fix up the missing value if any.
+        * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+        * fix up the missing value if any.
         */
        if (attTup->atthasmissing)
        {
-               Datum       missingval;
-               bool        missingNull;
+               Datum           missingval;
+               bool            missingNull;
 
                /* if rewrite is true the missing value should already be cleared */
                Assert(tab->rewrite == 0);
@@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 
                /* if it's a null array there is nothing to do */
 
-               if (! missingNull)
+               if (!missingNull)
                {
                        /*
                         * Get the datum out of the array and repack it in a new array
@@ -10890,12 +10891,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                         * changed, only the array metadata.
                         */
 
-                       int one = 1;
-                       bool isNull;
-                       Datum       valuesAtt[Natts_pg_attribute];
-                       bool        nullsAtt[Natts_pg_attribute];
-                       bool        replacesAtt[Natts_pg_attribute];
-                       HeapTuple   newTup;
+                       int                     one = 1;
+                       bool            isNull;
+                       Datum           valuesAtt[Natts_pg_attribute];
+                       bool            nullsAtt[Natts_pg_attribute];
+                       bool            replacesAtt[Natts_pg_attribute];
+                       HeapTuple       newTup;
 
                        MemSet(valuesAtt, 0, sizeof(valuesAtt));
                        MemSet(nullsAtt, false, sizeof(nullsAtt));
@@ -10910,12 +10911,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                                                                                   attTup->attalign,
                                                                                   &isNull);
                        missingval = PointerGetDatum(
-                               construct_array(&missingval,
-                                                               1,
-                                                               targettype,
-                                                               tform->typlen,
-                                                               tform->typbyval,
-                                                               tform->typalign));
+                                                                                construct_array(&missingval,
+                                                                                                                1,
+                                                                                                                targettype,
+                                                                                                                tform->typlen,
+                                                                                                                tform->typbyval,
+                                                                                                                tform->typalign));
 
                        valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
                        replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
@@ -12311,16 +12312,16 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace)
        Oid                     reloid = RelationGetRelid(rel);
 
        /*
-        * Shouldn't be called on relations having storage; these are processed
-        * in phase 3.
+        * Shouldn't be called on relations having storage; these are processed in
+        * phase 3.
         */
        Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
 
        /* Can't allow a non-shared relation in pg_global */
        if (newTableSpace == GLOBALTABLESPACE_OID)
                ereport(ERROR,
-               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                       errmsg("only shared relations can be placed in pg_global tablespace")));
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("only shared relations can be placed in pg_global tablespace")));
 
        /*
         * No work if no change in tablespace.
@@ -15044,7 +15045,7 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
                                i = -1;
                                while ((i = bms_next_member(expr_attrs, i)) >= 0)
                                {
-                                       AttrNumber  attno = i + FirstLowInvalidHeapAttributeNumber;
+                                       AttrNumber      attno = i + FirstLowInvalidHeapAttributeNumber;
 
                                        if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
                                                ereport(ERROR,
@@ -15202,7 +15203,7 @@ PartConstraintImpliedByRelConstraint(Relation scanrel,
 bool
 ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
 {
-       List            *existConstraint = list_copy(provenConstraint);
+       List       *existConstraint = list_copy(provenConstraint);
        TupleConstr *constr = RelationGetDescr(scanrel)->constr;
        int                     num_check,
                                i;
@@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p
         * not-false and try to prove the same for testConstraint.
         *
         * Note that predicate_implied_by assumes its first argument is known
-        * immutable.  That should always be true for both NOT NULL and
-        * partition constraints, so we don't test it here.
+        * immutable.  That should always be true for both NOT NULL and partition
+        * constraints, so we don't test it here.
         */
        return predicate_implied_by(testConstraint, existConstraint, true);
 }
index 8ec963f1cfb3675abbe902e8463a742c3f57978a..33df2ec0af3bdf6afc0a68d3e919fad742238e48 100644 (file)
@@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned)
 
        /*
         * Allow explicit specification of database's default tablespace in
-        * default_tablespace without triggering permissions checks.  Don't
-        * allow specifying that when creating a partitioned table, however,
-        * since the result is confusing.
+        * default_tablespace without triggering permissions checks.  Don't allow
+        * specifying that when creating a partitioned table, however, since the
+        * result is confusing.
         */
        if (result == MyDatabaseTableSpace)
        {
index 2beb37814507d1da399144eecefa29c51cf3cafa..209021a61a93518876ad338cfc1d378d7eeb38b7 100644 (file)
@@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate,
                case AFTER_TRIGGER_FDW_REUSE:
 
                        /*
-                        * Store tuple in the slot so that tg_trigtuple does not
-                        * reference tuplestore memory.  (It is formally possible for the
-                        * trigger function to queue trigger events that add to the same
+                        * Store tuple in the slot so that tg_trigtuple does not reference
+                        * tuplestore memory.  (It is formally possible for the trigger
+                        * function to queue trigger events that add to the same
                         * tuplestore, which can push other tuples out of memory.)  The
                         * distinction is academic, because we start with a minimal tuple
                         * that is stored as a heap tuple, constructed in different memory
index afdd3307acd4c372921a9b3451d78486f506705b..d69a73d13ed703080fae89ebdff579096c07bf86 100644 (file)
@@ -88,13 +88,13 @@ void
 ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
 {
        VacuumParams params;
-       bool verbose = false;
-       bool skip_locked = false;
-       bool analyze = false;
-       bool freeze = false;
-       bool full = false;
-       bool disable_page_skipping = false;
-       ListCell        *lc;
+       bool            verbose = false;
+       bool            skip_locked = false;
+       bool            analyze = false;
+       bool            freeze = false;
+       bool            full = false;
+       bool            disable_page_skipping = false;
+       ListCell   *lc;
 
        /* Set default value */
        params.index_cleanup = VACOPT_TERNARY_DEFAULT;
@@ -103,7 +103,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
        /* Parse options list */
        foreach(lc, vacstmt->options)
        {
-               DefElem *opt = (DefElem *) lfirst(lc);
+               DefElem    *opt = (DefElem *) lfirst(lc);
 
                /* Parse common options for VACUUM and ANALYZE */
                if (strcmp(opt->defname, "verbose") == 0)
@@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options,
        /*
         * Determine the log level.
         *
-        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
-        * in the permission checks; otherwise, only log if the caller so requested.
+        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+        * statements in the permission checks; otherwise, only log if the caller
+        * so requested.
         */
        if (!IsAutoVacuumWorkerProcess())
                elevel = WARNING;
@@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void)
                }
 
                /*
-                * Some table AMs might not need per-relation xid / multixid
-                * horizons. It therefore seems reasonable to allow relfrozenxid and
-                * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+                * Some table AMs might not need per-relation xid / multixid horizons.
+                * It therefore seems reasonable to allow relfrozenxid and relminmxid
+                * to not be set (i.e. set to their respective Invalid*Id)
                 * independently. Thus validate and compute horizon for each only if
                 * set.
                 *
index 0a7b2b8f477d240481ea7143bb5850227197b83d..5d64471eeda193f0dc3fc019ff8323ee04d35a99 100644 (file)
@@ -2367,10 +2367,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info)
 static void
 ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
 {
-       PlanState *parent = state->parent;
+       PlanState  *parent = state->parent;
        TupleDesc       desc = NULL;
        const TupleTableSlotOps *tts_ops = NULL;
-       bool isfixed = false;
+       bool            isfixed = false;
 
        if (op->d.fetch.known_desc != NULL)
        {
@@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
  */
 ExprState *
 ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
-                                          const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+                                          const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
                                           int numCols,
                                           const AttrNumber *keyColIdx,
                                           const Oid *eqfunctions,
index a018925d4ed115f5bec78cc8198acbf276bbc627..612a88456eb330b8f38085ab3bd8c66247984785 100644 (file)
@@ -4038,7 +4038,7 @@ void
 ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
                           TupleTableSlot *slot)
 {
-       Datum d;
+       Datum           d;
 
        /* slot_getsysattr has sufficient defenses against bad attnums */
        d = slot_getsysattr(slot,
index ed7c0606bf1bb72c12b8256a28e10e71cd299c85..44e4a6d104c1a7a7b90f7db694c07d293ca98d4d 100644 (file)
@@ -2551,7 +2551,7 @@ EvalPlanQualSlot(EPQState *epqstate,
 
                if (relation)
                        *slot = table_slot_create(relation,
-                                                                                &epqstate->estate->es_tupleTable);
+                                                                         &epqstate->estate->es_tupleTable);
                else
                        *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
                                                                           epqstate->origslot->tts_tupleDescriptor,
index 3d4b01cb4d69ec32223a5462558a44d6e23d503c..da9074c54c9821f4bad705d225dca45d7c2d34af 100644 (file)
@@ -1058,7 +1058,7 @@ ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
         * instrumentation in per-query context.
         */
        ibytes = offsetof(SharedJitInstrumentation, jit_instr)
-                        + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+               + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
        planstate->worker_jit_instrument =
                MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
 
@@ -1133,7 +1133,7 @@ ExecParallelCleanup(ParallelExecutorInfo *pei)
        /* Accumulate JIT instrumentation, if any. */
        if (pei->jit_instrumentation)
                ExecParallelRetrieveJitInstrumentation(pei->planstate,
-                                                                                       pei->jit_instrumentation);
+                                                                                          pei->jit_instrumentation);
 
        /* Free any serialized parameters. */
        if (DsaPointerIsValid(pei->param_exec))
index 6cdbb9db421adc0dd4183a8540d62f21c48808dd..73ba298c5da68499219fa5ff98cf7bb62bd80eef 100644 (file)
@@ -145,12 +145,12 @@ typedef struct PartitionDispatchData
        TupleTableSlot *tupslot;
        AttrNumber *tupmap;
        int                     indexes[FLEXIBLE_ARRAY_MEMBER];
-} PartitionDispatchData;
+}                      PartitionDispatchData;
 
 /* struct to hold result relations coming from UPDATE subplans */
 typedef struct SubplanResultRelHashElem
 {
-       Oid             relid;          /* hash key -- must be first */
+       Oid                     relid;                  /* hash key -- must be first */
        ResultRelInfo *rri;
 } SubplanResultRelHashElem;
 
@@ -375,7 +375,7 @@ ExecFindPartition(ModifyTableState *mtstate,
                                if (proute->subplan_resultrel_htab)
                                {
                                        Oid                     partoid = partdesc->oids[partidx];
-                                       SubplanResultRelHashElem   *elem;
+                                       SubplanResultRelHashElem *elem;
 
                                        elem = hash_search(proute->subplan_resultrel_htab,
                                                                           &partoid, HASH_FIND, NULL);
@@ -474,7 +474,7 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
                ResultRelInfo *rri = &mtstate->resultRelInfo[i];
                bool            found;
                Oid                     partoid = RelationGetRelid(rri->ri_RelationDesc);
-               SubplanResultRelHashElem   *elem;
+               SubplanResultRelHashElem *elem;
 
                elem = (SubplanResultRelHashElem *)
                        hash_search(htab, &partoid, HASH_ENTER, &found);
@@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
                                 * It's safe to reuse these from the partition root, as we
                                 * only process one tuple at a time (therefore we won't
                                 * overwrite needed data in slots), and the results of
-                                * projections are independent of the underlying
-                                * storage. Projections and where clauses themselves don't
-                                * store state / are independent of the underlying storage.
+                                * projections are independent of the underlying storage.
+                                * Projections and where clauses themselves don't store state
+                                * / are independent of the underlying storage.
                                 */
                                leaf_part_rri->ri_onConflict->oc_ProjSlot =
                                        rootResultRelInfo->ri_onConflict->oc_ProjSlot;
@@ -892,7 +892,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
 {
        MemoryContext oldcxt;
        PartitionRoutingInfo *partrouteinfo;
-       int             rri_index;
+       int                     rri_index;
 
        oldcxt = MemoryContextSwitchTo(proute->memcxt);
 
@@ -1668,16 +1668,16 @@ ExecCreatePartitionPruneState(PlanState *planstate,
                        }
                        else
                        {
-                               int             pd_idx = 0;
-                               int             pp_idx;
+                               int                     pd_idx = 0;
+                               int                     pp_idx;
 
                                /*
                                 * Some new partitions have appeared since plan time, and
                                 * those are reflected in our PartitionDesc but were not
                                 * present in the one used to construct subplan_map and
                                 * subpart_map.  So we must construct new and longer arrays
-                                * where the partitions that were originally present map to the
-                                * same place, and any added indexes map to -1, as if the
+                                * where the partitions that were originally present map to
+                                * the same place, and any added indexes map to -1, as if the
                                 * new partitions had been pruned.
                                 */
                                pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
index f8f6463358f8883fe126c2b66515f5565363dd3a..0326284c83f4c410421d3dc855d1b218fed9bde3 100644 (file)
@@ -227,7 +227,7 @@ retry:
 static bool
 tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
 {
-       int         attrnum;
+       int                     attrnum;
 
        Assert(slot1->tts_tupleDescriptor->natts ==
                   slot2->tts_tupleDescriptor->natts);
@@ -265,8 +265,8 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
 
                if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
                                                                                        att->attcollation,
-                                                                               slot1->tts_values[attrnum],
-                                                                               slot2->tts_values[attrnum])))
+                                                                                       slot1->tts_values[attrnum],
+                                                                                       slot2->tts_values[attrnum])))
                        return false;
        }
 
@@ -406,7 +406,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
                resultRelInfo->ri_TrigDesc->trig_insert_before_row)
        {
                if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
@@ -471,7 +471,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
        {
                if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                                                  tid, NULL, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
@@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                if (resultRelInfo->ri_PartitionCheck)
                        ExecPartitionCheck(resultRelInfo, slot, estate, true);
 
-               simple_table_update(rel, tid, slot,estate->es_snapshot,
+               simple_table_update(rel, tid, slot, estate->es_snapshot,
                                                        &update_indexes);
 
                if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
@@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                                 const char *relname)
 {
        /*
-        * We currently only support writing to regular tables.  However, give
-        * more specific error for partitioned and foreign tables.
+        * We currently only support writing to regular tables.  However, give a
+        * more specific error for partitioned and foreign tables.
         */
        if (relkind == RELKIND_PARTITIONED_TABLE)
                ereport(ERROR,
@@ -600,14 +600,14 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a partitioned table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
        else if (relkind == RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a foreign table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 
        if (relkind != RELKIND_RELATION)
                ereport(ERROR,
@@ -615,5 +615,5 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is not a table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 }
index 881131aff26b07050ec95a36e52db10e63fcb516..67c4be5108f21abbd7f884367b699cf54f682b99 100644 (file)
@@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node,
 
                        /* Check if it meets the access-method conditions */
                        if (!(*recheckMtd) (node, slot))
-                               return ExecClearTuple(slot);    /* would not be returned by scan */
+                               return ExecClearTuple(slot);    /* would not be returned by
+                                                                                                * scan */
 
                        return slot;
                }
index 55d1669db099ac8b3b4ffa6d5a3c70b84ea6fd94..ad13fd9a05bbe0abb71ebd5fcc15238c6d95b5f7 100644 (file)
 
 static TupleDesc ExecTypeFromTLInternal(List *targetList,
                                           bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
                                           int natts);
 static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
-                                                                                          HeapTuple tuple,
-                                                                                          Buffer buffer,
-                                                                                          bool transfer_pin);
+                                                       HeapTuple tuple,
+                                                       Buffer buffer,
+                                                       bool transfer_pin);
 static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);
 
 
@@ -138,7 +137,7 @@ tts_virtual_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
 {
        elog(ERROR, "virtual tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 /*
@@ -164,7 +163,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
@@ -200,7 +199,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
@@ -210,7 +209,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
                if (att->attlen == -1 &&
                        VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
                {
-                       Size data_length;
+                       Size            data_length;
 
                        /*
                         * We want to flatten the expanded value so that the materialized
@@ -228,7 +227,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
                }
                else
                {
-                       Size data_length = 0;
+                       Size            data_length = 0;
 
                        data = (char *) att_align_nominal(data, att->attalign);
                        data_length = att_addlength_datum(data_length, att->attlen, val);
@@ -382,7 +381,7 @@ tts_heap_materialize(TupleTableSlot *slot)
 static void
 tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
 {
-       HeapTuple tuple;
+       HeapTuple       tuple;
        MemoryContext oldcontext;
 
        oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
@@ -499,7 +498,7 @@ tts_minimal_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
 {
        elog(ERROR, "minimal tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 static void
@@ -1077,8 +1076,10 @@ TupleTableSlot *
 MakeTupleTableSlot(TupleDesc tupleDesc,
                                   const TupleTableSlotOps *tts_ops)
 {
-       Size            basesz, allocsz;
+       Size            basesz,
+                               allocsz;
        TupleTableSlot *slot;
+
        basesz = tts_ops->base_slot_size;
 
        /*
@@ -1866,7 +1867,7 @@ void
 slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
 {
        /* Check for caller errors */
-       Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+       Assert(slot->tts_nvalid < attnum);      /* slot_getsomeattr checked */
        Assert(attnum > 0);
 
        if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
@@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
        slot->tts_ops->getsomeattrs(slot, attnum);
 
        /*
-        * If the underlying tuple doesn't have enough attributes, tuple descriptor
-        * must have the missing attributes.
+        * If the underlying tuple doesn't have enough attributes, tuple
+        * descriptor must have the missing attributes.
         */
        if (unlikely(slot->tts_nvalid < attnum))
        {
index 965e5dea70ed17051d577747b113e52358ea6926..b34f565bfe73e3f733e5302e7b826b3da99bb041 100644 (file)
@@