Initial pgindent run for v12.
author: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 22 May 2019 16:55:34 +0000 (12:55 -0400)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Wed, 22 May 2019 16:55:34 +0000 (12:55 -0400)
This is still using the 2.0 version of pg_bsd_indent.
I thought it would be good to commit this separately,
so as to document the differences between 2.0 and 2.1 behavior.

Discussion: https://postgr.es/m/16296.1558103386@sss.pgh.pa.us

221 files changed:
contrib/amcheck/verify_nbtree.c
contrib/cube/cube.c
contrib/file_fdw/file_fdw.c
contrib/intarray/_int_gist.c
contrib/intarray/_int_tool.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pgcrypto/imath.c
contrib/pgcrypto/imath.h
contrib/pgstattuple/pgstattuple.c
contrib/postgres_fdw/postgres_fdw.c
src/backend/access/common/heaptuple.c
src/backend/access/gin/ginvacuum.c
src/backend/access/gin/ginxlog.c
src/backend/access/gist/gistutil.c
src/backend/access/hash/hashfunc.c
src/backend/access/heap/heapam.c
src/backend/access/heap/heapam_handler.c
src/backend/access/heap/rewriteheap.c
src/backend/access/heap/tuptoaster.c
src/backend/access/heap/vacuumlazy.c
src/backend/access/nbtree/nbtinsert.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtsort.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/spgist/spgscan.c
src/backend/access/spgist/spgtextproc.c
src/backend/access/spgist/spgvacuum.c
src/backend/access/table/tableam.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/catalog/aclchk.c
src/backend/catalog/catalog.c
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_publication.c
src/backend/catalog/storage.c
src/backend/commands/amcmds.c
src/backend/commands/cluster.c
src/backend/commands/constraint.c
src/backend/commands/dbcommands.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/indexcmds.c
src/backend/commands/statscmds.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/vacuum.c
src/backend/executor/execExpr.c
src/backend/executor/execExprInterp.c
src/backend/executor/execMain.c
src/backend/executor/execParallel.c
src/backend/executor/execPartition.c
src/backend/executor/execReplication.c
src/backend/executor/execScan.c
src/backend/executor/execTuples.c
src/backend/executor/functions.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeGatherMerge.c
src/backend/executor/nodeHashjoin.c
src/backend/executor/nodeIndexonlyscan.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSeqscan.c
src/backend/executor/nodeSubplan.c
src/backend/executor/nodeSubqueryscan.c
src/backend/libpq/auth.c
src/backend/libpq/be-secure-openssl.c
src/backend/nodes/bitmapset.c
src/backend/optimizer/path/clausesel.c
src/backend/optimizer/util/inherit.c
src/backend/optimizer/util/pathnode.c
src/backend/parser/parse_utilcmd.c
src/backend/partitioning/partdesc.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/slotfuncs.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walsender.c
src/backend/statistics/dependencies.c
src/backend/statistics/extended_stats.c
src/backend/statistics/mcv.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/file/fd.c
src/backend/storage/ipc/latch.c
src/backend/storage/ipc/pmsignal.c
src/backend/storage/ipc/signalfuncs.c
src/backend/storage/lmgr/lmgr.c
src/backend/storage/smgr/smgr.c
src/backend/storage/sync/sync.c
src/backend/tcop/dest.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/genfile.c
src/backend/utils/adt/geo_ops.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/like.c
src/backend/utils/adt/like_support.c
src/backend/utils/adt/numutils.c
src/backend/utils/adt/regexp.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/relmapper.c
src/backend/utils/cache/syscache.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/hash/hashfn.c
src/backend/utils/init/globals.c
src/backend/utils/init/miscinit.c
src/backend/utils/mb/mbutils.c
src/backend/utils/misc/guc.c
src/backend/utils/mmgr/dsa.c
src/bin/initdb/initdb.c
src/bin/pg_archivecleanup/pg_archivecleanup.c
src/bin/pg_basebackup/pg_receivewal.c
src/bin/pg_basebackup/pg_recvlogical.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_dump/common.c
src/bin/pg_dump/compress_io.c
src/bin/pg_dump/parallel.c
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_backup_db.c
src/bin/pg_dump/pg_backup_directory.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_dump/pg_restore.c
src/bin/pg_rewind/filemap.c
src/bin/pg_rewind/libpq_fetch.c
src/bin/pg_rewind/parsexlog.c
src/bin/pg_rewind/pg_rewind.c
src/bin/pg_upgrade/controldata.c
src/bin/pg_upgrade/function.c
src/bin/pg_waldump/pg_waldump.c
src/bin/pgbench/pgbench.c
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/copy.c
src/bin/psql/crosstabview.c
src/bin/psql/describe.c
src/bin/psql/startup.c
src/bin/psql/tab-complete.c
src/bin/psql/variables.c
src/common/d2s.c
src/common/f2s.c
src/common/file_utils.c
src/common/logging.c
src/common/pg_lzcompress.c
src/common/rmtree.c
src/include/access/amapi.h
src/include/access/gistxlog.h
src/include/access/hio.h
src/include/access/relscan.h
src/include/access/spgist_private.h
src/include/access/spgxlog.h
src/include/access/tableam.h
src/include/access/tupdesc.h
src/include/access/xlog.h
src/include/access/xlog_internal.h
src/include/catalog/dependency.h
src/include/catalog/index.h
src/include/catalog/pg_attrdef.h
src/include/catalog/pg_default_acl.h
src/include/catalog/pg_policy.h
src/include/catalog/storage.h
src/include/commands/defrem.h
src/include/commands/trigger.h
src/include/commands/vacuum.h
src/include/common/file_utils.h
src/include/common/logging.h
src/include/executor/execParallel.h
src/include/executor/executor.h
src/include/executor/tuptable.h
src/include/libpq/libpq-be.h
src/include/miscadmin.h
src/include/nodes/execnodes.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/parser/parse_node.h
src/include/pgstat.h
src/include/port.h
src/include/replication/logical.h
src/include/replication/reorderbuffer.h
src/include/statistics/extended_stats_internal.h
src/include/statistics/statistics.h
src/include/storage/fd.h
src/include/storage/md.h
src/include/tcop/deparse_utility.h
src/include/utils/datum.h
src/interfaces/ecpg/ecpglib/cursor.c
src/interfaces/ecpg/ecpglib/descriptor.c
src/interfaces/ecpg/ecpglib/ecpglib_extern.h
src/interfaces/ecpg/ecpglib/error.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/ecpglib/prepare.c
src/interfaces/ecpg/include/ecpglib.h
src/interfaces/ecpg/preproc/ecpg.c
src/interfaces/ecpg/preproc/output.c
src/interfaces/ecpg/preproc/preproc_extern.h
src/interfaces/ecpg/preproc/type.h
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/libpq-fe.h
src/interfaces/libpq/pqexpbuffer.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_exec.c
src/pl/tcl/pltcl.c
src/port/dlopen.c
src/port/pg_bitutils.c
src/port/strtof.c
src/test/regress/pg_regress.c
src/test/regress/regress.c
src/tools/pgindent/typedefs.list

index 2576d4f9e9125e5f983f3a3250cfbb9805b22c92..c935abc7e09619601ab8d835324c9ae7cba81912 100644 (file)
@@ -160,7 +160,7 @@ static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
                                                         OffsetNumber upperbound);
 static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
 static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
-                                                                                                       IndexTuple itup);
+                                                IndexTuple itup);
 static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
                                         Page page, OffsetNumber offset);
 static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
@@ -1029,7 +1029,7 @@ bt_target_page_check(BtreeCheckState *state)
                /* Fingerprint leaf page tuples (those that point to the heap) */
                if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
                {
-                       IndexTuple              norm;
+                       IndexTuple      norm;
 
                        norm = bt_normalize_tuple(state, itup);
                        bloom_add_element(state->filter, (unsigned char *) norm,
@@ -1174,7 +1174,7 @@ bt_target_page_check(BtreeCheckState *state)
                 */
                else if (offset == max)
                {
-                       BTScanInsert    rightkey;
+                       BTScanInsert rightkey;
 
                        /* Get item in next/right page */
                        rightkey = bt_right_page_check_scankey(state);
@@ -1851,7 +1851,8 @@ bt_tuple_present_callback(Relation index, HeapTuple htup, Datum *values,
                                                  bool *isnull, bool tupleIsAlive, void *checkstate)
 {
        BtreeCheckState *state = (BtreeCheckState *) checkstate;
-       IndexTuple      itup, norm;
+       IndexTuple      itup,
+                               norm;
 
        Assert(state->heapallindexed);
 
@@ -1931,7 +1932,7 @@ bt_normalize_tuple(BtreeCheckState *state, IndexTuple itup)
 
        for (i = 0; i < tupleDescriptor->natts; i++)
        {
-               Form_pg_attribute       att;
+               Form_pg_attribute att;
 
                att = TupleDescAttr(tupleDescriptor, i);
 
index 3bbfbf2847b399513d2cbe683164ce76be6c2caf..2be0055c3fbbe4305bbf6821692bd854797d85ca 100644 (file)
@@ -156,7 +156,7 @@ cube_a_f8_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (ARRNELEMS(ll) != dim)
                ereport(ERROR,
@@ -220,7 +220,7 @@ cube_a_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        dur = ARRPTR(ur);
 
@@ -260,7 +260,7 @@ cube_subset(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("array is too long"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
        result = (NDBOX *) palloc0(size);
@@ -1780,7 +1780,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube))
        {
@@ -1828,7 +1828,7 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                 errmsg("can't extend cube"),
                                 errdetail("A cube cannot have more than %d dimensions.",
-                                                          CUBE_MAX_DIM)));
+                                                  CUBE_MAX_DIM)));
 
        if (IS_POINT(cube) && (x1 == x2))
        {
index 85534a3a7682e35336802034f885d403c4f1da82..99baa496bd1e13e6fc8e3fc491208c7f945eb8da 100644 (file)
@@ -922,8 +922,11 @@ check_selective_binary_conversion(RelOptInfo *baserel,
                        /* Skip dropped attributes (probably shouldn't see any here). */
                        if (attr->attisdropped)
                                continue;
-                       /* Skip generated columns (COPY won't accept them in the column
-                        * list) */
+
+                       /*
+                        * Skip generated columns (COPY won't accept them in the column
+                        * list)
+                        */
                        if (attr->attgenerated)
                                continue;
                        *columns = lappend(*columns, makeString(pstrdup(attname)));
index fe10cb56b6eec199b49f64dc3a3c85dd80a20266..13dd7ac202c63a33f825ac247d89e99d78e962cb 100644 (file)
@@ -212,41 +212,44 @@ g_int_compress(PG_FUNCTION_ARGS)
                 */
                for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
                {
-                       int             r_end = dr[i];
-                       int             r_start = r_end;
-                       while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
+                       int                     r_end = dr[i];
+                       int                     r_start = r_end;
+
+                       while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
                                --r_start, --i, --lenr;
-                       dr[2*j] = r_start;
-                       dr[2*j+1] = r_end;
+                       dr[2 * j] = r_start;
+                       dr[2 * j + 1] = r_end;
                }
                /* just copy the rest, if any, as trivial ranges */
                for (; i >= 0; i--, j--)
-                       dr[2*j] = dr[2*j + 1] = dr[i];
+                       dr[2 * j] = dr[2 * j + 1] = dr[i];
 
                if (++j)
                {
                        /*
                         * shunt everything down to start at the right place
                         */
-                       memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
+                       memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
                }
+
                /*
                 * make "len" be number of array elements, not ranges
                 */
-               len = 2*(len - j);
+               len = 2 * (len - j);
                cand = 1;
                while (len > MAXNUMRANGE * 2)
                {
                        min = PG_INT64_MAX;
                        for (i = 2; i < len; i += 2)
-                               if (min > ((int64)dr[i] - (int64)dr[i - 1]))
+                               if (min > ((int64) dr[i] - (int64) dr[i - 1]))
                                {
-                                       min = ((int64)dr[i] - (int64)dr[i - 1]);
+                                       min = ((int64) dr[i] - (int64) dr[i - 1]);
                                        cand = i;
                                }
                        memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
                        len -= 2;
                }
+
                /*
                 * check sparseness of result
                 */
index e12d15c21e820b02cd09b62d41cfc02a6382cb77..2d3e50178f6c16b1f0ec1db77995d36c36afbe95 100644 (file)
@@ -298,10 +298,10 @@ internal_size(int *a, int len)
        for (i = 0; i < len; i += 2)
        {
                if (!i || a[i] != a[i - 1]) /* do not count repeated range */
-                       size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
+                       size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
        }
 
-       if (size > (int64)INT_MAX || size < (int64)INT_MIN)
+       if (size > (int64) INT_MAX || size < (int64) INT_MIN)
                return -1;                              /* overflow */
        return (int) size;
 }
index 16b8074a00670d1af1d1d436f89eb139ac33d790..64df24039692359ae023b4d4f441de75de15f871 100644 (file)
@@ -1153,8 +1153,9 @@ pgss_store(const char *query, uint64 queryId,
                queryId = pgss_hash_string(query, query_len);
 
                /*
-                * If we are unlucky enough to get a hash of zero(invalid), use queryID
-                * as 2 instead, queryID 1 is already in use for normal statements.
+                * If we are unlucky enough to get a hash of zero(invalid), use
+                * queryID as 2 instead, queryID 1 is already in use for normal
+                * statements.
                 */
                if (queryId == UINT64CONST(0))
                        queryId = UINT64CONST(2);
index bf399eebe5441922a9390a5a1b200b02885ba9c2..62e68b00e7c3f826390419dd332bf621d9517b36 100644 (file)
@@ -1138,7 +1138,7 @@ mp_int_mod(mp_int a, mp_int m, mp_int c)
 }
 
 mp_result
-mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
 {
        mpz_t           vtmp;
        mp_digit        vbuf[MP_VALUE_DIGITS(value)];
@@ -1819,7 +1819,7 @@ mp_int_root(mp_int a, mp_small b, mp_int c)
 }
 
 mp_result
-mp_int_to_int(mp_int z, mp_small * out)
+mp_int_to_int(mp_int z, mp_small *out)
 {
        assert(z != NULL);
 
@@ -1850,7 +1850,7 @@ mp_int_to_int(mp_int z, mp_small * out)
 }
 
 mp_result
-mp_int_to_uint(mp_int z, mp_usmall * out)
+mp_int_to_uint(mp_int z, mp_usmall *out)
 {
        assert(z != NULL);
 
index 9c471bb7a1d45645c6dffeb764095955d84ed8d3..0e1676d04e907f0e8b33ec0b901ee2713021a4d0 100644 (file)
@@ -218,7 +218,7 @@ mp_result   mp_int_div(mp_int a, mp_int b, mp_int q, mp_int r);
 /** Sets `q` and `*r` to the quotent and remainder of `a / value`. Division by
        powers of 2 is detected and handled efficiently. The remainder is pinned to
        `0 <= *r < b`. Either of `q` or `r` may be NULL. */
-mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
+mp_result      mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
 
 /** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
        special case for division by powers of two that is more efficient than
@@ -246,7 +246,7 @@ mp_result   mp_int_expt_full(mp_int a, mp_int b, mp_int c);
        The remainder is pinned to `0 <= r < value`. */
 static inline
 mp_result
-mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
 {
        return mp_int_div_value(a, value, 0, r);
 }
@@ -339,11 +339,11 @@ mp_int_sqrt(mp_int a, mp_int c)
 
 /** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_int(mp_int z, mp_small * out);
+mp_result      mp_int_to_int(mp_int z, mp_small *out);
 
 /** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
        If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result      mp_int_to_uint(mp_int z, mp_usmall * out);
+mp_result      mp_int_to_uint(mp_int z, mp_usmall *out);
 
 /** Converts `z` to a zero-terminated string of characters in the specified
        `radix`, writing at most `limit` characters to `str` including the
index 6151e8095d0cf29c8460d483416bef3643122295..e1df60bb3535a44484ec03b3780d9c8f5e36d053 100644 (file)
@@ -339,7 +339,7 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo)
 
        InitDirtySnapshot(SnapshotDirty);
 
-       nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
+       nblocks = hscan->rs_nblocks;    /* # blocks to be scanned */
 
        /* scan the relation */
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
index 2b6d885efed1b9e3659a435f1ae36c41c20017d1..04dcd40f2e7df8d1e3396ca287a7ffcc5e0c7653 100644 (file)
@@ -1858,7 +1858,7 @@ postgresExecForeignInsert(EState *estate,
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
        rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
-                                                                 slot, planSlot);
+                                                                  slot, planSlot);
        /* Revert that change */
        if (fmstate->aux_fmstate)
                resultRelInfo->ri_FdwState = fmstate;
@@ -1934,11 +1934,11 @@ postgresBeginForeignInsert(ModifyTableState *mtstate,
        bool            doNothing = false;
 
        /*
-        * If the foreign table we are about to insert routed rows into is also
-        * an UPDATE subplan result rel that will be updated later, proceeding
-        * with the INSERT will result in the later UPDATE incorrectly modifying
-        * those routed rows, so prevent the INSERT --- it would be nice if we
-        * could handle this case; but for now, throw an error for safety.
+        * If the foreign table we are about to insert routed rows into is also an
+        * UPDATE subplan result rel that will be updated later, proceeding with
+        * the INSERT will result in the later UPDATE incorrectly modifying those
+        * routed rows, so prevent the INSERT --- it would be nice if we could
+        * handle this case; but for now, throw an error for safety.
         */
        if (plan && plan->operation == CMD_UPDATE &&
                (resultRelInfo->ri_usesFdwDirectModify ||
@@ -3169,7 +3169,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
        if (!grouping_is_sortable(root->parse->groupClause) ||
                !pathkeys_contained_in(pathkeys, root->group_pathkeys))
        {
-               Path            sort_path;              /* dummy for result of cost_sort */
+               Path            sort_path;      /* dummy for result of cost_sort */
 
                cost_sort(&sort_path,
                                  root,
@@ -3191,7 +3191,7 @@ adjust_foreign_grouping_path_cost(PlannerInfo *root,
                 * add 1/4th of that default.
                 */
                double          sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
- - 1.0) * 0.25;
                                                                                       - 1.0) * 0.25;
 
                *p_startup_cost *= sort_multiplier;
                *p_run_cost *= sort_multiplier;
@@ -3773,6 +3773,7 @@ store_returning_result(PgFdwModifyState *fmstate,
                                                                                        fmstate->retrieved_attrs,
                                                                                        NULL,
                                                                                        fmstate->temp_cxt);
+
                /*
                 * The returning slot will not necessarily be suitable to store
                 * heaptuples directly, so allow for conversion.
@@ -6059,8 +6060,8 @@ add_foreign_final_paths(PlannerInfo *root, RelOptInfo *input_rel,
                /*
                 * Grouping and aggregation are not supported with FOR UPDATE/SHARE,
                 * so the input_rel should be a base, join, or ordered relation; and
-                * if it's an ordered relation, its input relation should be a base
-                * or join relation.
+                * if it's an ordered relation, its input relation should be a base or
+                * join relation.
                 */
                Assert(input_rel->reloptkind == RELOPT_BASEREL ||
                           input_rel->reloptkind == RELOPT_JOINREL ||
index 783b04a3cb960d7598ab788aa5ea3514ae255b49..a48a6cd757f4442ef291ac013db6a623488a79ea 100644 (file)
@@ -787,8 +787,8 @@ expand_tuple(HeapTuple *targetHeapTuple,
                }
 
                /*
-                * Now walk the missing attributes. If there is a missing value
-                * make space for it. Otherwise, it's going to be NULL.
+                * Now walk the missing attributes. If there is a missing value make
+                * space for it. Otherwise, it's going to be NULL.
                 */
                for (attnum = firstmissingnum;
                         attnum < natts;
index b9a28d186333f9a40690b1fe9b4c083c28e8d965..dc46f2460e262a31eb051680362403d9359076e7 100644 (file)
@@ -394,17 +394,17 @@ ginVacuumPostingTree(GinVacuumState *gvs, BlockNumber rootBlkno)
                 * There is at least one empty page.  So we have to rescan the tree
                 * deleting empty pages.
                 */
-               Buffer                          buffer;
+               Buffer          buffer;
                DataPageDeleteStack root,
-                                                  *ptr,
-                                                  *tmp;
+                                  *ptr,
+                                  *tmp;
 
                buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
                                                                        RBM_NORMAL, gvs->strategy);
 
                /*
-                * Lock posting tree root for cleanup to ensure there are no concurrent
-                * inserts.
+                * Lock posting tree root for cleanup to ensure there are no
+                * concurrent inserts.
                 */
                LockBufferForCleanup(buffer);
 
index b648af1ff650d5d82d0d089f29f6a1d1d4dfcc54..c945b282721134833369154b442900dad8dd80ec 100644 (file)
@@ -205,8 +205,8 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
                while (segno < a_segno)
                {
                        /*
-                        * Once modification is started and page tail is copied, we've
-                        * to copy unmodified segments.
+                        * Once modification is started and page tail is copied, we've to
+                        * copy unmodified segments.
                         */
                        segsize = SizeOfGinPostingList(oldseg);
                        if (tailCopy)
@@ -257,12 +257,12 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
                }
 
                /*
-                * We're about to start modification of the page.  So, copy tail of the
-                * page if it's not done already.
+                * We're about to start modification of the page.  So, copy tail of
+                * the page if it's not done already.
                 */
                if (!tailCopy && segptr != segmentend)
                {
-                       int tailSize = segmentend - segptr;
+                       int                     tailSize = segmentend - segptr;
 
                        tailCopy = (Pointer) palloc(tailSize);
                        memcpy(tailCopy, segptr, tailSize);
@@ -304,7 +304,7 @@ ginRedoRecompress(Page page, ginxlogRecompressDataLeaf *data)
        segptr = (Pointer) oldseg;
        if (segptr != segmentend && tailCopy)
        {
-               int restSize = segmentend - segptr;
+               int                     restSize = segmentend - segptr;
 
                Assert(writePtr + restSize <= PageGetSpecialPointer(page));
                memcpy(writePtr, segptr, restSize);
index 94b6ad6a59b657a6394ed1c969e34ac5732d41ed..49df05653b31273a57c03179d3b22b7026b9ddd8 100644 (file)
@@ -839,16 +839,16 @@ gistNewBuffer(Relation r)
                        gistcheckpage(r, buffer);
 
                        /*
-                        * Otherwise, recycle it if deleted, and too old to have any processes
-                        * interested in it.
+                        * Otherwise, recycle it if deleted, and too old to have any
+                        * processes interested in it.
                         */
                        if (gistPageRecyclable(page))
                        {
                                /*
-                                * If we are generating WAL for Hot Standby then create a
-                                * WAL record that will allow us to conflict with queries
-                                * running on standby, in case they have snapshots older
-                                * than the page's deleteXid.
+                                * If we are generating WAL for Hot Standby then create a WAL
+                                * record that will allow us to conflict with queries running
+                                * on standby, in case they have snapshots older than the
+                                * page's deleteXid.
                                 */
                                if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
                                        gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
index 0bf15ae7236c559363ed9805248e0cf8d4f1275b..6ec1ec3df3a9d0825325f8276a8c824fe5a509b7 100644 (file)
@@ -246,7 +246,7 @@ hashtext(PG_FUNCTION_ARGS)
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -271,7 +271,7 @@ hashtext(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
@@ -302,7 +302,7 @@ hashtextextended(PG_FUNCTION_ARGS)
 {
        text       *key = PG_GETARG_TEXT_PP(0);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -328,7 +328,7 @@ hashtextextended(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
index 19d2c529d80be4d3fb2191bd719bcde1826b1704..723e153705df663f7a9816e72cd23d847a47d0fa 100644 (file)
@@ -1684,8 +1684,8 @@ void
 heap_get_latest_tid(TableScanDesc sscan,
                                        ItemPointer tid)
 {
-       Relation relation = sscan->rs_rd;
-       Snapshot snapshot = sscan->rs_snapshot;
+       Relation        relation = sscan->rs_rd;
+       Snapshot        snapshot = sscan->rs_snapshot;
        ItemPointerData ctid;
        TransactionId priorXmax;
 
index 56b2abda5fb086d11ec0573d83d9319ab29d8c9c..674c1d3a818e1bb47ef84ebe27ccce52cd4082c2 100644 (file)
@@ -474,6 +474,7 @@ tuple_lock_retry:
                                                HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
                                        {
                                                tmfd->xmax = priorXmax;
+
                                                /*
                                                 * Cmin is the problematic value, so store that. See
                                                 * above.
@@ -1172,7 +1173,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        Snapshot        snapshot;
        bool            need_unregister_snapshot = false;
        TransactionId OldestXmin;
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
 
@@ -1263,7 +1264,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        /* Publish number of blocks to scan */
        if (progress)
        {
-               BlockNumber             nblocks;
+               BlockNumber nblocks;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
@@ -1314,7 +1315,7 @@ heapam_index_build_range_scan(Relation heapRelation,
                /* Report scan progress, if asked to. */
                if (progress)
                {
-                       BlockNumber     blocks_done = heapam_scan_get_blocks_done(hscan);
+                       BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
 
                        if (blocks_done != previous_blkno)
                        {
@@ -1668,7 +1669,7 @@ heapam_index_build_range_scan(Relation heapRelation,
        /* Report scan progress one last time. */
        if (progress)
        {
-               BlockNumber             blks_done;
+               BlockNumber blks_done;
 
                if (hscan->rs_base.rs_parallel != NULL)
                {
@@ -1720,7 +1721,7 @@ heapam_index_validate_scan(Relation heapRelation,
        BlockNumber root_blkno = InvalidBlockNumber;
        OffsetNumber root_offsets[MaxHeapTuplesPerPage];
        bool            in_index[MaxHeapTuplesPerPage];
-       BlockNumber     previous_blkno = InvalidBlockNumber;
+       BlockNumber previous_blkno = InvalidBlockNumber;
 
        /* state variables for the merge */
        ItemPointer indexcursor = NULL;
@@ -1955,8 +1956,8 @@ static BlockNumber
 heapam_scan_get_blocks_done(HeapScanDesc hscan)
 {
        ParallelBlockTableScanDesc bpscan = NULL;
-       BlockNumber             startblock;
-       BlockNumber             blocks_done;
+       BlockNumber startblock;
+       BlockNumber blocks_done;
 
        if (hscan->rs_base.rs_parallel != NULL)
        {
@@ -1974,7 +1975,7 @@ heapam_scan_get_blocks_done(HeapScanDesc hscan)
                blocks_done = hscan->rs_cblock - startblock;
        else
        {
-               BlockNumber     nblocks;
+               BlockNumber nblocks;
 
                nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
                blocks_done = nblocks - startblock +
index bce4274362ca6955746a8a42d1b769ec369ce141..131ec7b8d7f55d3db9a44cf57e20f08cae836634 100644 (file)
@@ -652,7 +652,7 @@ raw_heap_insert(RewriteState state, HeapTuple tup)
        }
        else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
        {
-               int options = HEAP_INSERT_SKIP_FSM;
+               int                     options = HEAP_INSERT_SKIP_FSM;
 
                if (!state->rs_use_wal)
                        options |= HEAP_INSERT_SKIP_WAL;
index 74e957abb72bd8d850cc3595384d6ca43e94dbe0..e10715a7755273c5deeacd2b9eac4dc09bb44d6c 100644 (file)
@@ -2295,16 +2295,16 @@ static struct varlena *
 toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
 {
        struct varlena *result;
-       int32 rawsize;
+       int32           rawsize;
 
        Assert(VARATT_IS_COMPRESSED(attr));
 
        result = (struct varlena *) palloc(slicelength + VARHDRSZ);
 
        rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
-                                               VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
-                                               VARDATA(result),
-                                               slicelength, false);
+                                                         VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+                                                         VARDATA(result),
+                                                         slicelength, false);
        if (rawsize < 0)
                elog(ERROR, "compressed data is corrupted");
 
index 9e17acc110e706f62291ce8aecd8d86dee2ce916..637e47c08ce12067906e5f20be61716f6bfb3219 100644 (file)
@@ -164,7 +164,7 @@ static void lazy_cleanup_index(Relation indrel,
 static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
                                 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
 static bool should_attempt_truncation(VacuumParams *params,
-                                                LVRelStats *vacrelstats);
+                                                 LVRelStats *vacrelstats);
 static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
 static BlockNumber count_nondeletable_pages(Relation onerel,
                                                 LVRelStats *vacrelstats);
@@ -1067,9 +1067,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                                         * cheaper to get rid of it in the next pruning pass than
                                         * to treat it like an indexed tuple. Finally, if index
                                         * cleanup is disabled, the second heap pass will not
-                                        * execute, and the tuple will not get removed, so we
-                                        * must treat it like any other dead tuple that we choose
-                                        * to keep.
+                                        * execute, and the tuple will not get removed, so we must
+                                        * treat it like any other dead tuple that we choose to
+                                        * keep.
                                         *
                                         * If this were to happen for a tuple that actually needed
                                         * to be deleted, we'd be in trouble, because it'd
@@ -1087,6 +1087,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                                        all_visible = false;
                                        break;
                                case HEAPTUPLE_LIVE:
+
                                        /*
                                         * Count it as live.  Not only is this natural, but it's
                                         * also what acquire_sample_rows() does.
@@ -1251,13 +1252,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
                        else
                        {
                                /*
-                                * Here, we have indexes but index cleanup is disabled. Instead of
-                                * vacuuming the dead tuples on the heap, we just forget them.
+                                * Here, we have indexes but index cleanup is disabled.
+                                * Instead of vacuuming the dead tuples on the heap, we just
+                                * forget them.
                                 *
                                 * Note that vacrelstats->dead_tuples could have tuples which
                                 * became dead after HOT-pruning but are not marked dead yet.
-                                * We do not process them because it's a very rare condition, and
-                                * the next vacuum will process them anyway.
+                                * We do not process them because it's a very rare condition,
+                                * and the next vacuum will process them anyway.
                                 */
                                Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
                        }
index 0a9472c71b5f04bfb60b12fa90c90c917f7d7d0b..36a570045ac1fdb6c7cf76f6f4a12fba839e3fde 100644 (file)
@@ -1811,11 +1811,11 @@ _bt_insert_parent(Relation rel,
                /*
                 * Re-find and write lock the parent of buf.
                 *
-                * It's possible that the location of buf's downlink has changed
-                * since our initial _bt_search() descent.  _bt_getstackbuf() will
-                * detect and recover from this, updating the stack, which ensures
-                * that the new downlink will be inserted at the correct offset.
-                * Even buf's parent may have changed.
+                * It's possible that the location of buf's downlink has changed since
+                * our initial _bt_search() descent.  _bt_getstackbuf() will detect
+                * and recover from this, updating the stack, which ensures that the
+                * new downlink will be inserted at the correct offset. Even buf's
+                * parent may have changed.
                 */
                stack->bts_btentry = bknum;
                pbuf = _bt_getstackbuf(rel, stack);
index 5906c41f316fd03d587cae12aa1f28b78ba41398..dc42213ac6c73c2be339cafef5179326a07ded68 100644 (file)
@@ -166,8 +166,8 @@ _bt_search(Relation rel, BTScanInsert key, Buffer *bufP, int access,
                new_stack->bts_parent = stack_in;
 
                /*
-                * Page level 1 is lowest non-leaf page level prior to leaves.  So,
-                * if we're on the level 1 and asked to lock leaf page in write mode,
+                * Page level 1 is lowest non-leaf page level prior to leaves.  So, if
+                * we're on the level 1 and asked to lock leaf page in write mode,
                 * then lock next page in write mode, because it must be a leaf.
                 */
                if (opaque->btpo.level == 1 && access == BT_WRITE)
@@ -1235,7 +1235,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
 
        /* Initialize remaining insertion scan key fields */
        inskey.heapkeyspace = _bt_heapkeyspace(rel);
-       inskey.anynullkeys = false;             /* unusued */
+       inskey.anynullkeys = false; /* unused */
        inskey.nextkey = nextkey;
        inskey.pivotsearch = false;
        inskey.scantid = NULL;
index 0b5be776d63fa07f16da80eca5170a436cbadf59..d6fa5742384fb28d7ede2a65ed624e2109184fde 100644 (file)
@@ -962,10 +962,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
                         * much smaller.
                         *
                         * Since the truncated tuple is often smaller than the original
-                        * tuple, it cannot just be copied in place (besides, we want
-                        * to actually save space on the leaf page).  We delete the
-                        * original high key, and add our own truncated high key at the
-                        * same offset.
+                        * tuple, it cannot just be copied in place (besides, we want to
+                        * actually save space on the leaf page).  We delete the original
+                        * high key, and add our own truncated high key at the same
+                        * offset.
                         *
                         * Note that the page layout won't be changed very much.  oitup is
                         * already located at the physical beginning of tuple space, so we
index 77c9c7285cdf3e7e56598cc86a4cf3bd644a8513..1238d544cd3a2477f9d39da857bee2bbbe307810 100644 (file)
@@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
        key = palloc(offsetof(BTScanInsertData, scankeys) +
                                 sizeof(ScanKeyData) * indnkeyatts);
        key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
-       key->anynullkeys = false;               /* initial assumption */
+       key->anynullkeys = false;       /* initial assumption */
        key->nextkey = false;
        key->pivotsearch = false;
        key->keysz = Min(indnkeyatts, tupnatts);
index 9365bc57ad5928059a98d1e7f20d0919540d4bb0..7bc5ec09bf980c405b61975a82dd9a1e6c317c68 100644 (file)
@@ -39,8 +39,8 @@ static int
 pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
                                                                 const pairingheap_node *b, void *arg)
 {
-       const           SpGistSearchItem *sa = (const SpGistSearchItem *) a;
-       const           SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+       const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+       const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
        SpGistScanOpaque so = (SpGistScanOpaque) arg;
        int                     i;
 
@@ -79,7 +79,7 @@ pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
 }
 
 static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        if (!so->state.attLeafType.attbyval &&
                DatumGetPointer(item->value) != NULL)
@@ -97,7 +97,7 @@ spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
  * Called in queue context
  */
 static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
 {
        pairingheap_add(so->scanQueue, &item->phNode);
 }
@@ -439,7 +439,7 @@ spgNewHeapItem(SpGistScanOpaque so, int level, ItemPointer heapPtr,
  *             the scan is not ordered AND the item satisfies the scankeys
  */
 static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
                        SpGistLeafTuple leafTuple, bool isnull,
                        bool *reportedSome, storeRes_func storeRes)
 {
@@ -530,7 +530,7 @@ spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
 static void
 spgInitInnerConsistentIn(spgInnerConsistentIn *in,
                                                 SpGistScanOpaque so,
-                                                SpGistSearchItem * item,
+                                                SpGistSearchItem *item,
                                                 SpGistInnerTuple innerTuple)
 {
        in->scankeys = so->keyData;
@@ -551,7 +551,7 @@ spgInitInnerConsistentIn(spgInnerConsistentIn *in,
 
 static SpGistSearchItem *
 spgMakeInnerItem(SpGistScanOpaque so,
-                                SpGistSearchItem * parentItem,
+                                SpGistSearchItem *parentItem,
                                 SpGistNodeTuple tuple,
                                 spgInnerConsistentOut *out, int i, bool isnull,
                                 double *distances)
@@ -585,7 +585,7 @@ spgMakeInnerItem(SpGistScanOpaque so,
 }
 
 static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
                         SpGistInnerTuple innerTuple, bool isnull)
 {
        MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
@@ -683,7 +683,7 @@ enum SpGistSpecialOffsetNumbers
 
 static OffsetNumber
 spgTestLeafTuple(SpGistScanOpaque so,
-                                SpGistSearchItem * item,
+                                SpGistSearchItem *item,
                                 Page page, OffsetNumber offset,
                                 bool isnull, bool isroot,
                                 bool *reportedSome,
index d22998c54bf4424d991bf0cfa2f5cea4deec5f68..a7c1a09e05f587889dbcbad4d000f7de2e3e673f 100644 (file)
@@ -632,8 +632,8 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
                        res = (level >= queryLen) ||
                                DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
                                                                                                         PG_GET_COLLATION(),
-                                                                                                out->leafValue,
-                                                                                                PointerGetDatum(query)));
+                                                                                                        out->leafValue,
+                                                                                                        PointerGetDatum(query)));
 
                        if (!res)                       /* no need to consider remaining conditions */
                                break;
index fc85c6f940789af773ce2f458f29d65286e05546..2b1662a267dccf06dc1db064ece0a732097a43d9 100644 (file)
@@ -192,9 +192,9 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
                         * happened since VACUUM started.
                         *
                         * Note: we could make a tighter test by seeing if the xid is
-                        * "running" according to the active snapshot; but snapmgr.c doesn't
-                        * currently export a suitable API, and it's not entirely clear
-                        * that a tighter test is worth the cycles anyway.
+                        * "running" according to the active snapshot; but snapmgr.c
+                        * doesn't currently export a suitable API, and it's not entirely
+                        * clear that a tighter test is worth the cycles anyway.
                         */
                        if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
                                spgAddPendingTID(bds, &dt->pointer);
index c3455bc48ba875c934fec5cc5f9a5fea8dacfe5b..12adf590853103b9af816ea424faf09b02fab1d9 100644 (file)
@@ -94,7 +94,7 @@ TableScanDesc
 table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
 {
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
        Oid                     relid = RelationGetRelid(relation);
        Snapshot        snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
 
@@ -158,7 +158,7 @@ table_beginscan_parallel(Relation relation, ParallelTableScanDesc parallel_scan)
 {
        Snapshot        snapshot;
        uint32          flags = SO_TYPE_SEQSCAN |
-               SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+       SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
        Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
 
@@ -223,7 +223,7 @@ table_index_fetch_tuple_check(Relation rel,
 void
 table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
 {
-       Relation rel = scan->rs_rd;
+       Relation        rel = scan->rs_rd;
        const TableAmRoutine *tableam = rel->rd_tableam;
 
        /*
index 20feeec327001afff9a67e8338607b99cb5f4759..b40da74e0920bda5d801a385b9db69c232efcc81 100644 (file)
@@ -570,9 +570,9 @@ AssignTransactionId(TransactionState s)
 
        /*
         * Ensure parent(s) have XIDs, so that a child always has an XID later
-        * than its parent.  Mustn't recurse here, or we might get a stack overflow
-        * if we're at the bottom of a huge stack of subtransactions none of which
-        * have XIDs yet.
+        * than its parent.  Mustn't recurse here, or we might get a stack
+        * overflow if we're at the bottom of a huge stack of subtransactions none
+        * of which have XIDs yet.
         */
        if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
        {
@@ -2868,8 +2868,8 @@ StartTransactionCommand(void)
  * just skipping the reset in StartTransaction() won't work.)
  */
 static int     save_XactIsoLevel;
-static bool    save_XactReadOnly;
-static bool    save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
 
 void
 SaveTransactionCharacteristics(void)
@@ -5193,7 +5193,7 @@ SerializeTransactionState(Size maxsize, char *start_address)
                nxids = add_size(nxids, s->nChildXids);
        }
        Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
-               <= maxsize);
+                  <= maxsize);
 
        /* Copy them to our scratch space. */
        workspace = palloc(nxids * sizeof(TransactionId));
index 527522f165551a3d7f713516e4c00f7178f22973..c7c9e91b6a45150f4aedd1a492bb5485bfaaf2f6 100644 (file)
@@ -6397,9 +6397,9 @@ StartupXLOG(void)
                                        ereport(FATAL,
                                                        (errmsg("could not find redo location referenced by checkpoint record"),
                                                         errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
-                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
-                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
-                                                        DataDir, DataDir, DataDir)));
+                                                                        "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+                                                                        "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+                                                                        DataDir, DataDir, DataDir)));
                        }
                }
                else
index a600f43a675a05aaae02fed4efb0f1c7120febdb..f0fdda1eb913570720fc3804f8df934ff7cf6749 100644 (file)
@@ -848,7 +848,7 @@ objectsInSchemaToOids(ObjectType objtype, List *nspnames)
 
                                        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
                                        {
-                                               Oid             oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
+                                               Oid                     oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
 
                                                objects = lappend_oid(objects, oid);
                                        }
@@ -895,7 +895,7 @@ getRelationsInNamespace(Oid namespaceId, char relkind)
 
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
-               Oid             oid  = ((Form_pg_class) GETSTRUCT(tuple))->oid;
+               Oid                     oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
 
                relations = lappend_oid(relations, oid);
        }
@@ -1311,7 +1311,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
        }
        else
        {
-               Oid             defAclOid;
+               Oid                     defAclOid;
 
                /* Prepare to insert or update pg_default_acl entry */
                MemSet(values, 0, sizeof(values));
@@ -1384,7 +1384,7 @@ SetDefaultACL(InternalDefaultACL *iacls)
                if (isNew)
                        InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
                else
-                       InvokeObjectPostAlterHook(DefaultAclRelationId,  defAclOid, 0);
+                       InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
        }
 
        if (HeapTupleIsValid(tuple))
index 2878e6a5b036e21c25817f747117c3cacb604992..11936a657131f22dda6a2809d612e640211fa88a 100644 (file)
@@ -476,15 +476,15 @@ GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
 Datum
 pg_nextoid(PG_FUNCTION_ARGS)
 {
-       Oid             reloid = PG_GETARG_OID(0);
-       Name    attname = PG_GETARG_NAME(1);
-       Oid             idxoid = PG_GETARG_OID(2);
-       Relation rel;
-       Relation idx;
-       HeapTuple atttuple;
+       Oid                     reloid = PG_GETARG_OID(0);
+       Name            attname = PG_GETARG_NAME(1);
+       Oid                     idxoid = PG_GETARG_OID(2);
+       Relation        rel;
+       Relation        idx;
+       HeapTuple       atttuple;
        Form_pg_attribute attform;
-       AttrNumber attno;
-       Oid             newoid;
+       AttrNumber      attno;
+       Oid                     newoid;
 
        /*
         * As this function is not intended to be used during normal running, and
index 6cffe550b31f2063e1bc07216eecc42ee1257c97..3c46c2510713a7f6bd25941bf8589295f2ee729f 100644 (file)
@@ -2550,8 +2550,8 @@ AddRelationNewConstraints(Relation rel,
                /*
                 * If the expression is just a NULL constant, we do not bother to make
                 * an explicit pg_attrdef entry, since the default behavior is
-                * equivalent.  This applies to column defaults, but not for generation
-                * expressions.
+                * equivalent.  This applies to column defaults, but not for
+                * generation expressions.
                 *
                 * Note a nonobvious property of this test: if the column is of a
                 * domain type, what we'll get is not a bare null Const but a
index c8d22e1b655cea27d733c17166798f0ad5545d9a..b7d1ac0923f8878d43bf5e5db67813f3319d52cd 100644 (file)
@@ -1236,8 +1236,7 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
                                                                  Anum_pg_class_reloptions, &isnull);
 
        /*
-        * Extract the list of column names to be used for the index
-        * creation.
+        * Extract the list of column names to be used for the index creation.
         */
        for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
        {
@@ -1270,8 +1269,8 @@ index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char
                                                          optionDatum,
                                                          INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
                                                          0,
-                                                         true, /* allow table to be a system catalog? */
-                                                         false, /* is_internal? */
+                                                         true, /* allow table to be a system catalog? */
+                                                         false,        /* is_internal? */
                                                          NULL);
 
        /* Close the relations used and clean up */
@@ -1540,7 +1539,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
                                                                          values, nulls, replaces);
                        CatalogTupleUpdate(description, &tuple->t_self, tuple);
 
-                       break;                                  /* Assume there can be only one match */
+                       break;                          /* Assume there can be only one match */
                }
 
                systable_endscan(sd);
@@ -1552,8 +1551,8 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
         */
        if (get_rel_relispartition(oldIndexId))
        {
-               List   *ancestors = get_partition_ancestors(oldIndexId);
-               Oid             parentIndexRelid = linitial_oid(ancestors);
+               List       *ancestors = get_partition_ancestors(oldIndexId);
+               Oid                     parentIndexRelid = linitial_oid(ancestors);
 
                DeleteInheritsTuple(oldIndexId, parentIndexRelid);
                StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
@@ -1583,7 +1582,11 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
                                newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
                                newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
-                               /* The data will be sent by the next pgstat_report_stat() call. */
+
+                               /*
+                                * The data will be sent by the next pgstat_report_stat()
+                                * call.
+                                */
                        }
                }
        }
@@ -1614,27 +1617,26 @@ index_concurrently_set_dead(Oid heapId, Oid indexId)
        Relation        userIndexRelation;
 
        /*
-        * No more predicate locks will be acquired on this index, and we're
-        * about to stop doing inserts into the index which could show
-        * conflicts with existing predicate locks, so now is the time to move
-        * them to the heap relation.
+        * No more predicate locks will be acquired on this index, and we're about
+        * to stop doing inserts into the index which could show conflicts with
+        * existing predicate locks, so now is the time to move them to the heap
+        * relation.
         */
        userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
        userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
        TransferPredicateLocksToHeapRelation(userIndexRelation);
 
        /*
-        * Now we are sure that nobody uses the index for queries; they just
-        * might have it open for updating it.  So now we can unset indisready
-        * and indislive, then wait till nobody could be using it at all
-        * anymore.
+        * Now we are sure that nobody uses the index for queries; they just might
+        * have it open for updating it.  So now we can unset indisready and
+        * indislive, then wait till nobody could be using it at all anymore.
         */
        index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
 
        /*
-        * Invalidate the relcache for the table, so that after this commit
-        * all sessions will refresh the table's index list.  Forgetting just
-        * the index's relcache entry is not enough.
+        * Invalidate the relcache for the table, so that after this commit all
+        * sessions will refresh the table's index list.  Forgetting just the
+        * index's relcache entry is not enough.
         */
        CacheInvalidateRelcache(userHeapRelation);
 
@@ -1786,7 +1788,7 @@ index_constraint_create(Relation heapRelation,
         */
        if (OidIsValid(parentConstraintId))
        {
-               ObjectAddress   referenced;
+               ObjectAddress referenced;
 
                ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
                recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
@@ -2709,7 +2711,7 @@ index_build(Relation heapRelation,
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_BUILD,
                        PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
                        0, 0, 0, 0
@@ -3014,10 +3016,11 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
                        0, 0, 0, 0
                };
+
                pgstat_progress_update_multi_param(5, index, val);
        }
 
@@ -3080,7 +3083,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
                        PROGRESS_SCAN_BLOCKS_DONE,
                        PROGRESS_SCAN_BLOCKS_TOTAL
                };
-               const int64     val[] = {
+               const int64 val[] = {
                        PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
                        0, 0
                };
index 8b51ec7f3940ec12ea63fd734c49ed2fba7da186..7a32ac1fb13ff89cb7d1f727f8ce43ac1b2ba810 100644 (file)
@@ -3050,7 +3050,7 @@ getObjectDescription(const ObjectAddress *object)
                                StringInfoData opfam;
 
                                amprocDesc = table_open(AccessMethodProcedureRelationId,
-                                                                          AccessShareLock);
+                                                                               AccessShareLock);
 
                                ScanKeyInit(&skey[0],
                                                        Anum_pg_amproc_oid,
index cdc8d9453d9c9779280d0586c8e592916c4ed2d3..310d45266ffc5de94353f443cbc2687494558c1e 100644 (file)
@@ -612,7 +612,7 @@ AggregateCreate(const char *aggName,
 
        myself = ProcedureCreate(aggName,
                                                         aggNamespace,
-                                                        replace, /* maybe replacement */
+                                                        replace,       /* maybe replacement */
                                                         false, /* doesn't return a set */
                                                         finaltype, /* returnType */
                                                         GetUserId(),   /* proowner */
@@ -693,10 +693,9 @@ AggregateCreate(const char *aggName,
 
                /*
                 * If we're replacing an existing entry, we need to validate that
-                * we're not changing anything that would break callers.
-                * Specifically we must not change aggkind or aggnumdirectargs,
-                * which affect how an aggregate call is treated in parse
-                * analysis.
+                * we're not changing anything that would break callers. Specifically
+                * we must not change aggkind or aggnumdirectargs, which affect how an
+                * aggregate call is treated in parse analysis.
                 */
                if (aggKind != oldagg->aggkind)
                        ereport(ERROR,
index fb22035a2a6d9f071656f0ae37cbb884a78791e6..3487caf82f6f6aabb453be9a73792bc344ff074c 100644 (file)
@@ -423,7 +423,11 @@ ProcedureCreate(const char *procedureName,
                                         prokind == PROKIND_PROCEDURE
                                         ? errmsg("cannot change whether a procedure has output parameters")
                                         : errmsg("cannot change return type of existing function"),
-                                        /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+                       /*
+                        * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+                        * AGGREGATE
+                        */
                                         errhint("Use %s %s first.",
                                                         dropcmd,
                                                         format_procedure(oldproc->oid))));
@@ -450,7 +454,7 @@ ProcedureCreate(const char *procedureName,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot change return type of existing function"),
                                                 errdetail("Row type defined by OUT parameters is different."),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
@@ -495,7 +499,7 @@ ProcedureCreate(const char *procedureName,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change name of input parameter \"%s\"",
                                                                        old_arg_names[j]),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
@@ -521,7 +525,7 @@ ProcedureCreate(const char *procedureName,
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                 errmsg("cannot remove parameter defaults from existing function"),
-                                                /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                               /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                 errhint("Use %s %s first.",
                                                                 dropcmd,
                                                                 format_procedure(oldproc->oid))));
@@ -549,7 +553,7 @@ ProcedureCreate(const char *procedureName,
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
                                                         errmsg("cannot change data type of existing parameter default value"),
-                                                        /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+                                       /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
                                                         errhint("Use %s %s first.",
                                                                         dropcmd,
                                                                         format_procedure(oldproc->oid))));
@@ -575,7 +579,7 @@ ProcedureCreate(const char *procedureName,
        else
        {
                /* Creating a new procedure */
-               Oid             newOid;
+               Oid                     newOid;
 
                /* First, get default permissions and set up proacl */
                proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,
index f8475c1aba209eec6f554b482acf90b68d63447c..b3bf81ae63327aaaffb9a53a8b34bd62cec0b8d7 100644 (file)
@@ -317,7 +317,7 @@ GetAllTablesPublications(void)
        result = NIL;
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
-               Oid             oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
+               Oid                     oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
 
                result = lappend_oid(result, oid);
        }
index fb41f223ada2b523aea5d53501f7e77fbd424af3..3cc886f7fe22e5c707073a258a7c49b81ee36369 100644 (file)
@@ -99,7 +99,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
                        break;
                default:
                        elog(ERROR, "invalid relpersistence: %c", relpersistence);
-                       return NULL;                            /* placate compiler */
+                       return NULL;            /* placate compiler */
        }
 
        srel = smgropen(rnode, backend);
index c1603737eb557ab71fa8435b1f0a23145a77135c..c0e40980d5fce560c3a09fe3c75222daac9ae505 100644 (file)
@@ -61,7 +61,7 @@ CreateAccessMethod(CreateAmStmt *stmt)
                                 errhint("Must be superuser to create an access method.")));
 
        /* Check if name is used */
-       amoid = GetSysCacheOid1(AMNAME,  Anum_pg_am_oid,
+       amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
                                                        CStringGetDatum(stmt->amname));
        if (OidIsValid(amoid))
        {
index 3ee70560476ef3d1437325bef1c8340c62a8242e..cacc023619c4c0e5b7d9c16b34290ec1d4150b25 100644 (file)
@@ -70,8 +70,8 @@ typedef struct
 
 static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
 static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
-                          bool verbose, bool *pSwapToastByContent,
-                          TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
+                               bool verbose, bool *pSwapToastByContent,
+                               TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
 static List *get_tables_to_cluster(MemoryContext cluster_context);
 
 
@@ -614,7 +614,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose)
 
        /* Copy the heap data into the new table in the desired order */
        copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
-                                  &swap_toast_by_content, &frozenXid, &cutoffMulti);
+                                       &swap_toast_by_content, &frozenXid, &cutoffMulti);
 
        /*
         * Swap the physical files of the target and transient tables, then
index cd04e4ea81b99cfcf9276e44d54e0f169d99068f..806962a686bf2437ae1afdfa7d4be49f8ff5de77 100644 (file)
@@ -83,7 +83,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
                                (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
                                 errmsg("function \"%s\" must be fired for INSERT or UPDATE",
                                                funcname)));
-               ItemPointerSetInvalid(&checktid);               /* keep compiler quiet */
+               ItemPointerSetInvalid(&checktid);       /* keep compiler quiet */
        }
 
        slot = table_slot_create(trigdata->tg_relation, NULL);
@@ -109,7 +109,7 @@ unique_key_recheck(PG_FUNCTION_ARGS)
        tmptid = checktid;
        {
                IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
-               bool call_again = false;
+               bool            call_again = false;
 
                if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
                                                                         &call_again, NULL))
index 9707afabd98d15377dee1781e96e11475b2dcfcd..5015e5b3b602a1d46cc25999686c8f8827f21da9 100644 (file)
@@ -2033,7 +2033,7 @@ get_database_oid(const char *dbname, bool missing_ok)
 
        /* We assume that there can be at most one matching tuple */
        if (HeapTupleIsValid(dbtuple))
-               oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+               oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
        else
                oid = InvalidOid;
 
index a6c6de78f11605223838957eac79564e59885190..039a87c15519d4c722149ee51d677fac5da06bc1 100644 (file)
@@ -606,7 +606,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 static void
 ExplainPrintSettings(ExplainState *es)
 {
-       int             num;
+       int                     num;
        struct config_generic **gucs;
 
        /* bail out if information about settings not requested */
@@ -622,13 +622,13 @@ ExplainPrintSettings(ExplainState *es)
 
        if (es->format != EXPLAIN_FORMAT_TEXT)
        {
-               int             i;
+               int                     i;
 
                ExplainOpenGroup("Settings", "Settings", true, es);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        setting = GetConfigOptionByName(conf->name, NULL, true);
@@ -640,14 +640,14 @@ ExplainPrintSettings(ExplainState *es)
        }
        else
        {
-               int             i;
-               StringInfoData  str;
+               int                     i;
+               StringInfoData str;
 
                initStringInfo(&str);
 
                for (i = 0; i < num; i++)
                {
-                       char *setting;
+                       char       *setting;
                        struct config_generic *conf = gucs[i];
 
                        if (i > 0)
@@ -705,8 +705,8 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
        ExplainNode(ps, NIL, NULL, NULL, es);
 
        /*
-        * If requested, include information about GUC parameters with values
-        * that don't match the built-in defaults.
+        * If requested, include information about GUC parameters with values that
+        * don't match the built-in defaults.
         */
        ExplainPrintSettings(es);
 }
@@ -1674,7 +1674,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
                                if (es->costs && es->verbose &&
                                        outerPlanState(planstate)->worker_jit_instrument)
                                {
-                                       PlanState *child = outerPlanState(planstate);
+                                       PlanState  *child = outerPlanState(planstate);
                                        int                     n;
                                        SharedJitInstrumentation *w = child->worker_jit_instrument;
 
index d4723fced898785d3f371315b457e340bb70749e..300bb1261f757421aa3dc4ab6adc248b49d0832b 100644 (file)
@@ -903,9 +903,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
 
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("@extschema@"),
-                                                                               CStringGetTextDatum(qSchemaName));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("@extschema@"),
+                                                                                       CStringGetTextDatum(qSchemaName));
                }
 
                /*
@@ -916,9 +916,9 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
                {
                        t_sql = DirectFunctionCall3Coll(replace_text,
                                                                                        C_COLLATION_OID,
-                                                                               t_sql,
-                                                                               CStringGetTextDatum("MODULE_PATHNAME"),
-                                                                               CStringGetTextDatum(control->module_pathname));
+                                                                                       t_sql,
+                                                                                       CStringGetTextDatum("MODULE_PATHNAME"),
+                                                                                       CStringGetTextDatum(control->module_pathname));
                }
 
                /* And now back to C string */
index 7e7c03ef1247534c4e4cca4e1d02b493a6a59fa3..62a4c4fb9beb8932930ae60ba262509071097dbb 100644 (file)
@@ -96,8 +96,8 @@ static void update_relispartition(Oid relationId, bool newval);
  */
 struct ReindexIndexCallbackState
 {
-       bool        concurrent;                 /* flag from statement */
-       Oid         locked_table_oid;   /* tracks previously locked table */
+       bool            concurrent;             /* flag from statement */
+       Oid                     locked_table_oid;       /* tracks previously locked table */
 };
 
 /*
@@ -396,7 +396,7 @@ WaitForOlderSnapshots(TransactionId limitXmin, bool progress)
                {
                        if (progress)
                        {
-                               PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
+                               PGPROC     *holder = BackendIdGetProc(old_snapshots[i].backendId);
 
                                pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
                                                                                         holder->pid);
@@ -984,7 +984,7 @@ DefineIndex(Oid relationId,
         */
        if (partitioned && stmt->relation && !stmt->relation->inh)
        {
-               PartitionDesc   pd = RelationGetPartitionDesc(rel);
+               PartitionDesc pd = RelationGetPartitionDesc(rel);
 
                if (pd->nparts != 0)
                        flags |= INDEX_CREATE_INVALID;
@@ -3003,7 +3003,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
        /* Get a session-level lock on each table. */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
@@ -3112,8 +3112,8 @@ ReindexRelationConcurrently(Oid relationOid, int options)
 
                /*
                 * The index is now valid in the sense that it contains all currently
-                * interesting tuples.  But since it might not contain tuples deleted just
-                * before the reference snap was taken, we have to wait out any
+                * interesting tuples.  But since it might not contain tuples deleted
+                * just before the reference snap was taken, we have to wait out any
                 * transactions that might have older snapshots.
                 */
                pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
@@ -3250,7 +3250,7 @@ ReindexRelationConcurrently(Oid relationOid, int options)
         */
        foreach(lc, relationLocks)
        {
-               LockRelId   *lockrelid = (LockRelId *) lfirst(lc);
+               LockRelId  *lockrelid = (LockRelId *) lfirst(lc);
 
                UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
        }
index a191916d0324450c314a7d2c424750abb35ad976..95ec352abe688ad08ef73b70efbf2ae4aa53fbbf 100644 (file)
@@ -465,9 +465,9 @@ UpdateStatisticsForTypeChange(Oid statsOid, Oid relationOid, int attnum,
                elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
 
        /*
-        * When none of the defined statistics types contain datum values
-        * from the table's columns then there's no need to reset the stats.
-        * Functional dependencies and ndistinct stats should still hold true.
+        * When none of the defined statistics types contain datum values from the
+        * table's columns then there's no need to reset the stats. Functional
+        * dependencies and ndistinct stats should still hold true.
         */
        if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
        {
index bfcf9472d7af0a189a9e8502850136ff2fe84952..7fa8dcce61476a6ae7149c24b014e9e374f60d20 100644 (file)
@@ -379,7 +379,7 @@ static void ATExecCheckNotNull(AlteredTableInfo *tab, Relation rel,
                                   const char *colName, LOCKMODE lockmode);
 static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
 static bool ConstraintImpliedByRelConstraint(Relation scanrel,
-                                                                        List *partConstraint, List *existedConstraints);
+                                                                List *partConstraint, List *existedConstraints);
 static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
                                        Node *newDefault, LOCKMODE lockmode);
 static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
@@ -1099,9 +1099,9 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
        }
 
        /*
-        * Now add any newly specified CHECK constraints to the new relation.
-        * Same as for defaults above, but these need to come after partitioning
-        * is set up.
+        * Now add any newly specified CHECK constraints to the new relation. Same
+        * as for defaults above, but these need to come after partitioning is set
+        * up.
         */
        if (stmt->constraints)
                AddRelationNewConstraints(rel, NIL, stmt->constraints,
@@ -1401,9 +1401,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
         */
        if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
        {
-               HeapTuple               locTuple;
-               Form_pg_index   indexform;
-               bool                    indisvalid;
+               HeapTuple       locTuple;
+               Form_pg_index indexform;
+               bool            indisvalid;
 
                locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
                if (!HeapTupleIsValid(locTuple))
@@ -1786,6 +1786,7 @@ ExecuteTruncateGuts(List *explicit_rels, List *relids, List *relids_logged,
                        {
                                Relation        toastrel = relation_open(toast_relid,
                                                                                                         AccessExclusiveLock);
+
                                RelationSetNewRelfilenode(toastrel,
                                                                                  toastrel->rd_rel->relpersistence);
                                table_close(toastrel, NoLock);
@@ -4336,6 +4337,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        /* nothing to do here, oid columns don't exist anymore */
                        break;
                case AT_SetTableSpace:  /* SET TABLESPACE */
+
                        /*
                         * Only do this for partitioned tables and indexes, for which this
                         * is just a catalog change.  Other relation types which have
@@ -4626,8 +4628,8 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
                {
                        /*
                         * If required, test the current data within the table against new
-                        * constraints generated by ALTER TABLE commands, but don't rebuild
-                        * data.
+                        * constraints generated by ALTER TABLE commands, but don't
+                        * rebuild data.
                         */
                        if (tab->constraints != NIL || tab->verify_new_notnull ||
                                tab->partition_constraint != NULL)
@@ -4798,8 +4800,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
        {
                /*
                 * If we are rebuilding the tuples OR if we added any new but not
-                * verified NOT NULL constraints, check all not-null constraints.
-                * This is a bit of overkill but it minimizes risk of bugs, and
+                * verified NOT NULL constraints, check all not-null constraints. This
+                * is a bit of overkill but it minimizes risk of bugs, and
                 * heap_attisnull is a pretty cheap test anyway.
                 */
                for (i = 0; i < newTupDesc->natts; i++)
@@ -4941,8 +4943,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
                        {
                                /*
                                 * If there's no rewrite, old and new table are guaranteed to
-                                * have the same AM, so we can just use the old slot to
-                                * verify new constraints etc.
+                                * have the same AM, so we can just use the old slot to verify
+                                * new constraints etc.
                                 */
                                insertslot = oldslot;
                        }
@@ -6209,9 +6211,8 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
                /*
                 * Ordinarily phase 3 must ensure that no NULLs exist in columns that
                 * are set NOT NULL; however, if we can find a constraint which proves
-                * this then we can skip that.  We needn't bother looking if
-                * we've already found that we must verify some other NOT NULL
-                * constraint.
+                * this then we can skip that.  We needn't bother looking if we've
+                * already found that we must verify some other NOT NULL constraint.
                 */
                if (!tab->verify_new_notnull &&
                        !NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
@@ -10503,7 +10504,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
         */
        if (tab->rewrite)
        {
-               Relation    newrel;
+               Relation        newrel;
 
                newrel = table_open(RelationGetRelid(rel), NoLock);
                RelationClearMissing(newrel);
@@ -10657,8 +10658,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                                        {
                                                /*
                                                 * Changing the type of a column that is used by a
-                                                * generated column is not allowed by SQL standard.
-                                                * It might be doable with some thinking and effort.
+                                                * generated column is not allowed by SQL standard. It
+                                                * might be doable with some thinking and effort.
                                                 */
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_SYNTAX_ERROR),
@@ -10862,13 +10863,13 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 
        /*
         * Here we go --- change the recorded column type and collation.  (Note
-        * heapTup is a copy of the syscache entry, so okay to scribble on.)
-        * First fix up the missing value if any.
+        * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+        * fix up the missing value if any.
         */
        if (attTup->atthasmissing)
        {
-               Datum       missingval;
-               bool        missingNull;
+               Datum           missingval;
+               bool            missingNull;
 
                /* if rewrite is true the missing value should already be cleared */
                Assert(tab->rewrite == 0);
@@ -10881,7 +10882,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
 
                /* if it's a null array there is nothing to do */
 
-               if (! missingNull)
+               if (!missingNull)
                {
                        /*
                         * Get the datum out of the array and repack it in a new array
@@ -10890,12 +10891,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                         * changed, only the array metadata.
                         */
 
-                       int one = 1;
-                       bool isNull;
-                       Datum       valuesAtt[Natts_pg_attribute];
-                       bool        nullsAtt[Natts_pg_attribute];
-                       bool        replacesAtt[Natts_pg_attribute];
-                       HeapTuple   newTup;
+                       int                     one = 1;
+                       bool            isNull;
+                       Datum           valuesAtt[Natts_pg_attribute];
+                       bool            nullsAtt[Natts_pg_attribute];
+                       bool            replacesAtt[Natts_pg_attribute];
+                       HeapTuple       newTup;
 
                        MemSet(valuesAtt, 0, sizeof(valuesAtt));
                        MemSet(nullsAtt, false, sizeof(nullsAtt));
@@ -10910,12 +10911,12 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                                                                                   attTup->attalign,
                                                                                   &isNull);
                        missingval = PointerGetDatum(
-                               construct_array(&missingval,
-                                                               1,
-                                                               targettype,
-                                                               tform->typlen,
-                                                               tform->typbyval,
-                                                               tform->typalign));
+                                                                                construct_array(&missingval,
+                                                                                                                1,
+                                                                                                                targettype,
+                                                                                                                tform->typlen,
+                                                                                                                tform->typbyval,
+                                                                                                                tform->typalign));
 
                        valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
                        replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
@@ -12311,16 +12312,16 @@ ATExecSetTableSpaceNoStorage(Relation rel, Oid newTableSpace)
        Oid                     reloid = RelationGetRelid(rel);
 
        /*
-        * Shouldn't be called on relations having storage; these are processed
-        * in phase 3.
+        * Shouldn't be called on relations having storage; these are processed in
+        * phase 3.
         */
        Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
 
        /* Can't allow a non-shared relation in pg_global */
        if (newTableSpace == GLOBALTABLESPACE_OID)
                ereport(ERROR,
-               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                       errmsg("only shared relations can be placed in pg_global tablespace")));
+                               (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                                errmsg("only shared relations can be placed in pg_global tablespace")));
 
        /*
         * No work if no change in tablespace.
@@ -15044,7 +15045,7 @@ ComputePartitionAttrs(ParseState *pstate, Relation rel, List *partParams, AttrNu
                                i = -1;
                                while ((i = bms_next_member(expr_attrs, i)) >= 0)
                                {
-                                       AttrNumber  attno = i + FirstLowInvalidHeapAttributeNumber;
+                                       AttrNumber      attno = i + FirstLowInvalidHeapAttributeNumber;
 
                                        if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
                                                ereport(ERROR,
@@ -15202,7 +15203,7 @@ PartConstraintImpliedByRelConstraint(Relation scanrel,
 bool
 ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
 {
-       List            *existConstraint = list_copy(provenConstraint);
+       List       *existConstraint = list_copy(provenConstraint);
        TupleConstr *constr = RelationGetDescr(scanrel)->constr;
        int                     num_check,
                                i;
@@ -15240,8 +15241,8 @@ ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *p
         * not-false and try to prove the same for testConstraint.
         *
         * Note that predicate_implied_by assumes its first argument is known
-        * immutable.  That should always be true for both NOT NULL and
-        * partition constraints, so we don't test it here.
+        * immutable.  That should always be true for both NOT NULL and partition
+        * constraints, so we don't test it here.
         */
        return predicate_implied_by(testConstraint, existConstraint, true);
 }
index 8ec963f1cfb3675abbe902e8463a742c3f57978a..33df2ec0af3bdf6afc0a68d3e919fad742238e48 100644 (file)
@@ -1143,9 +1143,9 @@ GetDefaultTablespace(char relpersistence, bool partitioned)
 
        /*
         * Allow explicit specification of database's default tablespace in
-        * default_tablespace without triggering permissions checks.  Don't
-        * allow specifying that when creating a partitioned table, however,
-        * since the result is confusing.
+        * default_tablespace without triggering permissions checks.  Don't allow
+        * specifying that when creating a partitioned table, however, since the
+        * result is confusing.
         */
        if (result == MyDatabaseTableSpace)
        {
index 2beb37814507d1da399144eecefa29c51cf3cafa..209021a61a93518876ad338cfc1d378d7eeb38b7 100644 (file)
@@ -4245,9 +4245,9 @@ AfterTriggerExecute(EState *estate,
                case AFTER_TRIGGER_FDW_REUSE:
 
                        /*
-                        * Store tuple in the slot so that tg_trigtuple does not
-                        * reference tuplestore memory.  (It is formally possible for the
-                        * trigger function to queue trigger events that add to the same
+                        * Store tuple in the slot so that tg_trigtuple does not reference
+                        * tuplestore memory.  (It is formally possible for the trigger
+                        * function to queue trigger events that add to the same
                         * tuplestore, which can push other tuples out of memory.)  The
                         * distinction is academic, because we start with a minimal tuple
                         * that is stored as a heap tuple, constructed in different memory
index afdd3307acd4c372921a9b3451d78486f506705b..d69a73d13ed703080fae89ebdff579096c07bf86 100644 (file)
@@ -88,13 +88,13 @@ void
 ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
 {
        VacuumParams params;
-       bool verbose = false;
-       bool skip_locked = false;
-       bool analyze = false;
-       bool freeze = false;
-       bool full = false;
-       bool disable_page_skipping = false;
-       ListCell        *lc;
+       bool            verbose = false;
+       bool            skip_locked = false;
+       bool            analyze = false;
+       bool            freeze = false;
+       bool            full = false;
+       bool            disable_page_skipping = false;
+       ListCell   *lc;
 
        /* Set default value */
        params.index_cleanup = VACOPT_TERNARY_DEFAULT;
@@ -103,7 +103,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
        /* Parse options list */
        foreach(lc, vacstmt->options)
        {
-               DefElem *opt = (DefElem *) lfirst(lc);
+               DefElem    *opt = (DefElem *) lfirst(lc);
 
                /* Parse common options for VACUUM and ANALYZE */
                if (strcmp(opt->defname, "verbose") == 0)
@@ -593,8 +593,9 @@ vacuum_open_relation(Oid relid, RangeVar *relation, int options,
        /*
         * Determine the log level.
         *
-        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
-        * in the permission checks; otherwise, only log if the caller so requested.
+        * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+        * statements in the permission checks; otherwise, only log if the caller
+        * so requested.
         */
        if (!IsAutoVacuumWorkerProcess())
                elevel = WARNING;
@@ -1328,9 +1329,9 @@ vac_update_datfrozenxid(void)
                }
 
                /*
-                * Some table AMs might not need per-relation xid / multixid
-                * horizons. It therefore seems reasonable to allow relfrozenxid and
-                * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+                * Some table AMs might not need per-relation xid / multixid horizons.
+                * It therefore seems reasonable to allow relfrozenxid and relminmxid
+                * to not be set (i.e. set to their respective Invalid*Id)
                 * independently. Thus validate and compute horizon for each only if
                 * set.
                 *
index 0a7b2b8f477d240481ea7143bb5850227197b83d..5d64471eeda193f0dc3fc019ff8323ee04d35a99 100644 (file)
@@ -2367,10 +2367,10 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info)
 static void
 ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
 {
-       PlanState *parent = state->parent;
+       PlanState  *parent = state->parent;
        TupleDesc       desc = NULL;
        const TupleTableSlotOps *tts_ops = NULL;
-       bool isfixed = false;
+       bool            isfixed = false;
 
        if (op->d.fetch.known_desc != NULL)
        {
@@ -3313,7 +3313,7 @@ ExecBuildAggTransCall(ExprState *state, AggState *aggstate,
  */
 ExprState *
 ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
-                                          const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+                                          const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
                                           int numCols,
                                           const AttrNumber *keyColIdx,
                                           const Oid *eqfunctions,
index a018925d4ed115f5bec78cc8198acbf276bbc627..612a88456eb330b8f38085ab3bd8c66247984785 100644 (file)
@@ -4038,7 +4038,7 @@ void
 ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
                           TupleTableSlot *slot)
 {
-       Datum d;
+       Datum           d;
 
        /* slot_getsysattr has sufficient defenses against bad attnums */
        d = slot_getsysattr(slot,
index ed7c0606bf1bb72c12b8256a28e10e71cd299c85..44e4a6d104c1a7a7b90f7db694c07d293ca98d4d 100644 (file)
@@ -2551,7 +2551,7 @@ EvalPlanQualSlot(EPQState *epqstate,
 
                if (relation)
                        *slot = table_slot_create(relation,
-                                                                                &epqstate->estate->es_tupleTable);
+                                                                         &epqstate->estate->es_tupleTable);
                else
                        *slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
                                                                           epqstate->origslot->tts_tupleDescriptor,
index 3d4b01cb4d69ec32223a5462558a44d6e23d503c..da9074c54c9821f4bad705d225dca45d7c2d34af 100644 (file)
@@ -1058,7 +1058,7 @@ ExecParallelRetrieveJitInstrumentation(PlanState *planstate,
         * instrumentation in per-query context.
         */
        ibytes = offsetof(SharedJitInstrumentation, jit_instr)
-                        + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+               + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
        planstate->worker_jit_instrument =
                MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
 
@@ -1133,7 +1133,7 @@ ExecParallelCleanup(ParallelExecutorInfo *pei)
        /* Accumulate JIT instrumentation, if any. */
        if (pei->jit_instrumentation)
                ExecParallelRetrieveJitInstrumentation(pei->planstate,
-                                                                                       pei->jit_instrumentation);
+                                                                                          pei->jit_instrumentation);
 
        /* Free any serialized parameters. */
        if (DsaPointerIsValid(pei->param_exec))
index 6cdbb9db421adc0dd4183a8540d62f21c48808dd..73ba298c5da68499219fa5ff98cf7bb62bd80eef 100644 (file)
@@ -145,12 +145,12 @@ typedef struct PartitionDispatchData
        TupleTableSlot *tupslot;
        AttrNumber *tupmap;
        int                     indexes[FLEXIBLE_ARRAY_MEMBER];
-} PartitionDispatchData;
+}                      PartitionDispatchData;
 
 /* struct to hold result relations coming from UPDATE subplans */
 typedef struct SubplanResultRelHashElem
 {
-       Oid             relid;          /* hash key -- must be first */
+       Oid                     relid;                  /* hash key -- must be first */
        ResultRelInfo *rri;
 } SubplanResultRelHashElem;
 
@@ -375,7 +375,7 @@ ExecFindPartition(ModifyTableState *mtstate,
                                if (proute->subplan_resultrel_htab)
                                {
                                        Oid                     partoid = partdesc->oids[partidx];
-                                       SubplanResultRelHashElem   *elem;
+                                       SubplanResultRelHashElem *elem;
 
                                        elem = hash_search(proute->subplan_resultrel_htab,
                                                                           &partoid, HASH_FIND, NULL);
@@ -474,7 +474,7 @@ ExecHashSubPlanResultRelsByOid(ModifyTableState *mtstate,
                ResultRelInfo *rri = &mtstate->resultRelInfo[i];
                bool            found;
                Oid                     partoid = RelationGetRelid(rri->ri_RelationDesc);
-               SubplanResultRelHashElem   *elem;
+               SubplanResultRelHashElem *elem;
 
                elem = (SubplanResultRelHashElem *)
                        hash_search(htab, &partoid, HASH_ENTER, &found);
@@ -762,9 +762,9 @@ ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
                                 * It's safe to reuse these from the partition root, as we
                                 * only process one tuple at a time (therefore we won't
                                 * overwrite needed data in slots), and the results of
-                                * projections are independent of the underlying
-                                * storage. Projections and where clauses themselves don't
-                                * store state / are independent of the underlying storage.
+                                * projections are independent of the underlying storage.
+                                * Projections and where clauses themselves don't store state
+                                * / are independent of the underlying storage.
                                 */
                                leaf_part_rri->ri_onConflict->oc_ProjSlot =
                                        rootResultRelInfo->ri_onConflict->oc_ProjSlot;
@@ -892,7 +892,7 @@ ExecInitRoutingInfo(ModifyTableState *mtstate,
 {
        MemoryContext oldcxt;
        PartitionRoutingInfo *partrouteinfo;
-       int             rri_index;
+       int                     rri_index;
 
        oldcxt = MemoryContextSwitchTo(proute->memcxt);
 
@@ -1668,16 +1668,16 @@ ExecCreatePartitionPruneState(PlanState *planstate,
                        }
                        else
                        {
-                               int             pd_idx = 0;
-                               int             pp_idx;
+                               int                     pd_idx = 0;
+                               int                     pp_idx;
 
                                /*
                                 * Some new partitions have appeared since plan time, and
                                 * those are reflected in our PartitionDesc but were not
                                 * present in the one used to construct subplan_map and
                                 * subpart_map.  So we must construct new and longer arrays
-                                * where the partitions that were originally present map to the
-                                * same place, and any added indexes map to -1, as if the
+                                * where the partitions that were originally present map to
+                                * the same place, and any added indexes map to -1, as if the
                                 * new partitions had been pruned.
                                 */
                                pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
index f8f6463358f8883fe126c2b66515f5565363dd3a..0326284c83f4c410421d3dc855d1b218fed9bde3 100644 (file)
@@ -227,7 +227,7 @@ retry:
 static bool
 tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
 {
-       int         attrnum;
+       int                     attrnum;
 
        Assert(slot1->tts_tupleDescriptor->natts ==
                   slot2->tts_tupleDescriptor->natts);
@@ -265,8 +265,8 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
 
                if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
                                                                                        att->attcollation,
-                                                                               slot1->tts_values[attrnum],
-                                                                               slot2->tts_values[attrnum])))
+                                                                                       slot1->tts_values[attrnum],
+                                                                                       slot2->tts_values[attrnum])))
                        return false;
        }
 
@@ -406,7 +406,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot)
                resultRelInfo->ri_TrigDesc->trig_insert_before_row)
        {
                if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
@@ -471,7 +471,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
        {
                if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                                                  tid, NULL, slot))
-                       skip_tuple = true;              /* "do nothing" */
+                       skip_tuple = true;      /* "do nothing" */
        }
 
        if (!skip_tuple)
@@ -490,7 +490,7 @@ ExecSimpleRelationUpdate(EState *estate, EPQState *epqstate,
                if (resultRelInfo->ri_PartitionCheck)
                        ExecPartitionCheck(resultRelInfo, slot, estate, true);
 
-               simple_table_update(rel, tid, slot,estate->es_snapshot,
+               simple_table_update(rel, tid, slot, estate->es_snapshot,
                                                        &update_indexes);
 
                if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
@@ -591,8 +591,8 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                                 const char *relname)
 {
        /*
-        * We currently only support writing to regular tables.  However, give
-        * more specific error for partitioned and foreign tables.
+        * We currently only support writing to regular tables.  However, give a
+        * more specific error for partitioned and foreign tables.
         */
        if (relkind == RELKIND_PARTITIONED_TABLE)
                ereport(ERROR,
@@ -600,14 +600,14 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a partitioned table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
        else if (relkind == RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is a foreign table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 
        if (relkind != RELKIND_RELATION)
                ereport(ERROR,
@@ -615,5 +615,5 @@ CheckSubscriptionRelkind(char relkind, const char *nspname,
                                 errmsg("cannot use relation \"%s.%s\" as logical replication target",
                                                nspname, relname),
                                 errdetail("\"%s.%s\" is not a table.",
-                                               nspname, relname)));
+                                                  nspname, relname)));
 }
index 881131aff26b07050ec95a36e52db10e63fcb516..67c4be5108f21abbd7f884367b699cf54f682b99 100644 (file)
@@ -81,7 +81,8 @@ ExecScanFetch(ScanState *node,
 
                        /* Check if it meets the access-method conditions */
                        if (!(*recheckMtd) (node, slot))
-                               return ExecClearTuple(slot);    /* would not be returned by scan */
+                               return ExecClearTuple(slot);    /* would not be returned by
+                                                                                                * scan */
 
                        return slot;
                }
index 55d1669db099ac8b3b4ffa6d5a3c70b84ea6fd94..ad13fd9a05bbe0abb71ebd5fcc15238c6d95b5f7 100644 (file)
 
 static TupleDesc ExecTypeFromTLInternal(List *targetList,
                                           bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
                                           int natts);
 static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
-                                                                                          HeapTuple tuple,
-                                                                                          Buffer buffer,
-                                                                                          bool transfer_pin);
+                                                       HeapTuple tuple,
+                                                       Buffer buffer,
+                                                       bool transfer_pin);
 static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);
 
 
@@ -138,7 +137,7 @@ tts_virtual_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
 {
        elog(ERROR, "virtual tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 /*
@@ -164,7 +163,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
@@ -200,7 +199,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
        for (int natt = 0; natt < desc->natts; natt++)
        {
                Form_pg_attribute att = TupleDescAttr(desc, natt);
-               Datum val;
+               Datum           val;
 
                if (att->attbyval || slot->tts_isnull[natt])
                        continue;
@@ -210,7 +209,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
                if (att->attlen == -1 &&
                        VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
                {
-                       Size data_length;
+                       Size            data_length;
 
                        /*
                         * We want to flatten the expanded value so that the materialized
@@ -228,7 +227,7 @@ tts_virtual_materialize(TupleTableSlot *slot)
                }
                else
                {
-                       Size data_length = 0;
+                       Size            data_length = 0;
 
                        data = (char *) att_align_nominal(data, att->attalign);
                        data_length = att_addlength_datum(data_length, att->attlen, val);
@@ -382,7 +381,7 @@ tts_heap_materialize(TupleTableSlot *slot)
 static void
 tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
 {
-       HeapTuple tuple;
+       HeapTuple       tuple;
        MemoryContext oldcontext;
 
        oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
@@ -499,7 +498,7 @@ tts_minimal_getsysattr(TupleTableSlot *slot, int attnum, bool *isnull)
 {
        elog(ERROR, "minimal tuple table slot does not have system attributes");
 
-       return 0; /* silence compiler warnings */
+       return 0;                                       /* silence compiler warnings */
 }
 
 static void
@@ -1077,8 +1076,10 @@ TupleTableSlot *
 MakeTupleTableSlot(TupleDesc tupleDesc,
                                   const TupleTableSlotOps *tts_ops)
 {
-       Size            basesz, allocsz;
+       Size            basesz,
+                               allocsz;
        TupleTableSlot *slot;
+
        basesz = tts_ops->base_slot_size;
 
        /*
@@ -1866,7 +1867,7 @@ void
 slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
 {
        /* Check for caller errors */
-       Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+       Assert(slot->tts_nvalid < attnum);      /* slot_getsomeattr checked */
        Assert(attnum > 0);
 
        if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
@@ -1876,8 +1877,8 @@ slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
        slot->tts_ops->getsomeattrs(slot, attnum);
 
        /*
-        * If the underlying tuple doesn't have enough attributes, tuple descriptor
-        * must have the missing attributes.
+        * If the underlying tuple doesn't have enough attributes, tuple
+        * descriptor must have the missing attributes.
         */
        if (unlikely(slot->tts_nvalid < attnum))
        {
index 965e5dea70ed17051d577747b113e52358ea6926..b34f565bfe73e3f733e5302e7b826b3da99bb041 100644 (file)
@@ -1762,7 +1762,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                                if (junkFilter)
                                {
                                        TupleTableSlot *slot =
-                                               MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+                                       MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
 
                                        *junkFilter = ExecInitJunkFilter(tlist, slot);
                                }
@@ -1929,7 +1929,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                if (junkFilter)
                {
                        TupleTableSlot *slot =
-                               MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
+                       MakeSingleTupleTableSlot(NULL, &TTSOpsMinimalTuple);
 
                        *junkFilter = ExecInitJunkFilterConversion(tlist,
                                                                                                           CreateTupleDescCopy(tupdesc),
index fd3c71e7641b7bb1744bb1368c4efb54d0416086..43ab9fb392494afc43d4361c4f7ac5f06be05b4e 100644 (file)
@@ -754,7 +754,7 @@ process_ordered_aggregate_single(AggState *aggstate,
                          oldAbbrevVal == newAbbrevVal &&
                          DatumGetBool(FunctionCall2Coll(&pertrans->equalfnOne,
                                                                                         pertrans->aggCollation,
-                                                                                oldVal, *newVal)))))
+                                                                                        oldVal, *newVal)))))
                {
                        /* equal to prior, so forget this one */
                        if (!pertrans->inputtypeByVal && !*isNull)
index 4de1d2b484d300714acaef638583b26a9a1c31a2..d2da5d3a95187854aef09d2061866e507873a5a3 100644 (file)
@@ -700,10 +700,10 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
        Assert(HeapTupleIsValid(tup));
 
        /* Build the TupleTableSlot for the given tuple */
-       ExecStoreHeapTuple(tup,                 /* tuple to store */
+       ExecStoreHeapTuple(tup,         /* tuple to store */
                                           gm_state->gm_slots[reader],  /* slot in which to store
                                                                                                         * the tuple */
-                                          true);               /* pfree tuple when done with it */
+                                          true);       /* pfree tuple when done with it */
 
        return true;
 }
index aa43296e26cfe30bee27aa621cae9b44b92fedcb..5ccdc1af2e892816adfbf8efc18777a757ca8b1c 100644 (file)
@@ -750,7 +750,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags)
                rclauses = lappend(rclauses, ExecInitExpr(lsecond(hclause->args),
                                                                                                  (PlanState *) hjstate));
                rhclauses = lappend(rhclauses, ExecInitExpr(lsecond(hclause->args),
-                                                                                                  innerPlanState(hjstate)));
+                                                                                                       innerPlanState(hjstate)));
                hoperators = lappend_oid(hoperators, hclause->opno);
                hcollations = lappend_oid(hcollations, hclause->inputcollid);
        }
index 8fd52e9c8038e52d5c50e308a43d02a442850780..5dce284fe7344a0dda8b6975c4b6c049b3b44144 100644 (file)
@@ -192,9 +192,9 @@ IndexOnlyNext(IndexOnlyScanState *node)
 
                /*
                 * Fill the scan tuple slot with data from the index.  This might be
-                * provided in either HeapTuple or IndexTuple format.  Conceivably
-                * an index AM might fill both fields, in which case we prefer the
-                * heap format, since it's probably a bit cheaper to fill a slot from.
+                * provided in either HeapTuple or IndexTuple format.  Conceivably an
+                * index AM might fill both fields, in which case we prefer the heap
+                * format, since it's probably a bit cheaper to fill a slot from.
                 */
                if (scandesc->xs_hitup)
                {
index c97eb60f779c17f2d4bae89cc2744605c873893c..73bfd424d98572be3139bc3b0c874927a3bd4c1c 100644 (file)
@@ -242,7 +242,7 @@ IndexNextWithReorder(IndexScanState *node)
                                                                scandesc->xs_orderbynulls,
                                                                node) <= 0)
                        {
-                               HeapTuple tuple;
+                               HeapTuple       tuple;
 
                                tuple = reorderqueue_pop(node);
 
index 7674ac893c26f2ebf1d70228509db5c49c7a128a..4067554ed9448de545dbddfdf8a312bb630f504e 100644 (file)
@@ -327,7 +327,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
        /* node returns unmodified slots from the outer plan */
        lrstate->ps.resultopsset = true;
        lrstate->ps.resultops = ExecGetResultSlotOps(outerPlanState(lrstate),
-                                                                                                       &lrstate->ps.resultopsfixed);
+                                                                                                &lrstate->ps.resultopsfixed);
 
        /*
         * LockRows nodes do no projections, so initialize projection info for
index d3a0dece5ad5ad56b4fdef337d6cc0e28b89cc18..8acdaf205739751050cf43d60ba8b212dabc6283 100644 (file)
@@ -865,6 +865,7 @@ ldelete:;
                                                                goto ldelete;
 
                                                case TM_SelfModified:
+
                                                        /*
                                                         * This can be reached when following an update
                                                         * chain from a tuple updated by another session,
@@ -1070,7 +1071,7 @@ ExecUpdate(ModifyTableState *mtstate,
        {
                if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
                                                                  tupleid, oldtuple, slot))
-                       return NULL;        /* "do nothing" */
+                       return NULL;            /* "do nothing" */
        }
 
        /* INSTEAD OF ROW UPDATE Triggers */
@@ -1079,7 +1080,7 @@ ExecUpdate(ModifyTableState *mtstate,
        {
                if (!ExecIRUpdateTriggers(estate, resultRelInfo,
                                                                  oldtuple, slot))
-                       return NULL;        /* "do nothing" */
+                       return NULL;            /* "do nothing" */
        }
        else if (resultRelInfo->ri_FdwRoutine)
        {
@@ -1401,6 +1402,7 @@ lreplace:;
                                                        return NULL;
 
                                                case TM_SelfModified:
+
                                                        /*
                                                         * This can be reached when following an update
                                                         * chain from a tuple updated by another session,
index 8bd7430a9187ed249fea2e42182f7e71d9270b1b..436b43f8ca5025d5ecc3ea24be99d25f3c6c7049 100644 (file)
@@ -228,8 +228,8 @@ ExecReScanSeqScan(SeqScanState *node)
        scan = node->ss.ss_currentScanDesc;
 
        if (scan != NULL)
-               table_rescan(scan,      /* scan desc */
-                                        NULL); /* new scan keys */
+               table_rescan(scan,              /* scan desc */
+                                        NULL);         /* new scan keys */
 
        ExecScanReScan((ScanState *) node);
 }
index 749b4eced34c4aea222603b1c0fa8ab51ce1f6f8..3662fcada8c652a21952a34d684fe8514b8109f6 100644 (file)
@@ -684,7 +684,7 @@ execTuplesUnequal(TupleTableSlot *slot1,
                /* Apply the type-specific equality function */
                if (!DatumGetBool(FunctionCall2Coll(&eqfunctions[i],
                                                                                        collations[i],
-                                                                               attr1, attr2)))
+                                                                                       attr1, attr2)))
                {
                        result = true;          /* they are unequal */
                        break;
index 707ec0d19015f038b7b6920d0e40dc9b97df3ef7..de8c0060510bb064a2071c7fdb52df2280afdb34 100644 (file)
@@ -131,6 +131,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, int eflags)
        ExecInitScanTupleSlot(estate, &subquerystate->ss,
                                                  ExecGetResultType(subquerystate->subplan),
                                                  ExecGetResultSlotOps(subquerystate->subplan, NULL));
+
        /*
         * The slot used as the scantuple isn't the slot above (outside of EPQ),
         * but the one from the node below.
index 62466be70235e42e289c5c6fa5fe13dd73439c12..caf3b71f9e1d0218497f70dedaee26bd526e979f 100644 (file)
@@ -2154,8 +2154,8 @@ CheckPAMAuth(Port *port, const char *user, const char *password)
         * later used inside the PAM conversation to pass the password to the
         * authentication module.
         */
-       pam_passw_conv.appdata_ptr = unconstify(char *, password); /* from password above,
-                                                                                                        * not allocated */
+       pam_passw_conv.appdata_ptr = unconstify(char *, password);      /* from password above,
+                                                                                                                                * not allocated */
 
        /* Optionally, one can set the service name in pg_hba.conf */
        if (port->hba->pamservice && port->hba->pamservice[0] != '\0')
index c38a71df587f30f82dce3ab9032d00754bf05746..673066a4561ace7dad54ce57c3b89684eaa7dcff 100644 (file)
@@ -68,10 +68,10 @@ static bool dummy_ssl_passwd_cb_called = false;
 static bool ssl_is_server_start;
 
 static int ssl_protocol_version_to_openssl(int v, const char *guc_name,
-                                                                                  int loglevel);
+                                                               int loglevel);
 #ifndef SSL_CTX_set_min_proto_version
-static int SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
-static int SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
+static int     SSL_CTX_set_min_proto_version(SSL_CTX *ctx, int version);
+static int     SSL_CTX_set_max_proto_version(SSL_CTX *ctx, int version);
 #endif
 
 
@@ -192,9 +192,10 @@ be_tls_init(bool isServerStart)
 
        if (ssl_min_protocol_version)
        {
-               int ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
-                                                                                                         "ssl_min_protocol_version",
-                                                                                                         isServerStart ? FATAL : LOG);
+               int                     ssl_ver = ssl_protocol_version_to_openssl(ssl_min_protocol_version,
+                                                                                                                         "ssl_min_protocol_version",
+                                                                                                                         isServerStart ? FATAL : LOG);
+
                if (ssl_ver == -1)
                        goto error;
                SSL_CTX_set_min_proto_version(context, ssl_ver);
@@ -202,9 +203,10 @@ be_tls_init(bool isServerStart)
 
        if (ssl_max_protocol_version)
        {
-               int ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
-                                                                                                         "ssl_max_protocol_version",
-                                                                                                         isServerStart ? FATAL : LOG);
+               int                     ssl_ver = ssl_protocol_version_to_openssl(ssl_max_protocol_version,
+                                                                                                                         "ssl_max_protocol_version",
+                                                                                                                         isServerStart ? FATAL : LOG);
+
                if (ssl_ver == -1)
                        goto error;
                SSL_CTX_set_max_proto_version(context, ssl_ver);
@@ -1150,6 +1152,7 @@ be_tls_get_peer_serial(Port *port, char *ptr, size_t len)
                serial = X509_get_serialNumber(port->peer);
                b = ASN1_INTEGER_to_BN(serial, NULL);
                decimal = BN_bn2dec(b);
+
                BN_free(b);
                strlcpy(ptr, decimal, len);
                OPENSSL_free(decimal);
index 4ad17d0c31e556d5ac9705c9413c39314cbc5d66..665149defe28b8da36f600eb80ebc572af87baef 100644 (file)
@@ -476,10 +476,10 @@ bms_member_index(Bitmapset *a, int x)
        }
 
        /*
-        * Now add bits of the last word, but only those before the item.
-        * We can do that by applying a mask and then using popcount again.
-        * To get 0-based index, we want to count only preceding bits, not
-        * the item itself, so we subtract 1.
+        * Now add bits of the last word, but only those before the item. We can
+        * do that by applying a mask and then using popcount again. To get
+        * 0-based index, we want to count only preceding bits, not the item
+        * itself, so we subtract 1.
         */
        mask = ((bitmapword) 1 << bitnum) - 1;
        result += bmw_popcount(a->words[wordnum] & mask);
index 9dae586a518084f518ed84ec97568fe8f5c06099..bfad6b7987827f953a200927d614892581d0bafc 100644 (file)
@@ -161,9 +161,9 @@ clauselist_selectivity_simple(PlannerInfo *root,
        int                     listidx;
 
        /*
-        * If there's exactly one clause (and it was not estimated yet), just
-        * go directly to clause_selectivity(). None of what we might do below
-        * is relevant.
+        * If there's exactly one clause (and it was not estimated yet), just go
+        * directly to clause_selectivity(). None of what we might do below is
+        * relevant.
         */
        if ((list_length(clauses) == 1) &&
                bms_num_members(estimatedclauses) == 0)
index ccc8c11a9859e6c7b16a548c4dac0d09d6aa3ab8..bbf204ddfb86041c2cbabd7662424873746da980 100644 (file)
@@ -311,6 +311,7 @@ expand_partitioned_rtentry(PlannerInfo *root, RelOptInfo *relinfo,
        if (!root->partColsUpdated)
                root->partColsUpdated =
                        has_partition_attrs(parentrel, parentrte->updatedCols, NULL);
+
        /*
         * There shouldn't be any generated columns in the partition key.
         */
index 36aee35d462eb68a4da73c2e82bca98cb18eaa5d..d66471c7a464ebdfa2071e5abaa0e7810bb6d4dc 100644 (file)
@@ -3621,7 +3621,7 @@ create_limit_path(PlannerInfo *root, RelOptInfo *rel,
  */
 void
 adjust_limit_rows_costs(double *rows,  /* in/out parameter */
-                                               Cost *startup_cost,     /* in/out parameter */
+                                               Cost *startup_cost, /* in/out parameter */
                                                Cost *total_cost,       /* in/out parameter */
                                                int64 offset_est,
                                                int64 count_est)
index 4564c0ae815d43229f2f09cf1b15e62b16712d80..bbeaada2ae1c02f0a4871332fa57fd412999f260 100644 (file)
@@ -1053,8 +1053,8 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
                                                                                                          InvalidOid, &found_whole_row);
 
                        /*
-                        * Prevent this for the same reason as for constraints below.
-                        * Note that defaults cannot contain any vars, so it's OK that the
+                        * Prevent this for the same reason as for constraints below. Note
+                        * that defaults cannot contain any vars, so it's OK that the
                         * error message refers to generated columns.
                         */
                        if (found_whole_row)
@@ -3845,11 +3845,11 @@ transformPartitionBound(ParseState *pstate, Relation parent,
                 * any necessary validation.
                 */
                result_spec->lowerdatums =
-                                       transformPartitionRangeBounds(pstate, spec->lowerdatums,
-                                                                                                 parent);
+                       transformPartitionRangeBounds(pstate, spec->lowerdatums,
+                                                                                 parent);
                result_spec->upperdatums =
-                                       transformPartitionRangeBounds(pstate, spec->upperdatums,
-                                                                                                 parent);
+                       transformPartitionRangeBounds(pstate, spec->upperdatums,
+                                                                                 parent);
        }
        else
                elog(ERROR, "unexpected partition strategy: %d", (int) strategy);
@@ -3876,17 +3876,17 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
        i = j = 0;
        foreach(lc, blist)
        {
-               Node *expr = lfirst(lc);
+               Node       *expr = lfirst(lc);
                PartitionRangeDatum *prd = NULL;
 
                /*
-                * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed
-                * in as ColumnRefs.
+                * Infinite range bounds -- "minvalue" and "maxvalue" -- get passed in
+                * as ColumnRefs.
                 */
                if (IsA(expr, ColumnRef))
                {
-                       ColumnRef *cref = (ColumnRef *) expr;
-                       char *cname = NULL;
+                       ColumnRef  *cref = (ColumnRef *) expr;
+                       char       *cname = NULL;
 
                        /*
                         * There should be a single field named either "minvalue" or
@@ -3899,8 +3899,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
                        if (cname == NULL)
                        {
                                /*
-                                * ColumnRef is not in the desired single-field-name form.
-                                * For consistency between all partition strategies, let the
+                                * ColumnRef is not in the desired single-field-name form. For
+                                * consistency between all partition strategies, let the
                                 * expression transformation report any errors rather than
                                 * doing it ourselves.
                                 */
@@ -3965,8 +3965,8 @@ transformPartitionRangeBounds(ParseState *pstate, List *blist,
        }
 
        /*
-        * Once we see MINVALUE or MAXVALUE for one column, the remaining
-        * columns must be the same.
+        * Once we see MINVALUE or MAXVALUE for one column, the remaining columns
+        * must be the same.
         */
        validateInfiniteBounds(pstate, result);
 
@@ -4030,13 +4030,13 @@ transformPartitionBoundValue(ParseState *pstate, Node *val,
 
        /*
         * Check that the input expression's collation is compatible with one
-        * specified for the parent's partition key (partcollation).  Don't
-        * throw an error if it's the default collation which we'll replace with
-        * the parent's collation anyway.
+        * specified for the parent's partition key (partcollation).  Don't throw
+        * an error if it's the default collation which we'll replace with the
+        * parent's collation anyway.
         */
        if (IsA(value, CollateExpr))
        {
-               Oid             exprCollOid = exprCollation(value);
+               Oid                     exprCollOid = exprCollation(value);
 
                if (OidIsValid(exprCollOid) &&
                        exprCollOid != DEFAULT_COLLATION_OID &&
index 4d6595b24977f6c7b962373755311b8360e78c52..b207b765f2a5fb6d476b068acf84e256012b7b4f 100644 (file)
@@ -38,7 +38,7 @@ typedef struct PartitionDirectoryData
 {
        MemoryContext pdir_mcxt;
        HTAB       *pdir_hash;
-} PartitionDirectoryData;
+}                      PartitionDirectoryData;
 
 typedef struct PartitionDirectoryEntry
 {
@@ -74,9 +74,9 @@ RelationBuildPartitionDesc(Relation rel)
 
        /*
         * Get partition oids from pg_inherits.  This uses a single snapshot to
-        * fetch the list of children, so while more children may be getting
-        * added concurrently, whatever this function returns will be accurate
-        * as of some well-defined point in time.
+        * fetch the list of children, so while more children may be getting added
+        * concurrently, whatever this function returns will be accurate as of
+        * some well-defined point in time.
         */
        inhoids = find_inheritance_children(RelationGetRelid(rel), NoLock);
        nparts = list_length(inhoids);
@@ -122,14 +122,14 @@ RelationBuildPartitionDesc(Relation rel)
                 *
                 * Note that this algorithm assumes that PartitionBoundSpec we manage
                 * to fetch is the right one -- so this is only good enough for
-                * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION
-                * or some hypothetical operation that changes the partition bounds.
+                * concurrent ATTACH PARTITION, not concurrent DETACH PARTITION or
+                * some hypothetical operation that changes the partition bounds.
                 */
                if (boundspec == NULL)
                {
                        Relation        pg_class;
-                       SysScanDesc     scan;
-                       ScanKeyData     key[1];
+                       SysScanDesc scan;
+                       ScanKeyData key[1];
                        Datum           datum;
                        bool            isnull;
 
@@ -301,7 +301,7 @@ PartitionDirectoryLookup(PartitionDirectory pdir, Relation rel)
 void
 DestroyPartitionDirectory(PartitionDirectory pdir)
 {
-       HASH_SEQ_STATUS status;
+       HASH_SEQ_STATUS status;
        PartitionDirectoryEntry *pde;
 
        hash_seq_init(&status, pdir->pdir_hash);
index 765d58d120c400eaffdabd440ae97f14a588745e..b455c59cd709815ee44d700c32b0f9eecdfe2cda 100644 (file)
@@ -57,7 +57,7 @@ static void libpqrcv_get_senderinfo(WalReceiverConn *conn,
                                                char **sender_host, int *sender_port);
 static char *libpqrcv_identify_system(WalReceiverConn *conn,
                                                 TimeLineID *primary_tli);
-static int libpqrcv_server_version(WalReceiverConn *conn);
+static int     libpqrcv_server_version(WalReceiverConn *conn);
 static void libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
                                                                 TimeLineID tli, char **filename,
                                                                 char **content, int *len);
index 65f86ad73dbf74c96af0dc22c208eb2387ad69ff..acebf5893e90b7b977ee87bff54fbdac6f0b27eb 100644 (file)
@@ -468,8 +468,8 @@ ReorderBufferReturnTupleBuf(ReorderBuffer *rb, ReorderBufferTupleBuf *tuple)
 Oid *
 ReorderBufferGetRelids(ReorderBuffer *rb, int nrelids)
 {
-       Oid        *relids;
-       Size    alloc_len;
+       Oid                *relids;
+       Size            alloc_len;
 
        alloc_len = sizeof(Oid) * nrelids;
 
@@ -1327,8 +1327,8 @@ ReorderBufferBuildTupleCidHash(ReorderBuffer *rb, ReorderBufferTXN *txn)
                else
                {
                        /*
-                        * Maybe we already saw this tuple before in this transaction,
-                        * but if so it must have the same cmin.
+                        * Maybe we already saw this tuple before in this transaction, but
+                        * if so it must have the same cmin.
                         */
                        Assert(ent->cmin == change->data.tuplecid.cmin);
 
@@ -2464,8 +2464,8 @@ ReorderBufferSerializeChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
                        }
                case REORDER_BUFFER_CHANGE_TRUNCATE:
                        {
-                               Size    size;
-                               char   *data;
+                               Size            size;
+                               char       *data;
 
                                /* account for the OIDs of truncated relations */
                                size = sizeof(Oid) * change->data.truncate.nrelids;
@@ -2767,7 +2767,7 @@ ReorderBufferRestoreChange(ReorderBuffer *rb, ReorderBufferTXN *txn,
                        /* the base struct contains all the data, easy peasy */
                case REORDER_BUFFER_CHANGE_TRUNCATE:
                        {
-                               Oid        *relids;
+                               Oid                *relids;
 
                                relids = ReorderBufferGetRelids(rb,
                                                                                                change->data.truncate.nrelids);
index 182fe5bc82509f2c57be64cc1d2084b851a618f8..808a6f5b8362fd9dff9433fa656315018cd36411 100644 (file)
@@ -730,11 +730,11 @@ copy_replication_slot(FunctionCallInfo fcinfo, bool logical_slot)
                SpinLockRelease(&src->mutex);
 
                /*
-                * Check if the source slot still exists and is valid. We regard it
-                * as invalid if the type of replication slot or name has been
-                * changed, or the restart_lsn either is invalid or has gone backward.
-                * (The restart_lsn could go backwards if the source slot is dropped
-                * and copied from an older slot during installation.)
+                * Check if the source slot still exists and is valid. We regard it as
+                * invalid if the type of replication slot or name has been changed,
+                * or the restart_lsn either is invalid or has gone backward. (The
+                * restart_lsn could go backwards if the source slot is dropped and
+                * copied from an older slot during installation.)
                 *
                 * Since erroring out will release and drop the destination slot we
                 * don't need to release it here.
index 6c160c13c6faa17923a114b614dd378f6677dce9..83734575c28baf2eb3c9e97f2499bd38dfc64ff4 100644 (file)
@@ -276,9 +276,8 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
                                           WAIT_EVENT_SYNC_REP);
 
                /*
-                * If the postmaster dies, we'll probably never get an
-                * acknowledgment, because all the wal sender processes will exit. So
-                * just bail out.
+                * If the postmaster dies, we'll probably never get an acknowledgment,
+                * because all the wal sender processes will exit. So just bail out.
                 */
                if (rc & WL_POSTMASTER_DEATH)
                {
index d52ec7b2cf1a2cecd325ce981aaa53bd64aaa2c2..6abc780778301208e6200ba671be818a7d732da6 100644 (file)
@@ -808,11 +808,11 @@ WalRcvQuickDieHandler(SIGNAL_ARGS)
         * anyway.
         *
         * Note we use _exit(2) not _exit(0).  This is to force the postmaster
-        * into a system reset cycle if someone sends a manual SIGQUIT to a
-        * random backend.  This is necessary precisely because we don't clean up
-        * our shared memory state.  (The "dead man switch" mechanism in
-        * pmsignal.c should ensure the postmaster sees this as a crash, too, but
-        * no harm in being doubly sure.)
+        * into a system reset cycle if someone sends a manual SIGQUIT to a random
+        * backend.  This is necessary precisely because we don't clean up our
+        * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
+        * should ensure the postmaster sees this as a crash, too, but no harm in
+        * being doubly sure.)
         */
        _exit(2);
 }
index 8aa12ec9124772d0210e4034c045024419288ebf..3f3136802235d4e68e36fa610d550bdb7756571c 100644 (file)
@@ -218,7 +218,7 @@ typedef struct
        int                     write_head;
        int                     read_heads[NUM_SYNC_REP_WAIT_MODE];
        WalTimeSample last_read[NUM_SYNC_REP_WAIT_MODE];
-}                      LagTracker;
+} LagTracker;
 
 static LagTracker *lag_tracker;
 
@@ -1407,7 +1407,7 @@ WalSndWaitForWal(XLogRecPtr loc)
                sleeptime = WalSndComputeSleeptime(GetCurrentTimestamp());
 
                wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH |
-                                        WL_SOCKET_READABLE | WL_TIMEOUT;
+                       WL_SOCKET_READABLE | WL_TIMEOUT;
 
                if (pq_is_send_pending())
                        wakeEvents |= WL_SOCKET_WRITEABLE;
@@ -2255,7 +2255,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
                        int                     wakeEvents;
 
                        wakeEvents = WL_LATCH_SET | WL_EXIT_ON_PM_DEATH | WL_TIMEOUT |
-                                                WL_SOCKET_READABLE;
+                               WL_SOCKET_READABLE;
 
                        /*
                         * Use fresh timestamp, not last_processed, to reduce the chance
index 0b26e4166d90c4b673258c2c7ef1a5e254fd8127..8a71c2b534ae88c9af2aad1e55c5d85d00c65d39 100644 (file)
@@ -279,8 +279,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
         * build an array of SortItem(s) sorted using the multi-sort support
         *
         * XXX This relies on all stats entries pointing to the same tuple
-        * descriptor.  For now that assumption holds, but it might change in
-        * the future for example if we support statistics on multiple tables.
+        * descriptor.  For now that assumption holds, but it might change in the
+        * future for example if we support statistics on multiple tables.
         */
        items = build_sorted_items(numrows, &nitems, rows, stats[0]->tupDesc,
                                                           mss, k, attnums_dep);
@@ -300,8 +300,8 @@ dependency_degree(int numrows, HeapTuple *rows, int k, AttrNumber *dependency,
        {
                /*
                 * Check if the group ended, which may be either because we processed
-                * all the items (i==nitems), or because the i-th item is not equal
-                * to the preceding one.
+                * all the items (i==nitems), or because the i-th item is not equal to
+                * the preceding one.
                 */
                if (i == nitems ||
                        multi_sort_compare_dims(0, k - 2, &items[i - 1], &items[i], mss) != 0)
index ac0ae52ecfd85ded70bf76d471203b4437a5e33b..cc6112df3b3f3af4df8b6b291c0774bb1fbc1385 100644 (file)
@@ -67,7 +67,7 @@ static VacAttrStats **lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
                                          int nvacatts, VacAttrStats **vacatts);
 static void statext_store(Relation pg_stext, Oid relid,
                          MVNDistinct *ndistinct, MVDependencies *dependencies,
-                         MCVList * mcvlist, VacAttrStats **stats);
+                         MCVList *mcvlist, VacAttrStats **stats);
 
 
 /*
@@ -317,7 +317,7 @@ lookup_var_attr_stats(Relation rel, Bitmapset *attrs,
 static void
 statext_store(Relation pg_stext, Oid statOid,
                          MVNDistinct *ndistinct, MVDependencies *dependencies,
-                         MCVList * mcv, VacAttrStats **stats)
+                         MCVList *mcv, VacAttrStats **stats)
 {
        HeapTuple       stup,
                                oldtup;
@@ -538,9 +538,9 @@ build_attnums_array(Bitmapset *attrs, int *numattrs)
        {
                /*
                 * Make sure the bitmap contains only user-defined attributes. As
-                * bitmaps can't contain negative values, this can be violated in
-                * two ways. Firstly, the bitmap might contain 0 as a member, and
-                * secondly the integer value might be larger than MaxAttrNumber.
+                * bitmaps can't contain negative values, this can be violated in two
+                * ways. Firstly, the bitmap might contain 0 as a member, and secondly
+                * the integer value might be larger than MaxAttrNumber.
                 */
                Assert(AttrNumberIsForUserDefinedAttr(j));
                Assert(j <= MaxAttrNumber);
@@ -600,7 +600,7 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
        idx = 0;
        for (i = 0; i < numrows; i++)
        {
-               bool    toowide = false;
+               bool            toowide = false;
 
                items[idx].values = &values[idx * numattrs];
                items[idx].isnull = &isnull[idx * numattrs];
@@ -608,8 +608,8 @@ build_sorted_items(int numrows, int *nitems, HeapTuple *rows, TupleDesc tdesc,
                /* load the values/null flags from sample rows */
                for (j = 0; j < numattrs; j++)
                {
-                       Datum   value;
-                       bool    isnull;
+                       Datum           value;
+                       bool            isnull;
 
                        value = heap_getattr(rows[i], attnums[j], tdesc, &isnull);
 
@@ -988,7 +988,7 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
        int                     listidx;
        StatisticExtInfo *stat;
        List       *stat_clauses;
-       Selectivity     simple_sel,
+       Selectivity simple_sel,
                                mcv_sel,
                                mcv_basesel,
                                mcv_totalsel,
@@ -1006,9 +1006,9 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
         * Pre-process the clauses list to extract the attnums seen in each item.
         * We need to determine if there's any clauses which will be useful for
         * selectivity estimations with extended stats. Along the way we'll record
-        * all of the attnums for each clause in a list which we'll reference later
-        * so we don't need to repeat the same work again. We'll also keep track of
-        * all attnums seen.
+        * all of the attnums for each clause in a list which we'll reference
+        * later so we don't need to repeat the same work again. We'll also keep
+        * track of all attnums seen.
         *
         * We also skip clauses that we already estimated using different types of
         * statistics (we treat them as incompatible).
@@ -1066,9 +1066,10 @@ statext_mcv_clauselist_selectivity(PlannerInfo *root, List *clauses, int varReli
        }
 
        /*
-        * First compute "simple" selectivity, i.e. without the extended statistics,
-        * and essentially assuming independence of the columns/clauses. We'll then
-        * use the various selectivities computed from MCV list to improve it.
+        * First compute "simple" selectivity, i.e. without the extended
+        * statistics, and essentially assuming independence of the
+        * columns/clauses. We'll then use the various selectivities computed from
+        * MCV list to improve it.
         */
        simple_sel = clauselist_selectivity_simple(root, stat_clauses, varRelid,
                                                                                           jointype, sjinfo, NULL);
@@ -1105,16 +1106,16 @@ statext_clauselist_selectivity(PlannerInfo *root, List *clauses, int varRelid,
                                                           JoinType jointype, SpecialJoinInfo *sjinfo,
                                                           RelOptInfo *rel, Bitmapset **estimatedclauses)
 {
-       Selectivity     sel;
+       Selectivity sel;
 
        /* First, try estimating clauses using a multivariate MCV list. */
        sel = statext_mcv_clauselist_selectivity(root, clauses, varRelid, jointype,
                                                                                         sjinfo, rel, estimatedclauses);
 
        /*
-        * Then, apply functional dependencies on the remaining clauses by
-        * calling dependencies_clauselist_selectivity.  Pass 'estimatedclauses'
-        * so the function can properly skip clauses already estimated above.
+        * Then, apply functional dependencies on the remaining clauses by calling
+        * dependencies_clauselist_selectivity.  Pass 'estimatedclauses' so the
+        * function can properly skip clauses already estimated above.
         *
         * The reasoning for applying dependencies last is that the more complex
         * stats can track more complex correlations between the attributes, and
index 05ab6c9bb7aee99edeb0afc94848a2d8cdf04b7a..d22820dec743dd388d280b17f33ff8d560d26ad1 100644 (file)
@@ -209,20 +209,20 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
         *
         * Using the same algorithm might exclude items that are close to the
         * "average" frequency of the sample. But that does not say whether the
-        * observed frequency is close to the base frequency or not. We also
-        * need to consider unexpectedly uncommon items (again, compared to the
-        * base frequency), and the single-column algorithm does not have to.
+        * observed frequency is close to the base frequency or not. We also need
+        * to consider unexpectedly uncommon items (again, compared to the base
+        * frequency), and the single-column algorithm does not have to.
         *
         * We simply decide how many items to keep by computing minimum count
-        * using get_mincount_for_mcv_list() and then keep all items that seem
-        * to be more common than that.
+        * using get_mincount_for_mcv_list() and then keep all items that seem to
+        * be more common than that.
         */
        mincount = get_mincount_for_mcv_list(numrows, totalrows);
 
        /*
-        * Walk the groups until we find the first group with a count below
-        * the mincount threshold (the index of that group is the number of
-        * groups we want to keep).
+        * Walk the groups until we find the first group with a count below the
+        * mincount threshold (the index of that group is the number of groups we
+        * want to keep).
         */
        for (i = 0; i < nitems; i++)
        {
@@ -240,7 +240,7 @@ statext_mcv_build(int numrows, HeapTuple *rows, Bitmapset *attrs,
         */
        if (nitems > 0)
        {
-               int     j;
+               int                     j;
 
                /*
                 * Allocate the MCV list structure, set the global parameters.
@@ -485,7 +485,7 @@ statext_mcv_load(Oid mvoid)
  * (or a longer type) instead of using an array of bool items.
  */
 bytea *
-statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
+statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
 {
        int                     i;
        int                     dim;
@@ -603,7 +603,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
                        info[dim].nbytes = 0;
                        for (i = 0; i < info[dim].nvalues; i++)
                        {
-                               Size    len;
+                               Size            len;
 
                                values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
 
@@ -616,7 +616,7 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
                        info[dim].nbytes = 0;
                        for (i = 0; i < info[dim].nvalues; i++)
                        {
-                               Size    len;
+                               Size            len;
 
                                /* c-strings include terminator, so +1 byte */
                                values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
@@ -636,11 +636,11 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
         * for each attribute, deduplicated values and items).
         *
         * The header fields are copied one by one, so that we don't need any
-        * explicit alignment (we copy them while deserializing). All fields
-        * after this need to be properly aligned, for direct access.
+        * explicit alignment (we copy them while deserializing). All fields after
+        * this need to be properly aligned, for direct access.
         */
        total_length = MAXALIGN(VARHDRSZ + (3 * sizeof(uint32))
-                       + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
+                                                       + sizeof(AttrNumber) + (ndims * sizeof(Oid)));
 
        /* dimension info */
        total_length += MAXALIGN(ndims * sizeof(DimensionInfo));
@@ -650,14 +650,14 @@ statext_mcv_serialize(MCVList * mcvlist, VacAttrStats **stats)
                total_length += MAXALIGN(info[i].nbytes);
 
        /*
-        * And finally the items (no additional alignment needed, we start
-        * at proper alignment and the itemsize formula uses MAXALIGN)
+        * And finally the items (no additional alignment needed, we start at
+        * proper alignment and the itemsize formula uses MAXALIGN)
         */
        total_length += mcvlist->nitems * itemsize;
 
        /*
-        * Allocate space for the whole serialized MCV list (we'll skip bytes,
-        * so we set them to zero to make the result more compressible).
+        * Allocate space for the whole serialized MCV list (we'll skip bytes, so
+        * we set them to zero to make the result more compressible).
         */
        raw = palloc0(total_length);
        SET_VARSIZE(raw, total_length);
@@ -1189,8 +1189,8 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
                HeapTuple       tuple;
                Datum           result;
 
-               StringInfoData  itemValues;
-               StringInfoData  itemNulls;
+               StringInfoData itemValues;
+               StringInfoData itemNulls;
 
                int                     i;
 
@@ -1213,9 +1213,9 @@ pg_stats_ext_mcvlist_items(PG_FUNCTION_ARGS)
                 */
                values = (char **) palloc0(5 * sizeof(char *));
 
-               values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
-               values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
-               values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
+               values[0] = (char *) palloc(64 * sizeof(char)); /* item index */
+               values[3] = (char *) palloc(64 * sizeof(char)); /* frequency */
+               values[4] = (char *) palloc(64 * sizeof(char)); /* base frequency */
 
                outfuncs = (Oid *) palloc0(sizeof(Oid) * mcvlist->ndimensions);
                fmgrinfo = (FmgrInfo *) palloc0(sizeof(FmgrInfo) * mcvlist->ndimensions);
@@ -1376,7 +1376,7 @@ pg_mcv_list_send(PG_FUNCTION_ARGS)
  */
 static bool *
 mcv_get_match_bitmap(PlannerInfo *root, List *clauses,
-                                         Bitmapset *keys, MCVList * mcvlist, bool is_or)
+                                        Bitmapset *keys, MCVList *mcvlist, bool is_or)
 {
        int                     i;
        ListCell   *l;
index 33d7941a405c879339d330198eb128d32741e577..bee79d84dc9faf9c71479632a7c11f6690930ea4 100644 (file)
@@ -2813,12 +2813,12 @@ RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
                case RELKIND_MATVIEW:
                        {
                                /*
-                                * Not every table AM uses BLCKSZ wide fixed size
-                                * blocks. Therefore tableam returns the size in bytes - but
-                                * for the purpose of this routine, we want the number of
-                                * blocks. Therefore divide, rounding up.
+                                * Not every table AM uses BLCKSZ wide fixed size blocks.
+                                * Therefore tableam returns the size in bytes - but for the
+                                * purpose of this routine, we want the number of blocks.
+                                * Therefore divide, rounding up.
                                 */
-                               uint64 szbytes;
+                               uint64          szbytes;
 
                                szbytes = table_relation_size(relation, forkNum);
 
index fdac9850e02371b06449b592a6cc80c3750746e5..ffae52089f7b5474e81c88fc6526f60cd4244995 100644 (file)
@@ -1731,7 +1731,7 @@ FileClose(File file)
                         * see LruDelete.
                         */
                        elog(vfdP->fdstate & FD_TEMP_FILE_LIMIT ? LOG : data_sync_elevel(LOG),
-                               "could not close file \"%s\": %m", vfdP->fileName);
+                                "could not close file \"%s\": %m", vfdP->fileName);
                }
 
                --nfile;
index e0712f906a1e51849fabf0f855fdb8c9129cc6d8..bff254c2b28c6552a706189de835ba278ca344c5 100644 (file)
@@ -856,7 +856,7 @@ WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
        if (rc < 0)
                ereport(ERROR,
                                (errcode_for_socket_access(),
-                                /* translator: %s is a syscall name, such as "poll()" */
+               /* translator: %s is a syscall name, such as "poll()" */
                                 errmsg("%s failed: %m",
                                                "epoll_ctl()")));
 }
@@ -1089,7 +1089,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                        waiting = false;
                        ereport(ERROR,
                                        (errcode_for_socket_access(),
-                                        /* translator: %s is a syscall name, such as "poll()" */
+                       /* translator: %s is a syscall name, such as "poll()" */
                                         errmsg("%s failed: %m",
                                                        "epoll_wait()")));
                }
@@ -1215,7 +1215,7 @@ WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
                        waiting = false;
                        ereport(ERROR,
                                        (errcode_for_socket_access(),
-                                        /* translator: %s is a syscall name, such as "poll()" */
+                       /* translator: %s is a syscall name, such as "poll()" */
                                         errmsg("%s failed: %m",
                                                        "poll()")));
                }
index 48f4311464278fe0cec1ee13d6c912a0a28e2490..86acec09f3861cc6cbc65163e9e12c004b86cbaf 100644 (file)
@@ -370,7 +370,7 @@ void
 PostmasterDeathSignalInit(void)
 {
 #ifdef USE_POSTMASTER_DEATH_SIGNAL
-       int             signum = POSTMASTER_DEATH_SIGNAL;
+       int                     signum = POSTMASTER_DEATH_SIGNAL;
 
        /* Register our signal handler. */
        pqsignal(signum, postmaster_death_handler);
index 4bfbd57464c201f1458f4726b0bdbf54bdfd9447..ade8d713aae2eb64c2887dbc0af4ceb0eb36c4d4 100644 (file)
@@ -181,7 +181,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 (errmsg("must be superuser to rotate log files with adminpack 1.0"),
-                                 /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                  errhint("Consider using %s, which is part of core, instead.",
                                                  "pg_logfile_rotate()"))));
 
index 106d227a5acd51ff2087a35de78b24a81c1b5ab1..f838b0f758a33dc95d2b726e7bb10d56f529d009 100644 (file)
@@ -906,7 +906,7 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
                         */
                        if (progress)
                        {
-                               PGPROC *holder = BackendIdGetProc(lockholders->backendId);
+                               PGPROC     *holder = BackendIdGetProc(lockholders->backendId);
 
                                pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
                                                                                         holder->pid);
@@ -925,9 +925,10 @@ WaitForLockersMultiple(List *locktags, LOCKMODE lockmode, bool progress)
                        PROGRESS_WAITFOR_DONE,
                        PROGRESS_WAITFOR_CURRENT_PID
                };
-               const int64     values[] = {
+               const int64 values[] = {
                        0, 0, 0
                };
+
                pgstat_progress_update_multi_param(3, index, values);
        }
 
index 8191118b619d042b2ab04821fe29c6cd6147c287..dba8c397feb85fff4e994a06bae968681866e187 100644 (file)
@@ -91,7 +91,7 @@ static const int NSmgr = lengthof(smgrsw);
  */
 static HTAB *SMgrRelationHash = NULL;
 
-static dlist_head      unowned_relns;
+static dlist_head unowned_relns;
 
 /* local function prototypes */
 static void smgrshutdown(int code, Datum arg);
@@ -713,7 +713,7 @@ smgrimmedsync(SMgrRelation reln, ForkNumber forknum)
 void
 AtEOXact_SMgr(void)
 {
-       dlist_mutable_iter      iter;
+       dlist_mutable_iter iter;
 
        /*
         * Zap all unowned SMgrRelations.  We rely on smgrclose() to remove each
@@ -721,8 +721,8 @@ AtEOXact_SMgr(void)
         */
        dlist_foreach_modify(iter, &unowned_relns)
        {
-               SMgrRelation    rel = dlist_container(SMgrRelationData, node,
-                                                                                         iter.cur);
+               SMgrRelation rel = dlist_container(SMgrRelationData, node,
+                                                                                  iter.cur);
 
                Assert(rel->smgr_owner == NULL);
 
index 096735c8071ad9bd819198d94ccd8ea105897ac7..705f229b27fd64b2cb58ad8517d955f4dc0e6f0f 100644 (file)
@@ -548,8 +548,8 @@ RegisterSyncRequest(const FileTag *ftag, SyncRequestType type,
        for (;;)
        {
                /*
-                * Notify the checkpointer about it.  If we fail to queue a message
-                * in retryOnError mode, we have to sleep and try again ... ugly, but
+                * Notify the checkpointer about it.  If we fail to queue a message in
+                * retryOnError mode, we have to sleep and try again ... ugly, but
                 * hopefully won't happen often.
                 *
                 * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
index ee9e349a5ba1d56421d031371d6aaf0e1c3fe9ec..7bb81df970789fee183f89f292a008f89ab3bef3 100644 (file)
@@ -113,8 +113,8 @@ DestReceiver *
 CreateDestReceiver(CommandDest dest)
 {
        /*
-        * It's ok to cast the constness away as any modification of the none receiver
-        * would be a bug (which gets easier to catch this way).
+        * It's ok to cast the constness away as any modification of the none
+        * receiver would be a bug (which gets easier to catch this way).
         */
 
        switch (dest)
index 69a691f18e780de16d696afacfd275c81722ef6d..3a6a878ffac7a59a92c6e7adcf39ef7c57328e01 100644 (file)
@@ -3023,6 +3023,7 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
        int                     len,
                                value;
        bool            fx_mode = false;
+
        /* number of extra skipped characters (more than given in format string) */
        int                     extra_skip = 0;
 
@@ -3049,8 +3050,8 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
                                /*
                                 * In non FX (fixed format) mode one format string space or
                                 * separator match to one space or separator in input string.
-                                * Or match nothing if there is no space or separator in
-                                * the current position of input string.
+                                * Or match nothing if there is no space or separator in the
+                                * current position of input string.
                                 */
                                extra_skip--;
                                if (isspace((unsigned char) *s) || is_separator_char(s))
@@ -3176,11 +3177,13 @@ DCH_from_char(FormatNode *node, char *in, TmFromChar *out)
                                                                n->key->name)));
                                break;
                        case DCH_TZH:
+
                                /*
                                 * Value of TZH might be negative.  And the issue is that we
                                 * might swallow minus sign as the separator.  So, if we have
-                                * skipped more characters than specified in the format string,
-                                * then we consider prepending last skipped minus to TZH.
+                                * skipped more characters than specified in the format
+                                * string, then we consider prepending last skipped minus to
+                                * TZH.
                                 */
                                if (*s == '+' || *s == '-' || *s == ' ')
                                {
index a3c6adaf64019014feac82726bd45b0215b1ba64..f526106530500d96ff0dd476c72c8d3608203d13 100644 (file)
@@ -219,7 +219,7 @@ pg_read_file(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                 (errmsg("must be superuser to read files with adminpack 1.0"),
-                                 /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                  errhint("Consider using %s, which is part of core, instead.",
                                                  "pg_file_read()"))));
 
index 28e85e397e5f95e15d1ae1289855a13b2bd05043..f2be61431059fef2a4570f45880c7e1878ca64ca 100644 (file)
@@ -88,7 +88,7 @@ static int    point_inside(Point *p, int npts, Point *plist);
 static inline void line_construct(LINE *result, Point *pt, float8 m);
 static inline float8 line_sl(LINE *line);
 static inline float8 line_invsl(LINE *line);
-static bool    line_interpt_line(Point *result, LINE *l1, LINE *l2);
+static bool line_interpt_line(Point *result, LINE *l1, LINE *l2);
 static bool line_contain_point(LINE *line, Point *point);
 static float8 line_closept_point(Point *result, LINE *line, Point *pt);
 
@@ -96,10 +96,10 @@ static float8 line_closept_point(Point *result, LINE *line, Point *pt);
 static inline void statlseg_construct(LSEG *lseg, Point *pt1, Point *pt2);
 static inline float8 lseg_sl(LSEG *lseg);
 static inline float8 lseg_invsl(LSEG *lseg);
-static bool    lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
-static bool    lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
+static bool lseg_interpt_line(Point *result, LSEG *lseg, LINE *line);
+static bool lseg_interpt_lseg(Point *result, LSEG *l1, LSEG *l2);
 static int     lseg_crossing(float8 x, float8 y, float8 px, float8 py);
-static bool    lseg_contain_point(LSEG *lseg, Point *point);
+static bool lseg_contain_point(LSEG *lseg, Point *point);
 static float8 lseg_closept_point(Point *result, LSEG *lseg, Point *pt);
 static float8 lseg_closept_line(Point *result, LSEG *lseg, LINE *line);
 static float8 lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg);
@@ -692,9 +692,9 @@ static bool
 box_contain_box(BOX *contains_box, BOX *contained_box)
 {
        return FPge(contains_box->high.x, contained_box->high.x) &&
-                  FPle(contains_box->low.x, contained_box->low.x) &&
-                  FPge(contains_box->high.y, contained_box->high.y) &&
-                  FPle(contains_box->low.y, contained_box->low.y);
+               FPle(contains_box->low.x, contained_box->low.x) &&
+               FPge(contains_box->high.y, contained_box->high.y) &&
+               FPle(contains_box->low.y, contained_box->low.y);
 }
 
 
@@ -2378,8 +2378,8 @@ dist_ppath(PG_FUNCTION_ARGS)
        Assert(path->npts > 0);
 
        /*
-        * The distance from a point to a path is the smallest distance
-        * from the point to any of its constituent segments.
+        * The distance from a point to a path is the smallest distance from the
+        * point to any of its constituent segments.
         */
        for (i = 0; i < path->npts; i++)
        {
@@ -2553,9 +2553,9 @@ lseg_interpt_line(Point *result, LSEG *lseg, LINE *line)
        LINE            tmp;
 
        /*
-        * First, we promote the line segment to a line, because we know how
-        * to find the intersection point of two lines.  If they don't have
-        * an intersection point, we are done.
+        * First, we promote the line segment to a line, because we know how to
+        * find the intersection point of two lines.  If they don't have an
+        * intersection point, we are done.
         */
        line_construct(&tmp, &lseg->p[0], lseg_sl(lseg));
        if (!line_interpt_line(&interpt, &tmp, line))
@@ -2602,8 +2602,8 @@ line_closept_point(Point *result, LINE *line, Point *point)
        LINE            tmp;
 
        /*
-        * We drop a perpendicular to find the intersection point.  Ordinarily
-        * we should always find it, but that can fail in the presence of NaN
+        * We drop a perpendicular to find the intersection point.  Ordinarily we
+        * should always find it, but that can fail in the presence of NaN
         * coordinates, and perhaps even from simple roundoff issues.
         */
        line_construct(&tmp, point, line_invsl(line));
@@ -2693,8 +2693,8 @@ lseg_closept_lseg(Point *result, LSEG *on_lseg, LSEG *to_lseg)
                return 0.0;
 
        /*
-        * Then, we find the closest points from the endpoints of the second
-        * line segment, and keep the closest one.
+        * Then, we find the closest points from the endpoints of the second line
+        * segment, and keep the closest one.
         */
        dist = lseg_closept_point(result, on_lseg, &to_lseg->p[0]);
        d = lseg_closept_point(&point, on_lseg, &to_lseg->p[1]);
@@ -3063,7 +3063,7 @@ static bool
 box_contain_point(BOX *box, Point *point)
 {
        return box->high.x >= point->x && box->low.x <= point->x &&
-                  box->high.y >= point->y && box->low.y <= point-> y;
+               box->high.y >= point->y && box->low.y <= point->y;
 }
 
 Datum
@@ -3150,7 +3150,7 @@ static bool
 box_contain_lseg(BOX *box, LSEG *lseg)
 {
        return box_contain_point(box, &lseg->p[0]) &&
-                  box_contain_point(box, &lseg->p[1]);
+               box_contain_point(box, &lseg->p[1]);
 }
 
 Datum
index bb4bac85f7d1087c69d06aa1fa744952c9111734..a9784d067c77257d0cdaa8548c60558b2c88b718 100644 (file)
@@ -207,7 +207,7 @@ IsValidJsonNumber(const char *str, int len)
         */
        if (*str == '-')
        {
-               dummy_lex.input = unconstify(char *, str) + 1;
+               dummy_lex.input = unconstify(char *, str) +1;
                dummy_lex.input_length = len - 1;
        }
        else
@@ -2192,7 +2192,7 @@ json_build_object(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("argument list must have even number of elements"),
-                                /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                 errhint("The arguments of %s must consist of alternating keys and values.",
                                                 "json_build_object()")));
 
index 036d771386f5d727508c4de720d3e7da7e1d82b8..c742172bd8cb2564061c95f37957dc5f524fe433 100644 (file)
@@ -1155,7 +1155,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("argument list must have even number of elements"),
-                                /* translator: %s is a SQL function name */
+               /* translator: %s is a SQL function name */
                                 errhint("The arguments of %s must consist of alternating keys and values.",
                                                 "jsonb_build_object()")));
 
index 704e5720cf5b6153085a872b91751748f36b8c27..f4dfc504d6e900a8c554fee2533b8895f9cf021b 100644 (file)
@@ -152,7 +152,7 @@ GenericMatchText(const char *s, int slen, const char *p, int plen, Oid collation
 {
        if (collation && !lc_ctype_is_c(collation) && collation != DEFAULT_COLLATION_OID)
        {
-               pg_locale_t             locale = pg_newlocale_from_collation(collation);
+               pg_locale_t locale = pg_newlocale_from_collation(collation);
 
                if (locale && !locale->deterministic)
                        ereport(ERROR,
index 7528c80f7c342e00caa8e5c1360a69d8d58c87bf..e2583bc680a063db4c8f812b2f53c318bb0b2fe7 100644 (file)
@@ -262,9 +262,9 @@ match_pattern_prefix(Node *leftop,
         * optimized equality or prefix tests use bytewise comparisons, which is
         * not consistent with nondeterministic collations.  The actual
         * pattern-matching implementation functions will later error out that
-        * pattern-matching is not supported with nondeterministic collations.
-        * (We could also error out here, but by doing it later we get more
-        * precise error messages.)  (It should be possible to support at least
+        * pattern-matching is not supported with nondeterministic collations. (We
+        * could also error out here, but by doing it later we get more precise
+        * error messages.)  (It should be possible to support at least
         * Pattern_Prefix_Exact, but no point as along as the actual
         * pattern-matching implementations don't support it.)
         *
index c7df630c3c36ac1a3e15448c772f7a721657a1d7..70138feb29e7696adcf7b6b8622168fe2717b1cc 100644 (file)
@@ -182,7 +182,7 @@ invalid_syntax:
                         errmsg("invalid input syntax for type %s: \"%s\"",
                                        "smallint", s)));
 
-       return 0;                       /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 }
 
 /*
@@ -258,7 +258,7 @@ invalid_syntax:
                         errmsg("invalid input syntax for type %s: \"%s\"",
                                        "integer", s)));
 
-       return 0;                       /* keep compiler quiet */
+       return 0;                                       /* keep compiler quiet */
 }
 
 /*
index c5be472bcedff4c8ffaa60af947e3e0338a7639c..00a9a33eccc1871d8edb123581f369ef25bde456 100644 (file)
@@ -1101,8 +1101,8 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
                        /* enlarge output space if needed */
                        while (array_idx + matchctx->npatterns * 2 + 1 > array_len)
                        {
-                               array_len += array_len + 1;             /* 2^n-1 => 2^(n+1)-1 */
-                               if (array_len > MaxAllocSize/sizeof(int))
+                               array_len += array_len + 1; /* 2^n-1 => 2^(n+1)-1 */
+                               if (array_len > MaxAllocSize / sizeof(int))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                                                         errmsg("too many regular expression matches")));
@@ -1117,8 +1117,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
 
                                for (i = 1; i <= matchctx->npatterns; i++)
                                {
-                                       int             so = pmatch[i].rm_so;
-                                       int             eo = pmatch[i].rm_eo;
+                                       int                     so = pmatch[i].rm_so;
+                                       int                     eo = pmatch[i].rm_eo;
+
                                        matchctx->match_locs[array_idx++] = so;
                                        matchctx->match_locs[array_idx++] = eo;
                                        if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@@ -1127,8 +1128,9 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
                        }
                        else
                        {
-                               int             so = pmatch[0].rm_so;
-                               int             eo = pmatch[0].rm_eo;
+                               int                     so = pmatch[0].rm_so;
+                               int                     eo = pmatch[0].rm_eo;
+
                                matchctx->match_locs[array_idx++] = so;
                                matchctx->match_locs[array_idx++] = eo;
                                if (so >= 0 && eo >= 0 && (eo - so) > maxlen)
@@ -1190,10 +1192,10 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags,
                 * interest.
                 *
                 * Worst case: assume we need the maximum size (maxlen*eml), but take
-                * advantage of the fact that the original string length in bytes is an
-                * upper bound on the byte length of any fetched substring (and we know
-                * that len+1 is safe to allocate because the varlena header is longer
-                * than 1 byte).
+                * advantage of the fact that the original string length in bytes is
+                * an upper bound on the byte length of any fetched substring (and we
+                * know that len+1 is safe to allocate because the varlena header is
+                * longer than 1 byte).
                 */
                if (maxsiz > orig_len)
                        conv_bufsiz = orig_len + 1;
@@ -1248,9 +1250,10 @@ build_regexp_match_result(regexp_matches_ctx *matchctx)
                }
                else if (buf)
                {
-                       int             len = pg_wchar2mb_with_len(matchctx->wide_str + so,
-                                                                                          buf,
-                                                                                          eo - so);
+                       int                     len = pg_wchar2mb_with_len(matchctx->wide_str + so,
+                                                                                                  buf,
+                                                                                                  eo - so);
+
                        Assert(len < bufsiz);
                        elems[i] = PointerGetDatum(cstring_to_text_with_len(buf, len));
                        nulls[i] = false;
@@ -1409,15 +1412,15 @@ build_regexp_split_result(regexp_matches_ctx *splitctx)
 
        if (buf)
        {
-               int             bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
-               int             len;
+               int                     bufsiz PG_USED_FOR_ASSERTS_ONLY = splitctx->conv_bufsiz;
+               int                     len;
 
                endpos = splitctx->match_locs[splitctx->next_match * 2];
                if (endpos < startpos)
                        elog(ERROR, "invalid match starting position");
                len = pg_wchar2mb_with_len(splitctx->wide_str + startpos,
                                                                   buf,
-                                                                  endpos-startpos);
+                                                                  endpos - startpos);
                Assert(len < bufsiz);
                return PointerGetDatum(cstring_to_text_with_len(buf, len));
        }
index 095334b3363491c7ab5bc609b7c1da1247cd2787..b9e0f5c048d3c3b6f78b680c0ab31f3fafdbecfa 100644 (file)
@@ -635,10 +635,10 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
        oldslot = trigdata->tg_trigslot;
 
        /*
-        * If another PK row now exists providing the old key values, we
-        * should not do anything.  However, this check should only be
-        * made in the NO ACTION case; in RESTRICT cases we don't wish to
-        * allow another row to be substituted.
+        * If another PK row now exists providing the old key values, we should
+        * not do anything.  However, this check should only be made in the NO
+        * ACTION case; in RESTRICT cases we don't wish to allow another row to be
+        * substituted.
         */
        if (is_no_action &&
                ri_Check_Pk_Match(pk_rel, fk_rel, oldslot, riinfo))
@@ -651,8 +651,8 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
                elog(ERROR, "SPI_connect failed");
 
        /*
-        * Fetch or prepare a saved plan for the restrict lookup (it's the
-        * same query for delete and update cases)
+        * Fetch or prepare a saved plan for the restrict lookup (it's the same
+        * query for delete and update cases)
         */
        ri_BuildQueryKey(&qkey, riinfo, RI_PLAN_RESTRICT_CHECKREF);
 
@@ -713,7 +713,7 @@ ri_restrict(TriggerData *trigdata, bool is_no_action)
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_SELECT);
 
        if (SPI_finish() != SPI_OK_FINISH)
@@ -813,13 +813,13 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
        }
 
        /*
-        * We have a plan now. Build up the arguments from the key values
-        * in the deleted PK tuple and delete the referencing rows
+        * We have a plan now. Build up the arguments from the key values in the
+        * deleted PK tuple and delete the referencing rows
         */
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_DELETE);
 
        if (SPI_finish() != SPI_OK_FINISH)
@@ -940,7 +940,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, newslot,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_UPDATE);
 
        if (SPI_finish() != SPI_OK_FINISH)
@@ -1119,7 +1119,7 @@ ri_set(TriggerData *trigdata, bool is_set_null)
        ri_PerformCheck(riinfo, &qkey, qplan,
                                        fk_rel, pk_rel,
                                        oldslot, NULL,
-                                       true,   /* must detect new rows */
+                                       true,           /* must detect new rows */
                                        SPI_OK_UPDATE);
 
        if (SPI_finish() != SPI_OK_FINISH)
@@ -1132,18 +1132,17 @@ ri_set(TriggerData *trigdata, bool is_set_null)
        else
        {
                /*
-                * If we just deleted or updated the PK row whose key was equal to
-                * the FK columns' default values, and a referencing row exists in
-                * the FK table, we would have updated that row to the same values
-                * it already had --- and RI_FKey_fk_upd_check_required would
-                * hence believe no check is necessary.  So we need to do another
-                * lookup now and in case a reference still exists, abort the
-                * operation.  That is already implemented in the NO ACTION
-                * trigger, so just run it.  (This recheck is only needed in the
-                * SET DEFAULT case, since CASCADE would remove such rows in case
-                * of a DELETE operation or would change the FK key values in case
-                * of an UPDATE, while SET NULL is certain to result in rows that
-                * satisfy the FK constraint.)
+                * If we just deleted or updated the PK row whose key was equal to the
+                * FK columns' default values, and a referencing row exists in the FK
+                * table, we would have updated that row to the same values it already
+                * had --- and RI_FKey_fk_upd_check_required would hence believe no
+                * check is necessary.  So we need to do another lookup now and in
+                * case a reference still exists, abort the operation.  That is
+                * already implemented in the NO ACTION trigger, so just run it. (This
+                * recheck is only needed in the SET DEFAULT case, since CASCADE would
+                * remove such rows in case of a DELETE operation or would change the
+                * FK key values in case of an UPDATE, while SET NULL is certain to
+                * result in rows that satisfy the FK constraint.)
                 */
                return ri_restrict(trigdata, true);
        }
@@ -1170,8 +1169,8 @@ RI_FKey_pk_upd_check_required(Trigger *trigger, Relation pk_rel,
        riinfo = ri_FetchConstraintInfo(trigger, pk_rel, true);
 
        /*
-        * If any old key value is NULL, the row could not have been
-        * referenced by an FK row, so no check is needed.
+        * If any old key value is NULL, the row could not have been referenced by
+        * an FK row, so no check is needed.
         */
        if (ri_NullCheck(RelationGetDescr(pk_rel), oldslot, riinfo, true) != RI_KEYS_NONE_NULL)
                return false;
@@ -1213,14 +1212,17 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
         */
        if (ri_nullcheck == RI_KEYS_ALL_NULL)
                return false;
+
        /*
-        * If some new key values are NULL, the behavior depends on the match type.
+        * If some new key values are NULL, the behavior depends on the match
+        * type.
         */
        else if (ri_nullcheck == RI_KEYS_SOME_NULL)
        {
                switch (riinfo->confmatchtype)
                {
                        case FKCONSTR_MATCH_SIMPLE:
+
                                /*
                                 * If any new key value is NULL, the row must satisfy the
                                 * constraint, so no check is needed.
@@ -1228,12 +1230,14 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
                                return false;
 
                        case FKCONSTR_MATCH_PARTIAL:
+
                                /*
                                 * Don't know, must run full check.
                                 */
                                break;
 
                        case FKCONSTR_MATCH_FULL:
+
                                /*
                                 * If some new key values are NULL, the row fails the
                                 * constraint.  We must not throw error here, because the row
@@ -1251,12 +1255,12 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
         */
 
        /*
-        * If the original row was inserted by our own transaction, we
-        * must fire the trigger whether or not the keys are equal.  This
-        * is because our UPDATE will invalidate the INSERT so that the
-        * INSERT RI trigger will not do anything; so we had better do the
-        * UPDATE check.  (We could skip this if we knew the INSERT
-        * trigger already fired, but there is no easy way to know that.)
+        * If the original row was inserted by our own transaction, we must fire
+        * the trigger whether or not the keys are equal.  This is because our
+        * UPDATE will invalidate the INSERT so that the INSERT RI trigger will
+        * not do anything; so we had better do the UPDATE check.  (We could skip
+        * this if we knew the INSERT trigger already fired, but there is no easy
+        * way to know that.)
         */
        xminDatum = slot_getsysattr(oldslot, MinTransactionIdAttributeNumber, &isnull);
        Assert(!isnull);
index 1e3bcb47b8690134ecdc127c59faace7f1f2537b..f911511158dc20c42b2eff904dc2c0fa3a3c4792 100644 (file)
@@ -1566,7 +1566,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok)
         */
        if (!ndistinct_enabled || !dependencies_enabled || !mcv_enabled)
        {
-               bool    gotone = false;
+               bool            gotone = false;
 
                appendStringInfoString(&buf, " (");
 
index 4003631d8f5f64a0fa6d19a3f1086e057c9ec6b8..332dc860c4f85284edbd3d71fc8c8190a658965a 100644 (file)
@@ -755,8 +755,8 @@ bpchareq(PG_FUNCTION_ARGS)
                pg_newlocale_from_collation(collid)->deterministic)
        {
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.
                 */
                if (len1 != len2)
                        result = false;
@@ -793,8 +793,8 @@ bpcharne(PG_FUNCTION_ARGS)
                pg_newlocale_from_collation(collid)->deterministic)
        {
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.
                 */
                if (len1 != len2)
                        result = true;
@@ -983,7 +983,7 @@ hashbpchar(PG_FUNCTION_ARGS)
        Oid                     collid = PG_GET_COLLATION();
        char       *keydata;
        int                     keylen;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -1010,7 +1010,7 @@ hashbpchar(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, keydata, keylen);
 
@@ -1043,7 +1043,7 @@ hashbpcharextended(PG_FUNCTION_ARGS)
        Oid                     collid = PG_GET_COLLATION();
        char       *keydata;
        int                     keylen;
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        Datum           result;
 
        if (!collid)
@@ -1071,7 +1071,7 @@ hashbpcharextended(PG_FUNCTION_ARGS)
                        int32_t         ulen = -1;
                        UChar      *uchar = NULL;
                        Size            bsize;
-                       uint8_t    *buf;
+                       uint8_t    *buf;
 
                        ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
 
index f82ce92ce3da41a74f1bcc290b69f842a2628b21..e166effa5e5e3d35c5b5135ea8b6436f23a0cd5f 100644 (file)
@@ -1152,7 +1152,7 @@ text_position_setup(text *t1, text *t2, Oid collid, TextPositionState *state)
 {
        int                     len1 = VARSIZE_ANY_EXHDR(t1);
        int                     len2 = VARSIZE_ANY_EXHDR(t2);
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
 
        check_collation_set(collid);
 
@@ -1723,11 +1723,11 @@ texteq(PG_FUNCTION_ARGS)
                                        len2;
 
                /*
-                * Since we only care about equality or not-equality, we can avoid all the
-                * expense of strcoll() here, and just do bitwise comparison.  In fact, we
-                * don't even have to do a bitwise comparison if we can show the lengths
-                * of the strings are unequal; which might save us from having to detoast
-                * one or both values.
+                * Since we only care about equality or not-equality, we can avoid all
+                * the expense of strcoll() here, and just do bitwise comparison.  In
+                * fact, we don't even have to do a bitwise comparison if we can show
+                * the lengths of the strings are unequal; which might save us from
+                * having to detoast one or both values.
                 */
                len1 = toast_raw_datum_size(arg1);
                len2 = toast_raw_datum_size(arg2);
@@ -1873,7 +1873,7 @@ text_starts_with(PG_FUNCTION_ARGS)
        Datum           arg1 = PG_GETARG_DATUM(0);
        Datum           arg2 = PG_GETARG_DATUM(1);
        Oid                     collid = PG_GET_COLLATION();
-       pg_locale_t     mylocale = 0;
+       pg_locale_t mylocale = 0;
        bool            result;
        Size            len1,
                                len2;
@@ -5346,7 +5346,7 @@ text_concat_ws(PG_FUNCTION_ARGS)
 Datum
 text_left(PG_FUNCTION_ARGS)
 {
-       int             n = PG_GETARG_INT32(1);
+       int                     n = PG_GETARG_INT32(1);
 
        if (n < 0)
        {
index d0f6f715e6f109efe379fd84b086f742f7658823..969884d4856ca743b64e4d340e30f9e6e9191ea4 100644 (file)
@@ -2635,9 +2635,9 @@ RelationClearRelation(Relation relation, bool rebuild)
                         * there should be no PartitionDirectory with a pointer to the old
                         * entry.
                         *
-                        * Note that newrel and relation have already been swapped, so
-                        * the "old" partition descriptor is actually the one hanging off
-                        * of newrel.
+                        * Note that newrel and relation have already been swapped, so the
+                        * "old" partition descriptor is actually the one hanging off of
+                        * newrel.
                         */
                        MemoryContextSetParent(newrel->rd_pdcxt, relation->rd_pdcxt);
                        newrel->rd_partdesc = NULL;
index f870a07d2a1927e1dcbf86a26fa8ded69b15da10..7ad0aa0b9444b1ac400a6e507b9b4833e85d258a 100644 (file)
@@ -656,7 +656,7 @@ EstimateRelationMapSpace(void)
 void
 SerializeRelationMap(Size maxSize, char *startAddress)
 {
-       SerializedActiveRelMaps    *relmaps;
+       SerializedActiveRelMaps *relmaps;
 
        Assert(maxSize >= EstimateRelationMapSpace());
 
@@ -673,7 +673,7 @@ SerializeRelationMap(Size maxSize, char *startAddress)
 void
 RestoreRelationMap(char *startAddress)
 {
-       SerializedActiveRelMaps    *relmaps;
+       SerializedActiveRelMaps *relmaps;
 
        if (active_shared_updates.num_mappings != 0 ||
                active_local_updates.num_mappings != 0 ||
index ac98c19155f557113b85aa6cdffd1e3245833510..476538354ddf267d0ef8baeb46a073faca566767 100644 (file)
@@ -1235,7 +1235,7 @@ GetSysCacheOid(int cacheId,
        result = heap_getattr(tuple, oidcol,
                                                  SysCache[cacheId]->cc_tupdesc,
                                                  &isNull);
-       Assert(!isNull); /* columns used as oids should never be NULL */
+       Assert(!isNull);                        /* columns used as oids should never be NULL */
        ReleaseSysCache(tuple);
        return result;
 }
index ead8b371a7399499aee6dfeb2357f87c6072dc2f..f039567e20958350bad02fe749f75cab990bc1a5 100644 (file)
@@ -554,7 +554,7 @@ static void
 record_C_func(HeapTuple procedureTuple,
                          PGFunction user_fn, const Pg_finfo_record *inforec)
 {
-       Oid                     fn_oid =  ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
+       Oid                     fn_oid = ((Form_pg_proc) GETSTRUCT(procedureTuple))->oid;
        CFuncHashTabEntry *entry;
        bool            found;
 
index 9f5e2925dec92f30d0081027a29bfdbb3a56573b..66985cc2e92ed38167f9ac0dd3e26780defc9774 100644 (file)
@@ -653,6 +653,7 @@ hash_uint32_extended(uint32 k, uint64 seed)
        /* report the result */
        PG_RETURN_UINT64(((uint64) b << 32) | c);
 }
+
 /*
  * string_hash: hash function for keys that are NUL-terminated strings.
  *
index a5950c1e8c2bf939de0250fde581521604064bc1..3bf96de256df6d06c7fd04dd9287cce10f0a137d 100644 (file)
@@ -39,7 +39,7 @@ volatile uint32 CritSectionCount = 0;
 
 int                    MyProcPid;
 pg_time_t      MyStartTime;
-TimestampTz    MyStartTimestamp;
+TimestampTz MyStartTimestamp;
 struct Port *MyProcPort;
 int32          MyCancelKey;
 int                    MyPMChildSlot;
index c180a9910d4834cbb50c644f6c2f0a7c17df30b1..83c951485624a15f5fac49dff8790f6373c0ec7c 100644 (file)
@@ -592,8 +592,8 @@ InitializeSessionUserId(const char *rolename, Oid roleid)
        AssertState(!OidIsValid(AuthenticatedUserId));
 
        /*
-        * Make sure syscache entries are flushed for recent catalog changes.
-        * This allows us to find roles that were created on-the-fly during
+        * Make sure syscache entries are flushed for recent catalog changes. This
+        * allows us to find roles that were created on-the-fly during
         * authentication.
         */
        AcceptInvalidationMessages();
index dea5dcb368522a3bbc9e13332826cabe8fc197f4..bc2be43e2165dc20599f86d7e98e0b823354073c 100644 (file)
@@ -561,7 +561,7 @@ char *
 pg_any_to_server(const char *s, int len, int encoding)
 {
        if (len <= 0)
-               return unconstify(char *, s);           /* empty string is always valid */
+               return unconstify(char *, s);   /* empty string is always valid */
 
        if (encoding == DatabaseEncoding->encoding ||
                encoding == PG_SQL_ASCII)
@@ -634,11 +634,11 @@ char *
 pg_server_to_any(const char *s, int len, int encoding)
 {
        if (len <= 0)
-               return unconstify(char *, s);           /* empty string is always valid */
+               return unconstify(char *, s);   /* empty string is always valid */
 
        if (encoding == DatabaseEncoding->encoding ||
                encoding == PG_SQL_ASCII)
-               return unconstify(char *, s);           /* assume data is valid */
+               return unconstify(char *, s);   /* assume data is valid */
 
        if (DatabaseEncoding->encoding == PG_SQL_ASCII)
        {
index ed51da42341a4a9e7f9ed7f25b35aee4bdf2ee73..8acfa303c5d2fe8c61bd95b92ef1016c5bc8fd6b 100644 (file)
@@ -459,13 +459,13 @@ const struct config_enum_entry ssl_protocol_versions_info[] = {
 
 static struct config_enum_entry shared_memory_options[] = {
 #ifndef WIN32
-       { "sysv", SHMEM_TYPE_SYSV, false},
+       {"sysv", SHMEM_TYPE_SYSV, false},
 #endif
 #ifndef EXEC_BACKEND
-       { "mmap", SHMEM_TYPE_MMAP, false},
+       {"mmap", SHMEM_TYPE_MMAP, false},
 #endif
 #ifdef WIN32
-       { "windows", SHMEM_TYPE_WINDOWS, false},
+       {"windows", SHMEM_TYPE_WINDOWS, false},
 #endif
        {NULL, 0, false}
 };
@@ -1599,6 +1599,7 @@ static struct config_bool ConfigureNamesBool[] =
                true,
                NULL, NULL, NULL
        },
+
        /*
         * WITH OIDS support, and consequently default_with_oids, was removed in
         * PostgreSQL 12, but we tolerate the parameter being set to false to
@@ -8894,21 +8895,21 @@ ShowAllGUCConfig(DestReceiver *dest)
 struct config_generic **
 get_explain_guc_options(int *num)
 {
-       int             i;
+       int                     i;
        struct config_generic **result;
 
        *num = 0;
 
        /*
-        * Allocate enough space to fit all GUC_EXPLAIN options. We may not
-        * need all the space, but there are fairly few such options so we
-        * don't waste a lot of memory.
+        * Allocate enough space to fit all GUC_EXPLAIN options. We may not need
+        * all the space, but there are fairly few such options so we don't waste
+        * a lot of memory.
         */
        result = palloc(sizeof(struct config_generic *) * num_guc_explain_variables);
 
        for (i = 0; i < num_guc_variables; i++)
        {
-               bool modified;
+               bool            modified;
                struct config_generic *conf = guc_variables[i];
 
                /* return only options visible to the user */
@@ -8927,15 +8928,17 @@ get_explain_guc_options(int *num)
                switch (conf->vartype)
                {
                        case PGC_BOOL:
-                       {
-                               struct config_bool *lconf = (struct config_bool *) conf;
-                               modified = (lconf->boot_val != *(lconf->variable));
-                       }
-                       break;
+                               {
+                                       struct config_bool *lconf = (struct config_bool *) conf;
+
+                                       modified = (lconf->boot_val != *(lconf->variable));
+                               }
+                               break;
 
                        case PGC_INT:
                                {
                                        struct config_int *lconf = (struct config_int *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
@@ -8943,6 +8946,7 @@ get_explain_guc_options(int *num)
                        case PGC_REAL:
                                {
                                        struct config_real *lconf = (struct config_real *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
@@ -8950,6 +8954,7 @@ get_explain_guc_options(int *num)
                        case PGC_STRING:
                                {
                                        struct config_string *lconf = (struct config_string *) conf;
+
                                        modified = (strcmp(lconf->boot_val, *(lconf->variable)) != 0);
                                }
                                break;
@@ -8957,6 +8962,7 @@ get_explain_guc_options(int *num)
                        case PGC_ENUM:
                                {
                                        struct config_enum *lconf = (struct config_enum *) conf;
+
                                        modified = (lconf->boot_val != *(lconf->variable));
                                }
                                break;
index 62e6b652af9ed8a4ac6c093484212e3130988f28..1135ca91227b89a2b3d44ab526a33236ef54e2cf 100644 (file)
@@ -1676,6 +1676,7 @@ ensure_active_superblock(dsa_area *area, dsa_area_pool *pool,
                        return false;
                }
        }
+
        /*
         * This shouldn't happen: get_best_segment() or make_new_segment()
         * promised that we can successfully allocate npages.
@@ -2267,7 +2268,7 @@ static void
 check_for_freed_segments_locked(dsa_area *area)
 {
        size_t          freed_segment_counter;
-       int             i;
+       int                     i;
 
        Assert(LWLockHeldByMe(DSA_AREA_LOCK(area)));
        freed_segment_counter = area->control->freed_segment_counter;
index 5abc64f5f78443776eaecade93f11439849b732c..9a9069e551ea430e3ff9d986f1a3bd67240ebdaf 100644 (file)
@@ -185,7 +185,7 @@ static const char *default_timezone = NULL;
 "# allows any local user to connect as any PostgreSQL user, including\n" \
 "# the database superuser.  If you do not trust all your local users,\n" \
 "# use another authentication method.\n"
-static bool    authwarning = false;
+static bool authwarning = false;
 
 /*
  * Centralized knowledge of switches to pass to backend
@@ -2431,11 +2431,11 @@ check_need_password(const char *authmethodlocal, const char *authmethodhost)
                !(pwprompt || pwfilename))
        {
                pg_log_error("must specify a password for the superuser to enable %s authentication",
-                               (strcmp(authmethodlocal, "md5") == 0 ||
-                                strcmp(authmethodlocal, "password") == 0 ||
-                                strcmp(authmethodlocal, "scram-sha-256") == 0)
-                               ? authmethodlocal
-                               : authmethodhost);
+                                        (strcmp(authmethodlocal, "md5") == 0 ||
+                                         strcmp(authmethodlocal, "password") == 0 ||
+                                         strcmp(authmethodlocal, "scram-sha-256") == 0)
+                                        ? authmethodlocal
+                                        : authmethodhost);
                exit(1);
        }
 }
@@ -3067,8 +3067,8 @@ main(int argc, char *argv[])
        char            pg_ctl_path[MAXPGPATH];
 
        /*
-        * Ensure that buffering behavior of stdout matches what it is
-        * in interactive usage (at least on most platforms).  This prevents
+        * Ensure that buffering behavior of stdout matches what it is in
+        * interactive usage (at least on most platforms).  This prevents
         * unexpected output ordering when, eg, output is redirected to a file.
         * POSIX says we must do this before any other usage of these files.
         */
index 8f89be64cd4ccecabbba2fc397c96fb4ee1ff394..bb4257ff188f2dbdb4fd01cb3f68f7e5d9801e1e 100644 (file)
@@ -123,7 +123,8 @@ CleanupPriorWALFiles(void)
                        if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) &&
                                strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
                        {
-                               char            WALFilePath[MAXPGPATH * 2]; /* the file path including archive */
+                               char            WALFilePath[MAXPGPATH * 2]; /* the file path
+                                                                                                                * including archive */
 
                                /*
                                 * Use the original file name again now, including any
index 5301e8837540c926c419940a1e6c79e7f9ff744b..86859b7813f83e47540b712f5772ac95779c494d 100644 (file)
@@ -633,7 +633,7 @@ main(int argc, char **argv)
        {
                /* translator: second %s is an option name */
                pg_log_error("%s needs a slot to be specified using --slot",
-                               do_drop_slot ? "--drop-slot" : "--create-slot");
+                                        do_drop_slot ? "--drop-slot" : "--create-slot");
                fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
                                progname);
                exit(1);
@@ -728,7 +728,7 @@ main(int argc, char **argv)
        if (do_drop_slot)
        {
                if (verbose)
-                       pg_log_info("dropping replication slot \"%s\"", replication_slot);
+                       pg_log_info("dropping replication slot \"%s\"", replication_slot);
 
                if (!DropReplicationSlot(conn, replication_slot))
                        exit(1);
index 547eb8de8614d2e496e224e7954bda4cf1cfc28f..2e45c1464255e6078e0b57d9e65f515e95fce029 100644 (file)
@@ -133,9 +133,9 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
 
        if (verbose)
                pg_log_info("confirming write up to %X/%X, flush to %X/%X (slot %s)",
-                               (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
-                               (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
-                               replication_slot);
+                                       (uint32) (output_written_lsn >> 32), (uint32) output_written_lsn,
+                                       (uint32) (output_fsync_lsn >> 32), (uint32) output_fsync_lsn,
+                                       replication_slot);
 
        replybuf[len] = 'r';
        len += 1;
@@ -1021,11 +1021,11 @@ prepareToTerminate(PGconn *conn, XLogRecPtr endpos, bool keepalive, XLogRecPtr l
        {
                if (keepalive)
                        pg_log_info("endpos %X/%X reached by keepalive",
-                                       (uint32) (endpos >> 32), (uint32) endpos);
+                                               (uint32) (endpos >> 32), (uint32) endpos);
                else
                        pg_log_info("endpos %X/%X reached by record at %X/%X",
-                                       (uint32) (endpos >> 32), (uint32) (endpos),
-                                       (uint32) (lsn >> 32), (uint32) lsn);
+                                               (uint32) (endpos >> 32), (uint32) (endpos),
+                                               (uint32) (lsn >> 32), (uint32) lsn);
 
        }
 }
index e9854527e23857d181242d5e5a01e48d83693cf3..bf99a7dbc8e71921564543aa836780832a7ca062 100644 (file)
@@ -378,8 +378,8 @@ CheckServerVersionForStreaming(PGconn *conn)
                const char *serverver = PQparameterStatus(conn, "server_version");
 
                pg_log_error("incompatible server version %s; client does not support streaming from server versions older than %s",
-                               serverver ? serverver : "'unknown'",
-                               "9.3");
+                                        serverver ? serverver : "'unknown'",
+                                        "9.3");
                return false;
        }
        else if (serverMajor > maxServerMajor)
@@ -387,8 +387,8 @@ CheckServerVersionForStreaming(PGconn *conn)
                const char *serverver = PQparameterStatus(conn, "server_version");
 
                pg_log_error("incompatible server version %s; client does not support streaming from server versions newer than %s",
-                               serverver ? serverver : "'unknown'",
-                               PG_VERSION);
+                                        serverver ? serverver : "'unknown'",
+                                        PG_VERSION);
                return false;
        }
        return true;
@@ -620,8 +620,8 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
                        if (stream->startpos > stoppos)
                        {
                                pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X",
-                                               stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
-                                               newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
+                                                        stream->timeline, (uint32) (stoppos >> 32), (uint32) stoppos,
+                                                        newtimeline, (uint32) (stream->startpos >> 32), (uint32) stream->startpos);
                                goto error;
                        }
 
index b402e49896080513957ed08583bcdac84f82a11c..522a2450886f1fcf3eb7a2a4a789914b2cfee1ea 100644 (file)
@@ -1021,9 +1021,9 @@ findParentsByOid(TableInfo *self,
                                if (parent == NULL)
                                {
                                        pg_log_error("failed sanity check, parent OID %u of table \"%s\" (OID %u) not found",
-                                                         inhinfo[i].inhparent,
-                                                         self->dobj.name,
-                                                         oid);
+                                                                inhinfo[i].inhparent,
+                                                                self->dobj.name,
+                                                                oid);
                                        exit_nicely(1);
                                }
                                self->parents[j++] = parent;
index a0d7644a8adc52c8c463fde9c17acf33b0db026e..952caef52c338fa4a247708b98ea45f1c62e2bcd 100644 (file)
@@ -235,7 +235,7 @@ InitCompressorZlib(CompressorState *cs, int level)
 
        if (deflateInit(zp, level) != Z_OK)
                fatal("could not initialize compression library: %s",
-                                         zp->msg);
+                         zp->msg);
 
        /* Just be paranoid - maybe End is called after Start, with no Write */
        zp->next_out = (void *) cs->zlibOut;
@@ -334,7 +334,7 @@ ReadDataFromArchiveZlib(ArchiveHandle *AH, ReadFunc readF)
 
        if (inflateInit(zp) != Z_OK)
                fatal("could not initialize compression library: %s",
-                                         zp->msg);
+                         zp->msg);
 
        /* no minimal chunk size for zlib */
        while ((cnt = readF(AH, &buf, &buflen)))
@@ -586,7 +586,7 @@ cfread(void *ptr, int size, cfp *fp)
                        const char *errmsg = gzerror(fp->compressedfp, &errnum);
 
                        fatal("could not read from input file: %s",
-                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
+                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
                }
        }
        else
index 3dfdae3a579fcbc283d29bba577a46e7e1f3e9e4..7152fd6457d4597d68d9a868324430b3c8990621 100644 (file)
@@ -1158,7 +1158,7 @@ parseWorkerCommand(ArchiveHandle *AH, TocEntry **te, T_Action *act,
        }
        else
                fatal("unrecognized command received from master: \"%s\"",
-                                         msg);
+                         msg);
 }
 
 /*
@@ -1201,7 +1201,7 @@ parseWorkerResponse(ArchiveHandle *AH, TocEntry *te,
        }
        else
                fatal("invalid message received from worker: \"%s\"",
-                                         msg);
+                         msg);
 
        return status;
 }
@@ -1439,7 +1439,7 @@ ListenToWorkers(ArchiveHandle *AH, ParallelState *pstate, bool do_wait)
        }
        else
                fatal("invalid message received from worker: \"%s\"",
-                                         msg);
+                         msg);
 
        /* Free the string returned from getMessageFromWorker */
        free(msg);
@@ -1744,7 +1744,7 @@ pgpipe(int handles[2])
        if ((s = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not create socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                return -1;
        }
 
@@ -1755,21 +1755,21 @@ pgpipe(int handles[2])
        if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not bind: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if (listen(s, 1) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not listen: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
        if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: getsockname() failed: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
@@ -1780,7 +1780,7 @@ pgpipe(int handles[2])
        if ((tmp_sock = socket(AF_INET, SOCK_STREAM, 0)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not create second socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(s);
                return -1;
        }
@@ -1789,7 +1789,7 @@ pgpipe(int handles[2])
        if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                pg_log_error("pgpipe: could not connect socket: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(handles[1]);
                handles[1] = -1;
                closesocket(s);
@@ -1798,7 +1798,7 @@ pgpipe(int handles[2])
        if ((tmp_sock = accept(s, (SOCKADDR *) &serv_addr, &len)) == PGINVALID_SOCKET)
        {
                pg_log_error("pgpipe: could not accept connection: error code %d",
-                                 WSAGetLastError());
+                                        WSAGetLastError());
                closesocket(handles[1]);
                handles[1] = -1;
                closesocket(s);
index d764d36936daccab94393f9512b0e91e13da8639..564772ea7e39e2d104ef02b4fb6ca2e30b5762b7 100644 (file)
@@ -324,7 +324,7 @@ ProcessArchiveRestoreOptions(Archive *AHX)
                                        break;
                                default:
                                        fatal("unexpected section code %d",
-                                                                 (int) te->section);
+                                                 (int) te->section);
                                        break;
                        }
                }
@@ -608,7 +608,7 @@ RestoreArchive(Archive *AHX)
                                                                {
                                                                        /* complain and emit unmodified command */
                                                                        pg_log_warning("could not find where to insert IF EXISTS in statement \"%s\"",
-                                                                                         dropStmtOrig);
+                                                                                                  dropStmtOrig);
                                                                        appendPQExpBufferStr(ftStmt, dropStmt);
                                                                }
                                                        }
@@ -889,7 +889,7 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te, bool is_parallel)
                                        _selectOutputSchema(AH, te->namespace);
 
                                        pg_log_info("processing data for table \"%s.%s\"",
-                                                 te->namespace, te->tag);
+                                                               te->namespace, te->tag);
 
                                        /*
                                         * In parallel restore, if we created the table earlier in
@@ -1288,8 +1288,8 @@ EndRestoreBlobs(ArchiveHandle *AH)
 
        pg_log_info(ngettext("restored %d large object",
                                                 "restored %d large objects",
-                                                 AH->blobCount),
-                 AH->blobCount);
+                                                AH->blobCount),
+                               AH->blobCount);
 }
 
 
@@ -1320,12 +1320,12 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid, bool drop)
                        loOid = lo_create(AH->connection, oid);
                        if (loOid == 0 || loOid != oid)
                                fatal("could not create large object %u: %s",
-                                                         oid, PQerrorMessage(AH->connection));
+                                         oid, PQerrorMessage(AH->connection));
                }
                AH->loFd = lo_open(AH->connection, oid, INV_WRITE);
                if (AH->loFd == -1)
                        fatal("could not open large object %u: %s",
-                                                 oid, PQerrorMessage(AH->connection));
+                                 oid, PQerrorMessage(AH->connection));
        }
        else
        {
@@ -1429,7 +1429,7 @@ SortTocFromFile(Archive *AHX)
                te = getTocEntryByDumpId(AH, id);
                if (!te)
                        fatal("could not find entry for ID %d",
-                                                 id);
+                                 id);
 
                /* Mark it wanted */
                ropt->idWanted[id - 1] = true;
@@ -1662,10 +1662,10 @@ dump_lo_buf(ArchiveHandle *AH)
                pg_log_debug(ngettext("wrote %lu byte of large object data (result = %lu)",
                                                          "wrote %lu bytes of large object data (result = %lu)",
                                                          AH->lo_buf_used),
-                         (unsigned long) AH->lo_buf_used, (unsigned long) res);
+                                        (unsigned long) AH->lo_buf_used, (unsigned long) res);
                if (res != AH->lo_buf_used)
                        fatal("could not write to large object (result: %lu, expected: %lu)",
-                                                 (unsigned long) res, (unsigned long) AH->lo_buf_used);
+                                 (unsigned long) res, (unsigned long) AH->lo_buf_used);
        }
        else
        {
@@ -1772,12 +1772,12 @@ warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...)
        if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
        {
                pg_log_generic(PG_LOG_INFO, "from TOC entry %d; %u %u %s %s %s",
-                                 AH->currentTE->dumpId,
-                                 AH->currentTE->catalogId.tableoid,
-                                 AH->currentTE->catalogId.oid,
-                                 AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
-                                 AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
-                                 AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
+                                          AH->currentTE->dumpId,
+                                          AH->currentTE->catalogId.tableoid,
+                                          AH->currentTE->catalogId.oid,
+                                          AH->currentTE->desc ? AH->currentTE->desc : "(no desc)",
+                                          AH->currentTE->tag ? AH->currentTE->tag : "(no tag)",
+                                          AH->currentTE->owner ? AH->currentTE->owner : "(no owner)");
        }
        AH->lastErrorStage = AH->stage;
        AH->lastErrorTE = AH->currentTE;
@@ -2111,7 +2111,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 
                        if (snprintf(buf, MAXPGPATH, "%s/toc.dat", AH->fSpec) >= MAXPGPATH)
                                fatal("directory name too long: \"%s\"",
-                                                         AH->fSpec);
+                                         AH->fSpec);
                        if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
                        {
                                AH->format = archDirectory;
@@ -2121,7 +2121,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
 #ifdef HAVE_LIBZ
                        if (snprintf(buf, MAXPGPATH, "%s/toc.dat.gz", AH->fSpec) >= MAXPGPATH)
                                fatal("directory name too long: \"%s\"",
-                                                         AH->fSpec);
+                                         AH->fSpec);
                        if (stat(buf, &st) == 0 && S_ISREG(st.st_mode))
                        {
                                AH->format = archDirectory;
@@ -2129,7 +2129,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
                        }
 #endif
                        fatal("directory \"%s\" does not appear to be a valid archive (\"toc.dat\" does not exist)",
-                                                 AH->fSpec);
+                                 AH->fSpec);
                        fh = NULL;                      /* keep compiler quiet */
                }
                else
@@ -2152,7 +2152,7 @@ _discoverArchiveFormat(ArchiveHandle *AH)
                        fatal("could not read input file: %m");
                else
                        fatal("input file is too short (read %lu, expected 5)",
-                                                 (unsigned long) cnt);
+                                 (unsigned long) cnt);
        }
 
        /* Save it, just in case we need it later */
@@ -2321,7 +2321,7 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
        AH->currUser = NULL;            /* unknown */
        AH->currSchema = NULL;          /* ditto */
        AH->currTablespace = NULL;      /* ditto */
-       AH->currTableAm = NULL; /* ditto */
+       AH->currTableAm = NULL;         /* ditto */
 
        AH->toc = (TocEntry *) pg_malloc0(sizeof(TocEntry));
 
@@ -2465,11 +2465,11 @@ mark_dump_job_done(ArchiveHandle *AH,
                                   void *callback_data)
 {
        pg_log_info("finished item %d %s %s",
-                 te->dumpId, te->desc, te->tag);
+                               te->dumpId, te->desc, te->tag);
 
        if (status != 0)
                fatal("worker process failed: exit code %d",
-                                         status);
+                         status);
 }
 
 
@@ -2589,7 +2589,7 @@ ReadToc(ArchiveHandle *AH)
                /* Sanity check */
                if (te->dumpId <= 0)
                        fatal("entry ID %d out of range -- perhaps a corrupt TOC",
-                                                 te->dumpId);
+                                 te->dumpId);
 
                te->hadDumper = ReadInt(AH);
 
@@ -2702,7 +2702,7 @@ ReadToc(ArchiveHandle *AH)
                        AH->ReadExtraTocPtr(AH, te);
 
                pg_log_debug("read TOC entry %d (ID %d) for %s %s",
-                         i, te->dumpId, te->desc, te->tag);
+                                        i, te->dumpId, te->desc, te->tag);
 
                /* link completed entry into TOC circular list */
                te->prev = AH->toc->prev;
@@ -2738,12 +2738,12 @@ processEncodingEntry(ArchiveHandle *AH, TocEntry *te)
                encoding = pg_char_to_encoding(ptr1);
                if (encoding < 0)
                        fatal("unrecognized encoding \"%s\"",
-                                                 ptr1);
+                                 ptr1);
                AH->public.encoding = encoding;
        }
        else
                fatal("invalid ENCODING item: %s",
-                                         te->defn);
+                         te->defn);
 
        free(defn);
 }
@@ -2761,7 +2761,7 @@ processStdStringsEntry(ArchiveHandle *AH, TocEntry *te)
                AH->public.std_strings = false;
        else
                fatal("invalid STDSTRINGS item: %s",
-                                         te->defn);
+                         te->defn);
 }
 
 static void
@@ -3193,7 +3193,7 @@ _doSetSessionAuth(ArchiveHandle *AH, const char *user)
                if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
                        /* NOT warn_or_exit_horribly... use -O instead to skip this. */
                        fatal("could not set session user to \"%s\": %s",
-                                                 user, PQerrorMessage(AH->connection));
+                                 user, PQerrorMessage(AH->connection));
 
                PQclear(res);
        }
@@ -3415,7 +3415,8 @@ static void
 _selectTableAccessMethod(ArchiveHandle *AH, const char *tableam)
 {
        PQExpBuffer cmd;
-       const char *want, *have;
+       const char *want,
+                          *have;
 
        have = AH->currTableAm;
        want = tableam;
@@ -3530,7 +3531,7 @@ _getObjectDescription(PQExpBuffer buf, TocEntry *te, ArchiveHandle *AH)
        }
 
        pg_log_warning("don't know how to set owner for object type \"%s\"",
-                         type);
+                                  type);
 }
 
 /*
@@ -3688,7 +3689,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, bool isData)
                else
                {
                        pg_log_warning("don't know how to set owner for object type \"%s\"",
-                                         te->desc);
+                                                  te->desc);
                }
        }
 
@@ -3805,12 +3806,12 @@ ReadHead(ArchiveHandle *AH)
 
                if (AH->version < K_VERS_1_0 || AH->version > K_VERS_MAX)
                        fatal("unsupported version (%d.%d) in file header",
-                                                 vmaj, vmin);
+                                 vmaj, vmin);
 
                AH->intSize = AH->ReadBytePtr(AH);
                if (AH->intSize > 32)
                        fatal("sanity check on integer size (%lu) failed",
-                                                 (unsigned long) AH->intSize);
+                                 (unsigned long) AH->intSize);
 
                if (AH->intSize > sizeof(int))
                        pg_log_warning("archive was made on a machine with larger integers, some operations might fail");
@@ -3824,7 +3825,7 @@ ReadHead(ArchiveHandle *AH)
 
                if (AH->format != fmt)
                        fatal("expected format (%d) differs from format found in file (%d)",
-                                                 AH->format, fmt);
+                                 AH->format, fmt);
        }
 
        if (AH->version >= K_VERS_1_2)
@@ -3995,8 +3996,8 @@ restore_toc_entries_prefork(ArchiveHandle *AH, TocEntry *pending_list)
                {
                        /* OK, restore the item and update its dependencies */
                        pg_log_info("processing item %d %s %s",
-                                 next_work_item->dumpId,
-                                 next_work_item->desc, next_work_item->tag);
+                                               next_work_item->dumpId,
+                                               next_work_item->desc, next_work_item->tag);
 
                        (void) restore_toc_entry(AH, next_work_item, false);
 
@@ -4085,8 +4086,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
                        if ((next_work_item->reqs & (REQ_SCHEMA | REQ_DATA)) == 0)
                        {
                                pg_log_info("skipping item %d %s %s",
-                                         next_work_item->dumpId,
-                                         next_work_item->desc, next_work_item->tag);
+                                                       next_work_item->dumpId,
+                                                       next_work_item->desc, next_work_item->tag);
                                /* Update its dependencies as though we'd completed it */
                                reduce_dependencies(AH, next_work_item, &ready_list);
                                /* Loop around to see if anything else can be dispatched */
@@ -4094,8 +4095,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH, ParallelState *pstate,
                        }
 
                        pg_log_info("launching item %d %s %s",
-                                 next_work_item->dumpId,
-                                 next_work_item->desc, next_work_item->tag);
+                                               next_work_item->dumpId,
+                                               next_work_item->desc, next_work_item->tag);
 
                        /* Dispatch to some worker */
                        DispatchJobForTocEntry(AH, pstate, next_work_item, ACT_RESTORE,
@@ -4186,7 +4187,7 @@ restore_toc_entries_postfork(ArchiveHandle *AH, TocEntry *pending_list)
        for (te = pending_list->pending_next; te != pending_list; te = te->pending_next)
        {
                pg_log_info("processing missed item %d %s %s",
-                         te->dumpId, te->desc, te->tag);
+                                       te->dumpId, te->desc, te->tag);
                (void) restore_toc_entry(AH, te, false);
        }
 }
@@ -4472,7 +4473,7 @@ mark_restore_job_done(ArchiveHandle *AH,
        ParallelReadyList *ready_list = (ParallelReadyList *) callback_data;
 
        pg_log_info("finished item %d %s %s",
-                 te->dumpId, te->desc, te->tag);
+                               te->dumpId, te->desc, te->tag);
 
        if (status == WORKER_CREATE_DONE)
                mark_create_done(AH, te);
@@ -4485,7 +4486,7 @@ mark_restore_job_done(ArchiveHandle *AH,
                AH->public.n_errors++;
        else if (status != 0)
                fatal("worker process failed: exit code %d",
-                                         status);
+                         status);
 
        reduce_dependencies(AH, te, ready_list);
 }
@@ -4657,7 +4658,7 @@ repoint_table_dependencies(ArchiveHandle *AH)
                                te->dependencies[i] = tabledataid;
                                te->dataLength = Max(te->dataLength, tabledatate->dataLength);
                                pg_log_debug("transferring dependency %d -> %d to %d",
-                                         te->dumpId, olddep, tabledataid);
+                                                        te->dumpId, olddep, tabledataid);
                        }
                }
        }
@@ -4791,7 +4792,7 @@ static void
 inhibit_data_for_failed_table(ArchiveHandle *AH, TocEntry *te)
 {
        pg_log_info("table \"%s\" could not be created, will not restore its data",
-                 te->tag);
+                               te->tag);
 
        if (AH->tableDataId[te->dumpId] != 0)
        {
index ae5306b9dad0a80b5df4196bc48aed3301ce5c13..497b81b6840b3048d8940116adea4f68c635751b 100644 (file)
@@ -445,7 +445,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
 
                                default:                /* Always have a default */
                                        fatal("unrecognized data block type (%d) while searching archive",
-                                                                 blkType);
+                                                 blkType);
                                        break;
                        }
                        _readBlockHeader(AH, &blkType, &id);
@@ -482,7 +482,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
        /* Are we sane? */
        if (id != te->dumpId)
                fatal("found unexpected block ID (%d) when reading data -- expected %d",
-                                         id, te->dumpId);
+                         id, te->dumpId);
 
        switch (blkType)
        {
@@ -496,7 +496,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
 
                default:                                /* Always have a default */
                        fatal("unrecognized data block type %d while restoring archive",
-                                                 blkType);
+                                 blkType);
                        break;
        }
 }
@@ -910,11 +910,11 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
        int                     byt;
 
        /*
-        * Note: if we are at EOF with a pre-1.3 input file, we'll fatal()
-        * inside ReadInt rather than returning EOF.  It doesn't seem worth
-        * jumping through hoops to deal with that case better, because no such
-        * files are likely to exist in the wild: only some 7.1 development
-        * versions of pg_dump ever generated such files.
+        * Note: if we are at EOF with a pre-1.3 input file, we'll fatal() inside
+        * ReadInt rather than returning EOF.  It doesn't seem worth jumping
+        * through hoops to deal with that case better, because no such files are
+        * likely to exist in the wild: only some 7.1 development versions of
+        * pg_dump ever generated such files.
         */
        if (AH->version < K_VERS_1_3)
                *type = BLK_DATA;
index 9fd3b8a79f6a450f5550dc14c87b3be5c1ddf88b..8af5c7bebdb1a875a4d0ec5cf2bce0aa59b0e9f7 100644 (file)
@@ -53,7 +53,7 @@ _check_database_version(ArchiveHandle *AH)
                        remoteversion > AH->public.maxRemoteVersion))
        {
                pg_log_error("server version: %s; %s version: %s",
-                                 remoteversion_str, progname, PG_VERSION);
+                                        remoteversion_str, progname, PG_VERSION);
                fatal("aborting because of server version mismatch");
        }
 
@@ -138,7 +138,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
                newuser = requser;
 
        pg_log_info("connecting to database \"%s\" as user \"%s\"",
-                 newdb, newuser);
+                               newdb, newuser);
 
        password = AH->savedPassword;
 
@@ -182,7 +182,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
                {
                        if (!PQconnectionNeedsPassword(newConn))
                                fatal("could not reconnect to database: %s",
-                                                         PQerrorMessage(newConn));
+                                         PQerrorMessage(newConn));
                        PQfinish(newConn);
 
                        if (password)
@@ -304,8 +304,8 @@ ConnectDatabase(Archive *AHX,
        /* check to see that the backend connection was successfully made */
        if (PQstatus(AH->connection) == CONNECTION_BAD)
                fatal("connection to database \"%s\" failed: %s",
-                                         PQdb(AH->connection) ? PQdb(AH->connection) : "",
-                                         PQerrorMessage(AH->connection));
+                         PQdb(AH->connection) ? PQdb(AH->connection) : "",
+                         PQerrorMessage(AH->connection));
 
        /* Start strict; later phases may override this. */
        PQclear(ExecuteSqlQueryForSingleRow((Archive *) AH,
@@ -383,7 +383,7 @@ static void
 die_on_query_failure(ArchiveHandle *AH, const char *query)
 {
        pg_log_error("query failed: %s",
-                         PQerrorMessage(AH->connection));
+                                PQerrorMessage(AH->connection));
        fatal("query was: %s", query);
 }
 
@@ -427,8 +427,8 @@ ExecuteSqlQueryForSingleRow(Archive *fout, const char *query)
        if (ntups != 1)
                fatal(ngettext("query returned %d row instead of one: %s",
                                           "query returned %d rows instead of one: %s",
-                                                          ntups),
-                                         ntups, query);
+                                          ntups),
+                         ntups, query);
 
        return res;
 }
@@ -571,7 +571,7 @@ ExecuteSqlCommandBuf(Archive *AHX, const char *buf, size_t bufLen)
                if (AH->pgCopyIn &&
                        PQputCopyData(AH->connection, buf, bufLen) <= 0)
                        fatal("error returned by PQputCopyData: %s",
-                                                 PQerrorMessage(AH->connection));
+                                 PQerrorMessage(AH->connection));
        }
        else if (AH->outputKind == OUTPUT_OTHERDATA)
        {
@@ -620,7 +620,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
 
                if (PQputCopyEnd(AH->connection, NULL) <= 0)
                        fatal("error returned by PQputCopyEnd: %s",
-                                                 PQerrorMessage(AH->connection));
+                                 PQerrorMessage(AH->connection));
 
                /* Check command status and return to normal libpq state */
                res = PQgetResult(AH->connection);
@@ -632,7 +632,7 @@ EndDBCopyMode(Archive *AHX, const char *tocEntryTag)
                /* Do this to ensure we've pumped libpq back to idle state */
                if (PQgetResult(AH->connection) != NULL)
                        pg_log_warning("unexpected extra results during COPY of table \"%s\"",
-                                         tocEntryTag);
+                                                  tocEntryTag);
 
                AH->pgCopyIn = false;
        }
index 70eca82c91430d4324d46b63ab4fdd6540cb3d97..cfa2f6ec747528003e0c3c32a780fc3086f95d66 100644 (file)
@@ -348,7 +348,7 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
 
        if (dLen > 0 && cfwrite(data, dLen, ctx->dataFH) != dLen)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
 
        return;
@@ -452,7 +452,7 @@ _LoadBlobs(ArchiveHandle *AH)
                /* Can't overflow because line and fname are the same length. */
                if (sscanf(line, "%u %s\n", &oid, fname) != 2)
                        fatal("invalid line in large object TOC file \"%s\": \"%s\"",
-                                                 fname, line);
+                                 fname, line);
 
                StartRestoreBlob(AH, oid, AH->public.ropt->dropSchema);
                snprintf(path, MAXPGPATH, "%s/%s", ctx->directory, fname);
@@ -461,7 +461,7 @@ _LoadBlobs(ArchiveHandle *AH)
        }
        if (!cfeof(ctx->blobsTocFH))
                fatal("error reading large object TOC file \"%s\"",
-                                         fname);
+                         fname);
 
        if (cfclose(ctx->blobsTocFH) != 0)
                fatal("could not close large object TOC file \"%s\": %m",
@@ -486,7 +486,7 @@ _WriteByte(ArchiveHandle *AH, const int i)
 
        if (cfwrite(&c, 1, ctx->dataFH) != 1)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
        return 1;
 }
@@ -516,7 +516,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
 
        if (cfwrite(buf, len, ctx->dataFH) != len)
                fatal("could not write to output file: %s",
-                                         get_cfp_error(ctx->dataFH));
+                         get_cfp_error(ctx->dataFH));
 
        return;
 }
index b52593c3c0c3572cd99f728c74b0815cfb2e07f3..569df9b4b53b9d98e0383e453f1d9a3dc93aaec5 100644 (file)
@@ -553,10 +553,10 @@ _tarReadRaw(ArchiveHandle *AH, void *buf, size_t len, TAR_MEMBER *th, FILE *fh)
                                        const char *errmsg = gzerror(th->zFH, &errnum);
 
                                        fatal("could not read from input file: %s",
-                                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
+                                                 errnum == Z_ERRNO ? strerror(errno) : errmsg);
 #else
                                        fatal("could not read from input file: %s",
-                                                                 strerror(errno));
+                                                 strerror(errno));
 #endif
                                }
                        }
@@ -691,7 +691,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te)
                        if (pos1 < 6 || strncmp(te->copyStmt, "COPY ", 5) != 0 ||
                                strcmp(te->copyStmt + pos1, " FROM stdin;\n") != 0)
                                fatal("unexpected COPY statement syntax: \"%s\"",
-                                                         te->copyStmt);
+                                         te->copyStmt);
 
                        /* Emit all but the FROM part ... */
                        ahwrite(te->copyStmt, 1, pos1, AH);
@@ -1113,7 +1113,7 @@ _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th)
                snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) len);
                snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) th->fileLen);
                fatal("actual file length (%s) does not match expected (%s)",
-                                         buf1, buf2);
+                         buf1, buf2);
        }
 
        pad = ((len + 511) & ~511) - len;
@@ -1150,7 +1150,7 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
                snprintf(buf1, sizeof(buf1), INT64_FORMAT, (int64) ctx->tarFHpos);
                snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ctx->tarNextMember);
                pg_log_debug("moving from position %s to next member at file position %s",
-                         buf1, buf2);
+                                        buf1, buf2);
 
                while (ctx->tarFHpos < ctx->tarNextMember)
                        _tarReadRaw(AH, &c, 1, NULL, ctx->tarFH);
@@ -1188,8 +1188,8 @@ _tarPositionTo(ArchiveHandle *AH, const char *filename)
                id = atoi(th->targetFile);
                if ((TocIDRequired(AH, id) & REQ_DATA) != 0)
                        fatal("restoring data out of order is not supported in this archive format: "
-                                                 "\"%s\" is required, but comes before \"%s\" in the archive file.",
-                                                 th->targetFile, filename);
+                                 "\"%s\" is required, but comes before \"%s\" in the archive file.",
+                                 th->targetFile, filename);
 
                /* Header doesn't match, so read to next header */
                len = ((th->fileLen + 511) & ~511); /* Padded length */
@@ -1234,8 +1234,8 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
                if (len != 512)
                        fatal(ngettext("incomplete tar header found (%lu byte)",
                                                   "incomplete tar header found (%lu bytes)",
-                                                                  len),
-                                                 (unsigned long) len);
+                                                  len),
+                                 (unsigned long) len);
 
                /* Calc checksum */
                chk = tarChecksum(h);
@@ -1274,7 +1274,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
                snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT, (uint64) hPos);
                snprintf(lenbuf, sizeof(lenbuf), UINT64_FORMAT, (uint64) len);
                pg_log_debug("TOC Entry %s at %s (length %s, checksum %d)",
-                         tag, posbuf, lenbuf, sum);
+                                        tag, posbuf, lenbuf, sum);
        }
 
        if (chk != sum)
@@ -1284,7 +1284,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
                snprintf(posbuf, sizeof(posbuf), UINT64_FORMAT,
                                 (uint64) ftello(ctx->tarFH));
                fatal("corrupt tar header found in %s (expected %d, computed %d) file position %s",
-                                         tag, sum, chk, posbuf);
+                         tag, sum, chk, posbuf);
        }
 
        th->targetFile = pg_strdup(tag);
index e8ce719a0a61ae5e23461aca8ce3b7fa27aca5f4..38a01758a14b6dce300de0243b4a29cf78280c75 100644 (file)
@@ -135,7 +135,7 @@ static const CatalogId nilCatalogId = {0, 0};
 
 /* override for standard extra_float_digits setting */
 static bool have_extra_float_digits = false;
-static int extra_float_digits;
+static int     extra_float_digits;
 
 /*
  * The default number of rows per INSERT when
@@ -601,7 +601,7 @@ main(int argc, char **argv)
                                        errno == ERANGE)
                                {
                                        pg_log_error("rows-per-insert must be in range %d..%d",
-                                                         1, INT_MAX);
+                                                                1, INT_MAX);
                                        exit_nicely(1);
                                }
                                dopt.dump_inserts = (int) rowsPerInsert;
@@ -1112,13 +1112,14 @@ setup_connection(Archive *AH, const char *dumpencoding,
                ExecuteSqlStatement(AH, "SET INTERVALSTYLE = POSTGRES");
 
        /*
-        * Use an explicitly specified extra_float_digits if it has been
-        * provided. Otherwise, set extra_float_digits so that we can dump float
-        * data exactly (given correctly implemented float I/O code, anyway).
+        * Use an explicitly specified extra_float_digits if it has been provided.
+        * Otherwise, set extra_float_digits so that we can dump float data
+        * exactly (given correctly implemented float I/O code, anyway).
         */
        if (have_extra_float_digits)
        {
                PQExpBuffer q = createPQExpBuffer();
+
                appendPQExpBuffer(q, "SET extra_float_digits TO %d",
                                                  extra_float_digits);
                ExecuteSqlStatement(AH, q->data);
@@ -1921,7 +1922,7 @@ dumpTableData_copy(Archive *fout, void *dcontext)
        /* Do this to ensure we've pumped libpq back to idle state */
        if (PQgetResult(conn) != NULL)
                pg_log_warning("unexpected extra results during COPY of table \"%s\"",
-                                 classname);
+                                          classname);
 
        destroyPQExpBuffer(q);
        return 1;
@@ -3468,7 +3469,7 @@ dumpBlobs(Archive *fout, void *arg)
                        loFd = lo_open(conn, blobOid, INV_READ);
                        if (loFd == -1)
                                fatal("could not open large object %u: %s",
-                                                         blobOid, PQerrorMessage(conn));
+                                         blobOid, PQerrorMessage(conn));
 
                        StartBlob(fout, blobOid);
 
@@ -3478,7 +3479,7 @@ dumpBlobs(Archive *fout, void *arg)
                                cnt = lo_read(conn, loFd, buf, LOBBUFSIZE);
                                if (cnt < 0)
                                        fatal("error reading large object %u: %s",
-                                                                 blobOid, PQerrorMessage(conn));
+                                                 blobOid, PQerrorMessage(conn));
 
                                WriteData(fout, buf, cnt);
                        } while (cnt > 0);
@@ -3711,7 +3712,7 @@ dumpPolicy(Archive *fout, PolicyInfo *polinfo)
        else
        {
                pg_log_error("unexpected policy command type: %c",
-                                 polinfo->polcmd);
+                                        polinfo->polcmd);
                exit_nicely(1);
        }
 
@@ -3838,7 +3839,7 @@ getPublications(Archive *fout)
 
                if (strlen(pubinfo[i].rolname) == 0)
                        pg_log_warning("owner of publication \"%s\" appears to be invalid",
-                                         pubinfo[i].dobj.name);
+                                                  pubinfo[i].dobj.name);
 
                /* Decide whether we want to dump it */
                selectDumpableObject(&(pubinfo[i].dobj), fout);
@@ -4172,7 +4173,7 @@ getSubscriptions(Archive *fout)
 
                if (strlen(subinfo[i].rolname) == 0)
                        pg_log_warning("owner of subscription \"%s\" appears to be invalid",
-                                         subinfo[i].dobj.name);
+                                                  subinfo[i].dobj.name);
 
                /* Decide whether we want to dump it */
                selectDumpableObject(&(subinfo[i].dobj), fout);
@@ -4488,7 +4489,7 @@ binary_upgrade_extension_member(PQExpBuffer upgrade_buffer,
        }
        if (extobj == NULL)
                fatal("could not find parent extension for %s %s",
-                                         objtype, objname);
+                         objtype, objname);
 
        appendPQExpBufferStr(upgrade_buffer,
                                                 "\n-- For binary upgrade, handle extension membership the hard way\n");
@@ -4620,7 +4621,7 @@ getNamespaces(Archive *fout, int *numNamespaces)
 
                if (strlen(nsinfo[i].rolname) == 0)
                        pg_log_warning("owner of schema \"%s\" appears to be invalid",
-                                         nsinfo[i].dobj.name);
+                                                  nsinfo[i].dobj.name);
        }
 
        PQclear(res);
@@ -4968,7 +4969,7 @@ getTypes(Archive *fout, int *numTypes)
 
                if (strlen(tyinfo[i].rolname) == 0)
                        pg_log_warning("owner of data type \"%s\" appears to be invalid",
-                                         tyinfo[i].dobj.name);
+                                                  tyinfo[i].dobj.name);
        }
 
        *numTypes = ntups;
@@ -5053,7 +5054,7 @@ getOperators(Archive *fout, int *numOprs)
 
                if (strlen(oprinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator \"%s\" appears to be invalid",
-                                         oprinfo[i].dobj.name);
+                                                  oprinfo[i].dobj.name);
        }
 
        PQclear(res);
@@ -5355,7 +5356,7 @@ getOpclasses(Archive *fout, int *numOpclasses)
 
                if (strlen(opcinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator class \"%s\" appears to be invalid",
-                                         opcinfo[i].dobj.name);
+                                                  opcinfo[i].dobj.name);
        }
 
        PQclear(res);
@@ -5439,7 +5440,7 @@ getOpfamilies(Archive *fout, int *numOpfamilies)
 
                if (strlen(opfinfo[i].rolname) == 0)
                        pg_log_warning("owner of operator family \"%s\" appears to be invalid",
-                                         opfinfo[i].dobj.name);
+                                                  opfinfo[i].dobj.name);
        }
 
        PQclear(res);
@@ -5608,7 +5609,7 @@ getAggregates(Archive *fout, int *numAggs)
                agginfo[i].aggfn.rolname = pg_strdup(PQgetvalue(res, i, i_rolname));
                if (strlen(agginfo[i].aggfn.rolname) == 0)
                        pg_log_warning("owner of aggregate function \"%s\" appears to be invalid",
-                                         agginfo[i].aggfn.dobj.name);
+                                                  agginfo[i].aggfn.dobj.name);
                agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
                agginfo[i].aggfn.prorettype = InvalidOid;       /* not saved */
                agginfo[i].aggfn.proacl = pg_strdup(PQgetvalue(res, i, i_aggacl));
@@ -5868,7 +5869,7 @@ getFuncs(Archive *fout, int *numFuncs)
 
                if (strlen(finfo[i].rolname) == 0)
                        pg_log_warning("owner of function \"%s\" appears to be invalid",
-                                         finfo[i].dobj.name);
+                                                  finfo[i].dobj.name);
        }
 
        PQclear(res);
@@ -6664,7 +6665,7 @@ getTables(Archive *fout, int *numTables)
                /* Emit notice if join for owner failed */
                if (strlen(tblinfo[i].rolname) == 0)
                        pg_log_warning("owner of table \"%s\" appears to be invalid",
-                                         tblinfo[i].dobj.name);
+                                                  tblinfo[i].dobj.name);
        }
 
        if (dopt->lockWaitTimeout)
@@ -6706,7 +6707,7 @@ getOwnedSeqs(Archive *fout, TableInfo tblinfo[], int numTables)
                owning_tab = findTableByOid(seqinfo->owning_tab);
                if (owning_tab == NULL)
                        fatal("failed sanity check, parent table with OID %u of sequence with OID %u not found",
-                                                 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
+                                 seqinfo->owning_tab, seqinfo->dobj.catId.oid);
 
                /*
                 * Only dump identity sequences if we're going to dump the table that
@@ -7470,7 +7471,7 @@ getRules(Archive *fout, int *numRules)
                ruleinfo[i].ruletable = findTableByOid(ruletableoid);